Skip to content
Snippets Groups Projects
memory.c 73.7 KiB
Newer Older
  • Learn to ignore specific revisions
  • Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     *  linux/mm/memory.c
     *
     *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
     */
    
    /*
     * demand-loading started 01.12.91 - seems it is high on the list of
     * things wanted, and it should be easy to implement. - Linus
     */
    
    /*
     * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
     * pages started 02.12.91, seems to work. - Linus.
     *
     * Tested sharing by executing about 30 /bin/sh: under the old kernel it
     * would have taken more than the 6M I have free, but it worked well as
     * far as I could see.
     *
     * Also corrected some "invalidate()"s - I wasn't doing enough of them.
     */
    
    /*
     * Real VM (paging to/from disk) started 18.12.91. Much more work and
     * thought has to go into this. Oh, well..
     * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
     *		Found it. Everything seems to work now.
     * 20.12.91  -  Ok, making the swap-device changeable like the root.
     */
    
    /*
     * 05.04.94  -  Multi-page memory management added for v1.1.
     * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
     *
     * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
     *		(Gerhard.Wichert@pdb.siemens.de)
     *
     * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
     */
    
    #include <linux/kernel_stat.h>
    #include <linux/mm.h>
    #include <linux/hugetlb.h>
    #include <linux/mman.h>
    #include <linux/swap.h>
    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    #include <linux/rmap.h>
    #include <linux/module.h>
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    #include <linux/init.h>
    
    #include <linux/writeback.h>
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    #include <asm/pgalloc.h>
    #include <asm/uaccess.h>
    #include <asm/tlb.h>
    #include <asm/tlbflush.h>
    #include <asm/pgtable.h>
    
    #include <linux/swapops.h>
    #include <linux/elf.h>
    
    
    #ifndef CONFIG_NEED_MULTIPLE_NODES
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /* use the per-pgdat data instead for discontigmem - mbligh */
    unsigned long max_mapnr;
    struct page *mem_map;
    
    EXPORT_SYMBOL(max_mapnr);
    EXPORT_SYMBOL(mem_map);
    #endif
    
    unsigned long num_physpages;
    /*
     * A number of key systems in x86 including ioremap() rely on the assumption
     * that high_memory defines the upper bound on direct map memory, then end
     * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
     * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
     * and ZONE_HIGHMEM.
     */
    void * high_memory;
    
    EXPORT_SYMBOL(num_physpages);
    EXPORT_SYMBOL(high_memory);
    
    
    int randomize_va_space __read_mostly = 1;
    
    static int __init disable_randmaps(char *s)
    {
    	randomize_va_space = 0;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     * If a p?d_bad entry is found while walking page tables, report
     * the error, before resetting entry to p?d_none.  Usually (but
     * very seldom) called out from the p?d_none_or_clear_bad macros.
     */
    
/* Report a corrupt pgd entry via the arch's pgd_ERROR, then reset it to none. */
void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}
    
/* Report a corrupt pud entry via the arch's pud_ERROR, then reset it to none. */
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
    
/* Report a corrupt pmd entry via the arch's pmd_ERROR, then reset it to none. */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}
    
    /*
     * Note: this doesn't free the actual pages themselves. That
     * has been handled earlier when unmapping all the memory regions.
     */
    
    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct page *page = pmd_page(*pmd);
    	pmd_clear(pmd);
    
    	pte_lock_deinit(page);
    
    	pte_free_tlb(tlb, page);
    
    	dec_zone_page_state(page, NR_PAGETABLE);
    
    	tlb->mm->nr_ptes--;
    
    static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
    				unsigned long addr, unsigned long end,
    				unsigned long floor, unsigned long ceiling)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pmd_t *pmd;
    	unsigned long next;
    
    	unsigned long start;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pmd = pmd_offset(pud, addr);
    	do {
    		next = pmd_addr_end(addr, end);
    		if (pmd_none_or_clear_bad(pmd))
    			continue;
    
    		free_pte_range(tlb, pmd);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pmd++, addr = next, addr != end);
    
    
    	start &= PUD_MASK;
    	if (start < floor)
    		return;
    	if (ceiling) {
    		ceiling &= PUD_MASK;
    		if (!ceiling)
    			return;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	if (end - 1 > ceiling - 1)
    		return;
    
    	pmd = pmd_offset(pud, start);
    	pud_clear(pud);
    	pmd_free_tlb(tlb, pmd);
    
    static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
    				unsigned long addr, unsigned long end,
    				unsigned long floor, unsigned long ceiling)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pud_t *pud;
    	unsigned long next;
    
    	unsigned long start;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pud = pud_offset(pgd, addr);
    	do {
    		next = pud_addr_end(addr, end);
    		if (pud_none_or_clear_bad(pud))
    			continue;
    
    		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pud++, addr = next, addr != end);
    
    
    	start &= PGDIR_MASK;
    	if (start < floor)
    		return;
    	if (ceiling) {
    		ceiling &= PGDIR_MASK;
    		if (!ceiling)
    			return;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	if (end - 1 > ceiling - 1)
    		return;
    
    	pud = pud_offset(pgd, start);
    	pgd_clear(pgd);
    	pud_free_tlb(tlb, pud);
    
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
    
    void free_pgd_range(struct mmu_gather **tlb,
    
    			unsigned long addr, unsigned long end,
    			unsigned long floor, unsigned long ceiling)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pgd_t *pgd;
    	unsigned long next;
    
    	unsigned long start;
    
    	/*
    	 * The next few lines have given us lots of grief...
    	 *
    	 * Why are we testing PMD* at this top level?  Because often
    	 * there will be no work to do at all, and we'd prefer not to
    	 * go all the way down to the bottom just to discover that.
    	 *
    	 * Why all these "- 1"s?  Because 0 represents both the bottom
    	 * of the address space and the top of it (using -1 for the
    	 * top wouldn't help much: the masks would do the wrong thing).
    	 * The rule is that addr 0 and floor 0 refer to the bottom of
    	 * the address space, but end 0 and ceiling 0 refer to the top
    	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
    	 * that end 0 case should be mythical).
    	 *
    	 * Wherever addr is brought up or ceiling brought down, we must
    	 * be careful to reject "the opposite 0" before it confuses the
    	 * subsequent tests.  But what about where end is brought down
    	 * by PMD_SIZE below? no, end can't go down to 0 there.
    	 *
    	 * Whereas we round start (addr) and ceiling down, by different
    	 * masks at different levels, in order to test whether a table
    	 * now has no other vmas using it, so can be freed, we don't
    	 * bother to round floor or end up - the tests don't need that.
    	 */
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	addr &= PMD_MASK;
    	if (addr < floor) {
    		addr += PMD_SIZE;
    		if (!addr)
    			return;
    	}
    	if (ceiling) {
    		ceiling &= PMD_MASK;
    		if (!ceiling)
    			return;
    	}
    	if (end - 1 > ceiling - 1)
    		end -= PMD_SIZE;
    	if (addr > end - 1)
    		return;
    
    	start = addr;
    
    	pgd = pgd_offset((*tlb)->mm, addr);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	do {
    		next = pgd_addr_end(addr, end);
    		if (pgd_none_or_clear_bad(pgd))
    			continue;
    
    		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pgd++, addr = next, addr != end);
    
    }
    
    void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
    
    		unsigned long floor, unsigned long ceiling)
    
    {
    	while (vma) {
    		struct vm_area_struct *next = vma->vm_next;
    		unsigned long addr = vma->vm_start;
    
    
    		/*
    		 * Hide vma from rmap and vmtruncate before freeing pgtables
    		 */
    		anon_vma_unlink(vma);
    		unlink_file_vma(vma);
    
    
    		if (is_vm_hugetlb_page(vma)) {
    
    			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
    
    				floor, next? next->vm_start: ceiling);
    
    		} else {
    			/*
    			 * Optimization: gather nearby vmas into one call down
    			 */
    			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
    
    			       && !is_vm_hugetlb_page(next)) {
    
    				vma = next;
    				next = vma->vm_next;
    
    				anon_vma_unlink(vma);
    				unlink_file_vma(vma);
    
    			}
    			free_pgd_range(tlb, addr, vma->vm_end,
    				floor, next? next->vm_start: ceiling);
    		}
    
    int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct page *new = pte_alloc_one(mm, address);
    
    	if (!new)
    		return -ENOMEM;
    
    
    	pte_lock_init(new);
    
    	spin_lock(&mm->page_table_lock);
    
    	if (pmd_present(*pmd)) {	/* Another has populated it */
    		pte_lock_deinit(new);
    
    		pte_free(new);
    
    	} else {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		mm->nr_ptes++;
    
    		inc_zone_page_state(new, NR_PAGETABLE);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		pmd_populate(mm, pmd, new);
    	}
    
    	spin_unlock(&mm->page_table_lock);
    
    int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
    	if (!new)
    		return -ENOMEM;
    
    	spin_lock(&init_mm.page_table_lock);
    	if (pmd_present(*pmd))		/* Another has populated it */
    		pte_free_kernel(new);
    	else
    		pmd_populate_kernel(&init_mm, pmd, new);
    	spin_unlock(&init_mm.page_table_lock);
    	return 0;
    
    static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
    {
    	if (file_rss)
    		add_mm_counter(mm, file_rss, file_rss);
    	if (anon_rss)
    		add_mm_counter(mm, anon_rss, anon_rss);
    }
    
    
/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
    void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
    {
    	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
    			"vm_flags = %lx, vaddr = %lx\n",
    		(long long)pte_val(pte),
    		(vma->vm_mm == current->mm ? current->comm : "???"),
    		vma->vm_flags, vaddr);
    	dump_stack();
    }
    
    
    static inline int is_cow_mapping(unsigned int flags)
    {
    	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }
    
    
/*
 * This function gets the "struct page" associated with a pte.
 *
 * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
 * will have each page table entry just pointing to a raw page frame
 * number, and as far as the VM layer is concerned, those do not have
 * pages associated with them - even if the PFN might point to memory
 * that otherwise is perfectly fine and has a "struct page".
 *
 * The way we recognize those mappings is through the rules set up
 * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
 * and the vm_pgoff will point to the first PFN mapped: thus every
 * page that is a raw mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * and if that isn't true, the page has been COW'ed (in which case it
 * _does_ have a "struct page" associated with it even if it is in a
 * VM_PFNMAP range).
 */
    
    struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
    
    	unsigned long pfn = pte_pfn(pte);
    
    
    	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
    
    		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
    		if (pfn == vma->vm_pgoff + off)
    			return NULL;
    
    		if (!is_cow_mapping(vma->vm_flags))
    
    			return NULL;
    
    #ifdef CONFIG_DEBUG_VM
    
    	/*
    	 * Add some anal sanity checks for now. Eventually,
    	 * we should just do "return pfn_to_page(pfn)", but
    	 * in the meantime we check that we get a valid pfn,
    	 * and that the resulting page looks ok.
    	 */
    
    	if (unlikely(!pfn_valid(pfn))) {
    		print_bad_pte(vma, pte, addr);
    		return NULL;
    	}
    
    
    	/*
    	 * NOTE! We still have PageReserved() pages in the page 
    	 * tables. 
    	 *
    	 * The PAGE_ZERO() pages and various VDSO mappings can
    	 * cause them to exist.
    	 */
    	return pfn_to_page(pfn);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     * copy one vm_area from one task to the other. Assumes the page tables
     * already present in the new task to be cleared in the whole range
     * covered by this vma.
     */
    
    
    static inline void
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    
    		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
    
    		unsigned long addr, int *rss)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	unsigned long vm_flags = vma->vm_flags;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_t pte = *src_pte;
    	struct page *page;
    
    	/* pte contains position in swap or file, so copy. */
    	if (unlikely(!pte_present(pte))) {
    		if (!pte_file(pte)) {
    
    			swp_entry_t entry = pte_to_swp_entry(pte);
    
    			swap_duplicate(entry);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			/* make sure dst_mm is on swapoff's mmlist. */
    			if (unlikely(list_empty(&dst_mm->mmlist))) {
    				spin_lock(&mmlist_lock);
    
    				if (list_empty(&dst_mm->mmlist))
    					list_add(&dst_mm->mmlist,
    						 &src_mm->mmlist);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				spin_unlock(&mmlist_lock);
    			}
    
    			if (is_write_migration_entry(entry) &&
    					is_cow_mapping(vm_flags)) {
    				/*
    				 * COW mappings require pages in both parent
    				 * and child to be set to read.
    				 */
    				make_migration_entry_read(&entry);
    				pte = swp_entry_to_pte(entry);
    				set_pte_at(src_mm, addr, src_pte, pte);
    			}
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		}
    
    		goto out_set_pte;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	/*
    	 * If it's a COW mapping, write protect it both
    	 * in the parent and the child
    	 */
    
    	if (is_cow_mapping(vm_flags)) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		ptep_set_wrprotect(src_mm, addr, src_pte);
    
    		pte = pte_wrprotect(pte);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	/*
    	 * If it's a shared mapping, mark it clean in
    	 * the child
    	 */
    	if (vm_flags & VM_SHARED)
    		pte = pte_mkclean(pte);
    	pte = pte_mkold(pte);
    
    
    	page = vm_normal_page(vma, addr, pte);
    	if (page) {
    		get_page(page);
    
    Nicholas Piggin's avatar
    Nicholas Piggin committed
    		page_dup_rmap(page, vma, addr);
    
    		rss[!!PageAnon(page)]++;
    	}
    
    
    out_set_pte:
    	set_pte_at(dst_mm, addr, dst_pte, pte);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pte_t *src_pte, *dst_pte;
    
    	spinlock_t *src_ptl, *dst_ptl;
    
    	int progress = 0;
    
    	int rss[2];
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    again:
    
    	rss[1] = rss[0] = 0;
    
    	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (!dst_pte)
    		return -ENOMEM;
    	src_pte = pte_offset_map_nested(src_pmd, addr);
    
    	src_ptl = pte_lockptr(src_mm, src_pmd);
    
    	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
    
    	arch_enter_lazy_mmu_mode();
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	do {
    		/*
    		 * We are holding two locks at this point - either of them
    		 * could generate latencies in another task on another CPU.
    		 */
    
    		if (progress >= 32) {
    			progress = 0;
    			if (need_resched() ||
    
    			    need_lockbreak(src_ptl) ||
    			    need_lockbreak(dst_ptl))
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		if (pte_none(*src_pte)) {
    			progress++;
    			continue;
    		}
    
    		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		progress += 8;
    	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
    
    
    	arch_leave_lazy_mmu_mode();
    
    	spin_unlock(src_ptl);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_unmap_nested(src_pte - 1);
    
    	add_mm_rss(dst_mm, rss[0], rss[1]);
    
    	pte_unmap_unlock(dst_pte - 1, dst_ptl);
    	cond_resched();
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (addr != end)
    		goto again;
    	return 0;
    }
    
    static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pmd_t *src_pmd, *dst_pmd;
    	unsigned long next;
    
    	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
    	if (!dst_pmd)
    		return -ENOMEM;
    	src_pmd = pmd_offset(src_pud, addr);
    	do {
    		next = pmd_addr_end(addr, end);
    		if (pmd_none_or_clear_bad(src_pmd))
    			continue;
    		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
    						vma, addr, next))
    			return -ENOMEM;
    	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
    	return 0;
    }
    
    static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pud_t *src_pud, *dst_pud;
    	unsigned long next;
    
    	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
    	if (!dst_pud)
    		return -ENOMEM;
    	src_pud = pud_offset(src_pgd, addr);
    	do {
    		next = pud_addr_end(addr, end);
    		if (pud_none_or_clear_bad(src_pud))
    			continue;
    		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
    						vma, addr, next))
    			return -ENOMEM;
    	} while (dst_pud++, src_pud++, addr = next, addr != end);
    	return 0;
    }
    
    int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		struct vm_area_struct *vma)
    {
    	pgd_t *src_pgd, *dst_pgd;
    	unsigned long next;
    	unsigned long addr = vma->vm_start;
    	unsigned long end = vma->vm_end;
    
    
    	/*
    	 * Don't copy ptes where a page fault will fill them correctly.
    	 * Fork becomes much lighter when there are big shared or private
    	 * readonly mappings. The tradeoff is that copy_page_range is more
    	 * efficient than faulting.
    	 */
    
    	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
    
    		if (!vma->anon_vma)
    			return 0;
    	}
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (is_vm_hugetlb_page(vma))
    		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
    
    	dst_pgd = pgd_offset(dst_mm, addr);
    	src_pgd = pgd_offset(src_mm, addr);
    	do {
    		next = pgd_addr_end(addr, end);
    		if (pgd_none_or_clear_bad(src_pgd))
    			continue;
    		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
    						vma, addr, next))
    			return -ENOMEM;
    	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
    	return 0;
    }
    
    
    static unsigned long zap_pte_range(struct mmu_gather *tlb,
    
    				struct vm_area_struct *vma, pmd_t *pmd,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    
    	struct mm_struct *mm = tlb->mm;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_t *pte;
    
    	spinlock_t *ptl;
    
    	int file_rss = 0;
    	int anon_rss = 0;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    
    	arch_enter_lazy_mmu_mode();
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	do {
    		pte_t ptent = *pte;
    
    		if (pte_none(ptent)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    
    		(*zap_work) -= PAGE_SIZE;
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		if (pte_present(ptent)) {
    
    			struct page *page;
    
    			page = vm_normal_page(vma, addr, ptent);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			if (unlikely(details) && page) {
    				/*
    				 * unmap_shared_mapping_pages() wants to
    				 * invalidate cache without truncating:
    				 * unmap shared but keep private pages.
    				 */
    				if (details->check_mapping &&
    				    details->check_mapping != page->mapping)
    					continue;
    				/*
    				 * Each page->index must be checked when
    				 * invalidating or truncating nonlinear.
    				 */
    				if (details->nonlinear_vma &&
    				    (page->index < details->first_index ||
    				     page->index > details->last_index))
    					continue;
    			}
    
    			ptent = ptep_get_and_clear_full(mm, addr, pte,
    
    							tlb->fullmm);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			tlb_remove_tlb_entry(tlb, pte, addr);
    			if (unlikely(!page))
    				continue;
    			if (unlikely(details) && details->nonlinear_vma
    			    && linear_page_index(details->nonlinear_vma,
    						addr) != page->index)
    
    				set_pte_at(mm, addr, pte,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    					   pgoff_to_pte(page->index));
    			if (PageAnon(page))
    
    				anon_rss--;
    
    			else {
    				if (pte_dirty(ptent))
    					set_page_dirty(page);
    				if (pte_young(ptent))
    
    				file_rss--;
    
    			page_remove_rmap(page, vma);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			tlb_remove_page(tlb, page);
    			continue;
    		}
    		/*
    		 * If details->check_mapping, we leave swap entries;
    		 * if details->nonlinear_vma, we leave file entries.
    		 */
    		if (unlikely(details))
    			continue;
    		if (!pte_file(ptent))
    			free_swap_and_cache(pte_to_swp_entry(ptent));
    
    		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
    
    	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
    
    	add_mm_rss(mm, file_rss, anon_rss);
    
    	arch_leave_lazy_mmu_mode();
    
    	pte_unmap_unlock(pte - 1, ptl);
    
    static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
    
    				struct vm_area_struct *vma, pud_t *pud,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pmd_t *pmd;
    	unsigned long next;
    
    	pmd = pmd_offset(pud, addr);
    	do {
    		next = pmd_addr_end(addr, end);
    
    		if (pmd_none_or_clear_bad(pmd)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    		}
    		next = zap_pte_range(tlb, vma, pmd, addr, next,
    						zap_work, details);
    	} while (pmd++, addr = next, (addr != end && *zap_work > 0));
    
    	return addr;
    
    static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
    
    				struct vm_area_struct *vma, pgd_t *pgd,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pud_t *pud;
    	unsigned long next;
    
    	pud = pud_offset(pgd, addr);
    	do {
    		next = pud_addr_end(addr, end);
    
    		if (pud_none_or_clear_bad(pud)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    		}
    		next = zap_pmd_range(tlb, vma, pud, addr, next,
    						zap_work, details);
    	} while (pud++, addr = next, (addr != end && *zap_work > 0));
    
    	return addr;
    
    static unsigned long unmap_page_range(struct mmu_gather *tlb,
    				struct vm_area_struct *vma,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    
    				long *zap_work, struct zap_details *details)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pgd_t *pgd;
    	unsigned long next;
    
    	if (details && !details->check_mapping && !details->nonlinear_vma)
    		details = NULL;
    
    	BUG_ON(addr >= end);
    	tlb_start_vma(tlb, vma);
    	pgd = pgd_offset(vma->vm_mm, addr);
    	do {
    		next = pgd_addr_end(addr, end);
    
    		if (pgd_none_or_clear_bad(pgd)) {
    			(*zap_work)--;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			continue;
    
    		}
    		next = zap_pud_range(tlb, vma, pgd, addr, next,
    						zap_work, details);
    	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	tlb_end_vma(tlb, vma);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    #ifdef CONFIG_PREEMPT
    # define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
    #else
    /* No preempt: go for improved straight-line efficiency */
    # define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
    #endif
    
    /**
     * unmap_vmas - unmap a range of memory covered by a list of vma's
     * @tlbp: address of the caller's struct mmu_gather
     * @vma: the starting vma
     * @start_addr: virtual address at which to start unmapping
     * @end_addr: virtual address at which to end unmapping
     * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
     * @details: details of nonlinear truncation or shared cache invalidation
     *
    
     * Returns the end address of the unmapping (restart addr if interrupted).
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     *
    
     * Unmap all pages in the vma list.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     *
    
     * We aim to not hold locks for too long (for scheduling latency reasons).
     * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     * return the ending mmu_gather to the caller.
     *
     * Only addresses between `start' and `end' will be unmapped.
     *
     * The VMA list must be sorted in ascending virtual address order.
     *
     * unmap_vmas() assumes that the caller will flush the whole unmapped address
     * range after unmap_vmas() returns.  So the only responsibility here is to
     * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
     * drops the lock and schedules.
     */
    
    unsigned long unmap_vmas(struct mmu_gather **tlbp,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		struct vm_area_struct *vma, unsigned long start_addr,
    		unsigned long end_addr, unsigned long *nr_accounted,
    		struct zap_details *details)
    {
    
    	long zap_work = ZAP_BLOCK_SIZE;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
    	int tlb_start_valid = 0;
    
    	unsigned long start = start_addr;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
    
    	int fullmm = (*tlbp)->fullmm;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
    		unsigned long end;
    
    		start = max(vma->vm_start, start_addr);
    		if (start >= vma->vm_end)
    			continue;
    		end = min(vma->vm_end, end_addr);
    		if (end <= vma->vm_start)
    			continue;
    
    		if (vma->vm_flags & VM_ACCOUNT)
    			*nr_accounted += (end - start) >> PAGE_SHIFT;
    
    		while (start != end) {
    			if (!tlb_start_valid) {
    				tlb_start = start;
    				tlb_start_valid = 1;
    			}
    
    
    			if (unlikely(is_vm_hugetlb_page(vma))) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				unmap_hugepage_range(vma, start, end);
    
    				zap_work -= (end - start) /
    						(HPAGE_SIZE / PAGE_SIZE);
    				start = end;
    			} else
    				start = unmap_page_range(*tlbp, vma,
    						start, end, &zap_work, details);
    
    			if (zap_work > 0) {
    				BUG_ON(start != end);
    				break;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			}
    
    			tlb_finish_mmu(*tlbp, tlb_start, start);
    
    			if (need_resched() ||
    				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
    				if (i_mmap_lock) {
    
    					*tlbp = NULL;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    					goto out;
    				}
    				cond_resched();
    			}
    
    
    			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			tlb_start_valid = 0;
    
    			zap_work = ZAP_BLOCK_SIZE;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		}
    	}
    out:
    
    	return start;	/* which is now the end (or restart) address */
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /**
     * zap_page_range - remove user pages in a given range
     * @vma: vm_area_struct holding the applicable pages
     * @address: starting address of pages to zap
     * @size: number of bytes to zap
     * @details: details of nonlinear truncation or shared cache invalidation
     */
    
    unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		unsigned long size, struct zap_details *details)
    {
    	struct mm_struct *mm = vma->vm_mm;
    	struct mmu_gather *tlb;
    	unsigned long end = address + size;
    	unsigned long nr_accounted = 0;
    
    	lru_add_drain();
    	tlb = tlb_gather_mmu(mm, 0);
    
    	update_hiwater_rss(mm);
    
    	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
    	if (tlb)
    		tlb_finish_mmu(tlb, address, end);
    
    	return end;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /*
     * Do a quick page-table lookup for a single page.
     */
    
    struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
    
    			unsigned int flags)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pgd_t *pgd;
    	pud_t *pud;
    	pmd_t *pmd;
    	pte_t *ptep, pte;
    
    	spinlock_t *ptl;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	struct page *page;
    
    	struct mm_struct *mm = vma->vm_mm;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
    	if (!IS_ERR(page)) {
    		BUG_ON(flags & FOLL_GET);
    		goto out;
    	}
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	pgd = pgd_offset(mm, address);
    	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
    
    		goto no_page_table;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	pud = pud_offset(pgd, address);
    	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
    
    		goto no_page_table;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	
    	pmd = pmd_offset(pud, address);
    	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
    
    		goto no_page_table;
    
    	if (pmd_huge(*pmd)) {
    		BUG_ON(flags & FOLL_GET);
    		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		goto out;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (!ptep)
    		goto out;
    
    	pte = *ptep;
    
    	if (!pte_present(pte))
    		goto unlock;
    	if ((flags & FOLL_WRITE) && !pte_write(pte))
    		goto unlock;
    
    	page = vm_normal_page(vma, address, pte);
    	if (unlikely(!page))
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    	if (flags & FOLL_GET)
    		get_page(page);
    	if (flags & FOLL_TOUCH) {
    		if ((flags & FOLL_WRITE) &&
    		    !pte_dirty(pte) && !PageDirty(page))
    			set_page_dirty(page);
    		mark_page_accessed(page);
    	}
    unlock:
    	pte_unmap_unlock(ptep, ptl);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    out:
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    no_page_table:
    	/*
    	 * When core dumping an enormous anonymous area that nobody
    	 * has touched so far, we don't want to allocate page tables.
    	 */
    	if (flags & FOLL_ANON) {
    
    Nicholas Piggin's avatar
    Nicholas Piggin committed
    		page = ZERO_PAGE(0);
    
    		if (flags & FOLL_GET)
    			get_page(page);
    		BUG_ON(flags & FOLL_WRITE);
    	}
    	return page;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
    		unsigned long start, int len, int write, int force,
    		struct page **pages, struct vm_area_struct **vmas)
    {
    	int i;
    
    	unsigned int vm_flags;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	/* 
    	 * Require read or write permissions.
    	 * If 'force' is set, we only require the "MAY" flags.
    	 */
    
    	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
    	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	i = 0;
    
    	do {
    
    		struct vm_area_struct *vma;
    		unsigned int foll_flags;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    		vma = find_extend_vma(mm, start);
    		if (!vma && in_gate_area(tsk, start)) {
    			unsigned long pg = start & PAGE_MASK;
    			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
    			pgd_t *pgd;
    			pud_t *pud;