Skip to content
Snippets Groups Projects
memory.c 58.1 KiB
Newer Older
  • Learn to ignore specific revisions
  • Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     *  linux/mm/memory.c
     *
     *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
     */
    
    /*
     * demand-loading started 01.12.91 - seems it is high on the list of
     * things wanted, and it should be easy to implement. - Linus
     */
    
    /*
     * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
     * pages started 02.12.91, seems to work. - Linus.
     *
     * Tested sharing by executing about 30 /bin/sh: under the old kernel it
     * would have taken more than the 6M I have free, but it worked well as
     * far as I could see.
     *
     * Also corrected some "invalidate()"s - I wasn't doing enough of them.
     */
    
    /*
     * Real VM (paging to/from disk) started 18.12.91. Much more work and
     * thought has to go into this. Oh, well..
     * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
     *		Found it. Everything seems to work now.
     * 20.12.91  -  Ok, making the swap-device changeable like the root.
     */
    
    /*
     * 05.04.94  -  Multi-page memory management added for v1.1.
     * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
     *
     * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
     *		(Gerhard.Wichert@pdb.siemens.de)
     *
     * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
     */
    
    #include <linux/kernel_stat.h>
    #include <linux/mm.h>
    #include <linux/hugetlb.h>
    #include <linux/mman.h>
    #include <linux/swap.h>
    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    #include <linux/rmap.h>
    #include <linux/module.h>
    #include <linux/init.h>
    
    #include <asm/pgalloc.h>
    #include <asm/uaccess.h>
    #include <asm/tlb.h>
    #include <asm/tlbflush.h>
    #include <asm/pgtable.h>
    
    #include <linux/swapops.h>
    #include <linux/elf.h>
    
    
    #ifndef CONFIG_NEED_MULTIPLE_NODES
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /* use the per-pgdat data instead for discontigmem - mbligh */
    unsigned long max_mapnr;
    struct page *mem_map;
    
    EXPORT_SYMBOL(max_mapnr);
    EXPORT_SYMBOL(mem_map);
    #endif
    
    unsigned long num_physpages;
    /*
     * A number of key systems in x86 including ioremap() rely on the assumption
     * that high_memory defines the upper bound on direct map memory, then end
     * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
     * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
     * and ZONE_HIGHMEM.
     */
    void * high_memory;
    unsigned long vmalloc_earlyreserve;
    
    EXPORT_SYMBOL(num_physpages);
    EXPORT_SYMBOL(high_memory);
    EXPORT_SYMBOL(vmalloc_earlyreserve);
    
    /*
     * If a p?d_bad entry is found while walking page tables, report
     * the error, before resetting entry to p?d_none.  Usually (but
     * very seldom) called out from the p?d_none_or_clear_bad macros.
     */
    
    /* Report a corrupt pgd entry, then clear it so the walker sees pgd_none. */
    void pgd_clear_bad(pgd_t *pgd)
    {
    	pgd_ERROR(*pgd);	/* log the bad value (arch-specific printk) */
    	pgd_clear(pgd);		/* reset the entry to none */
    }
    
    /* Report a corrupt pud entry, then clear it so the walker sees pud_none. */
    void pud_clear_bad(pud_t *pud)
    {
    	pud_ERROR(*pud);	/* log the bad value (arch-specific printk) */
    	pud_clear(pud);		/* reset the entry to none */
    }
    
    /* Report a corrupt pmd entry, then clear it so the walker sees pmd_none. */
    void pmd_clear_bad(pmd_t *pmd)
    {
    	pmd_ERROR(*pmd);	/* log the bad value (arch-specific printk) */
    	pmd_clear(pmd);		/* reset the entry to none */
    }
    
    /*
     * Note: this doesn't free the actual pages themselves. That
     * has been handled earlier when unmapping all the memory regions.
     */
    
    /*
     * Tear down one pte page: clear the pmd entry that points at it and
     * hand the page-table page itself to the mmu_gather for freeing.  The
     * user pages it mapped were freed earlier (see the comment above).
     * NOTE(review): the function's closing brace is absent in this copy -
     * looks like an extraction artifact; verify against the original file.
     */
    static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {

    	struct page *page = pmd_page(*pmd);
    	pmd_clear(pmd);

    	pte_lock_deinit(page);

    	pte_free_tlb(tlb, page);
    	dec_page_state(nr_page_table_pages);
    	tlb->mm->nr_ptes--;
    
    /*
     * Free every pte page in [addr, end) under this pud, then free the pmd
     * page itself if the [floor, ceiling) hints show no neighbouring vma
     * still needs it (see the long comment in free_pgd_range below).
     * NOTE(review): "start" is read below but never assigned in this copy
     * (presumably a lost "start = addr;" line), and the closing brace is
     * also missing - extraction artifacts; verify against the original.
     */
    static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
    				unsigned long addr, unsigned long end,
    				unsigned long floor, unsigned long ceiling)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pmd_t *pmd;
    	unsigned long next;

    	unsigned long start;

    Linus Torvalds's avatar
    Linus Torvalds committed


    Linus Torvalds's avatar
    Linus Torvalds committed
    	pmd = pmd_offset(pud, addr);
    	do {
    		next = pmd_addr_end(addr, end);
    		if (pmd_none_or_clear_bad(pmd))
    			continue;

    		free_pte_range(tlb, pmd);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pmd++, addr = next, addr != end);


    	start &= PUD_MASK;
    	if (start < floor)
    		return;
    	if (ceiling) {
    		ceiling &= PUD_MASK;
    		if (!ceiling)
    			return;

    Linus Torvalds's avatar
    Linus Torvalds committed
    	}

    	if (end - 1 > ceiling - 1)
    		return;

    	pmd = pmd_offset(pud, start);
    	pud_clear(pud);
    	pmd_free_tlb(tlb, pmd);
    
    /*
     * Free every pmd level in [addr, end) under this pgd, then free the pud
     * page itself if the [floor, ceiling) hints show no neighbouring vma
     * still needs it (see the long comment in free_pgd_range below).
     * NOTE(review): as in free_pmd_range, "start" is read but never
     * assigned in this copy and the closing brace is missing - extraction
     * artifacts; verify against the original file.
     */
    static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
    				unsigned long addr, unsigned long end,
    				unsigned long floor, unsigned long ceiling)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pud_t *pud;
    	unsigned long next;

    	unsigned long start;

    Linus Torvalds's avatar
    Linus Torvalds committed


    Linus Torvalds's avatar
    Linus Torvalds committed
    	pud = pud_offset(pgd, addr);
    	do {
    		next = pud_addr_end(addr, end);
    		if (pud_none_or_clear_bad(pud))
    			continue;

    		free_pmd_range(tlb, pud, addr, next, floor, ceiling);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pud++, addr = next, addr != end);


    	start &= PGDIR_MASK;
    	if (start < floor)
    		return;
    	if (ceiling) {
    		ceiling &= PGDIR_MASK;
    		if (!ceiling)
    			return;

    Linus Torvalds's avatar
    Linus Torvalds committed
    	}

    	if (end - 1 > ceiling - 1)
    		return;

    	pud = pud_offset(pgd, start);
    	pgd_clear(pgd);
    	pud_free_tlb(tlb, pud);
    
    /*
     * This function frees user-level page tables of a process.
     *
     * Must be called with pagetable lock held.
     */
    
    /*
     * Walk the pgd entries covering [addr, end) and free the page tables
     * beneath them, clamped by the floor/ceiling rules explained in the
     * long comment below.  "tlb" is a pointer-to-pointer because callers
     * (see unmap_vmas) may re-gather the mmu_gather between calls.
     */
    void free_pgd_range(struct mmu_gather **tlb,

    			unsigned long addr, unsigned long end,
    			unsigned long floor, unsigned long ceiling)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pgd_t *pgd;
    	unsigned long next;

    	unsigned long start;

    	/*
    	 * The next few lines have given us lots of grief...
    	 *
    	 * Why are we testing PMD* at this top level?  Because often
    	 * there will be no work to do at all, and we'd prefer not to
    	 * go all the way down to the bottom just to discover that.
    	 *
    	 * Why all these "- 1"s?  Because 0 represents both the bottom
    	 * of the address space and the top of it (using -1 for the
    	 * top wouldn't help much: the masks would do the wrong thing).
    	 * The rule is that addr 0 and floor 0 refer to the bottom of
    	 * the address space, but end 0 and ceiling 0 refer to the top
    	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
    	 * that end 0 case should be mythical).
    	 *
    	 * Wherever addr is brought up or ceiling brought down, we must
    	 * be careful to reject "the opposite 0" before it confuses the
    	 * subsequent tests.  But what about where end is brought down
    	 * by PMD_SIZE below? no, end can't go down to 0 there.
    	 *
    	 * Whereas we round start (addr) and ceiling down, by different
    	 * masks at different levels, in order to test whether a table
    	 * now has no other vmas using it, so can be freed, we don't
    	 * bother to round floor or end up - the tests don't need that.
    	 */

    Linus Torvalds's avatar
    Linus Torvalds committed


    	addr &= PMD_MASK;
    	if (addr < floor) {
    		addr += PMD_SIZE;
    		if (!addr)
    			return;
    	}
    	if (ceiling) {
    		ceiling &= PMD_MASK;
    		if (!ceiling)
    			return;
    	}
    	if (end - 1 > ceiling - 1)
    		end -= PMD_SIZE;
    	if (addr > end - 1)
    		return;

    	start = addr;

    	pgd = pgd_offset((*tlb)->mm, addr);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	do {
    		next = pgd_addr_end(addr, end);
    		if (pgd_none_or_clear_bad(pgd))
    			continue;

    		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pgd++, addr = next, addr != end);

    	if (!(*tlb)->fullmm)

    		flush_tlb_pgtables((*tlb)->mm, start, end);

    }
    
    /*
     * Free the page tables backing a list of vmas.  Each vma is first
     * hidden from rmap and vmtruncate, then its table span is freed via
     * free_pgd_range() (or the hugetlb variant), batching adjacent
     * non-hugepage vmas into a single call.
     * NOTE(review): the tail of this function (advancing "vma = next;" and
     * the closing braces of the while loop and function) is missing in
     * this copy - extraction artifact; verify against the original file.
     */
    void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,

    		unsigned long floor, unsigned long ceiling)

    {
    	while (vma) {
    		struct vm_area_struct *next = vma->vm_next;
    		unsigned long addr = vma->vm_start;


    		/*
    		 * Hide vma from rmap and vmtruncate before freeing pgtables
    		 */
    		anon_vma_unlink(vma);
    		unlink_file_vma(vma);


    		if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
    			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,

    				floor, next? next->vm_start: ceiling);

    		} else {
    			/*
    			 * Optimization: gather nearby vmas into one call down
    			 */
    			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
    			  && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
    							HPAGE_SIZE)) {
    				vma = next;
    				next = vma->vm_next;

    				anon_vma_unlink(vma);
    				unlink_file_vma(vma);

    			}
    			free_pgd_range(tlb, addr, vma->vm_end,
    				floor, next? next->vm_start: ceiling);
    		}
    
    /*
     * Allocate a new pte page for a user pmd and install it, taking
     * mm->page_table_lock to guard against a racing allocator: if someone
     * else populated the pmd first, the freshly allocated page is freed
     * again.  Returns -ENOMEM when the allocation fails.
     * NOTE(review): the trailing "return 0;" and closing brace are missing
     * in this copy - extraction artifact; verify against the original.
     */
    int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {

    	struct page *new = pte_alloc_one(mm, address);

    	if (!new)
    		return -ENOMEM;


    	pte_lock_init(new);

    	spin_lock(&mm->page_table_lock);

    	if (pmd_present(*pmd)) {	/* Another has populated it */
    		pte_lock_deinit(new);

    		pte_free(new);

    	} else {

    Linus Torvalds's avatar
    Linus Torvalds committed
    		mm->nr_ptes++;
    		inc_page_state(nr_page_table_pages);
    		pmd_populate(mm, pmd, new);
    	}

    	spin_unlock(&mm->page_table_lock);
    
    /*
     * Kernel-space counterpart of __pte_alloc: allocate a pte page for a
     * pmd in init_mm under init_mm.page_table_lock, dropping the page if a
     * racing allocator populated the pmd first.  Returns 0 or -ENOMEM.
     * NOTE(review): the closing brace is missing in this copy - extraction
     * artifact; verify against the original file.
     */
    int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {

    	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
    	if (!new)
    		return -ENOMEM;

    	spin_lock(&init_mm.page_table_lock);
    	if (pmd_present(*pmd))		/* Another has populated it */
    		pte_free_kernel(new);
    	else
    		pmd_populate_kernel(&init_mm, pmd, new);
    	spin_unlock(&init_mm.page_table_lock);
    	return 0;
    
    /*
     * Fold a pair of batched rss deltas into mm's counters, skipping the
     * counter update entirely when a delta is zero.  The second argument
     * of add_mm_counter() names the counter member, so it must stay the
     * literal token file_rss / anon_rss.
     */
    static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
    {
    	if (anon_rss)
    		add_mm_counter(mm, anon_rss, anon_rss);
    	if (file_rss)
    		add_mm_counter(mm, file_rss, file_rss);
    }
    
    
    /*
     * This function is called to print an error when a pte in a
     * !VM_RESERVED region is found pointing to an invalid pfn (which
     * is an error).
     *
     * The calling function must still handle the error.
     */
    void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
    {
    	/*
    	 * Identify the faulting process only when the vma belongs to the
    	 * current task; otherwise report "???".
    	 */
    	const char *comm = (vma->vm_mm == current->mm) ? current->comm : "???";

    	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
    			"vm_flags = %lx, vaddr = %lx\n",
    		(long long)pte_val(pte), comm, vma->vm_flags, vaddr);
    	dump_stack();	/* backtrace so the caller's path can be identified */
    }
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /*
     * copy one vm_area from one task to the other. Assumes the page tables
     * already present in the new task to be cleared in the whole range
     * covered by this vma.
     */
    
    
    /*
     * Copy a single pte from src_mm to dst_mm at addr during fork:
     * non-present entries (swap/file) are duplicated as-is, COW mappings
     * are write-protected in both parent and child, shared mappings are
     * marked clean in the child, and the rss[] tally (indexed by
     * PageAnon) is updated for the caller.
     * NOTE(review): the brace closing the !pte_present() block (after
     * "goto out_set_pte;") is missing in this copy - extraction artifact;
     * verify against the original file.
     */
    static inline void

    Linus Torvalds's avatar
    Linus Torvalds committed
    copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,

    		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,

    		unsigned long addr, int *rss)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {

    	unsigned long vm_flags = vma->vm_flags;

    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_t pte = *src_pte;
    	struct page *page;
    	unsigned long pfn;

    	/* pte contains position in swap or file, so copy. */
    	if (unlikely(!pte_present(pte))) {
    		if (!pte_file(pte)) {
    			swap_duplicate(pte_to_swp_entry(pte));
    			/* make sure dst_mm is on swapoff's mmlist. */
    			if (unlikely(list_empty(&dst_mm->mmlist))) {
    				spin_lock(&mmlist_lock);
    				list_add(&dst_mm->mmlist, &src_mm->mmlist);
    				spin_unlock(&mmlist_lock);
    			}
    		}

    		goto out_set_pte;

    	/* If the region is VM_RESERVED, the mapping is not
    	 * mapped via rmap - duplicate the pte as is.
    	 */
    	if (vm_flags & VM_RESERVED)
    		goto out_set_pte;


    Linus Torvalds's avatar
    Linus Torvalds committed
    	pfn = pte_pfn(pte);

    	/* If the pte points outside of valid memory but
    	 * the region is not VM_RESERVED, we have a problem.

    Linus Torvalds's avatar
    Linus Torvalds committed
    	 */

    	if (unlikely(!pfn_valid(pfn))) {
    		print_bad_pte(vma, pte, addr);
    		goto out_set_pte; /* try to do something sane */
    	}

    Linus Torvalds's avatar
    Linus Torvalds committed


    	page = pfn_to_page(pfn);

    Linus Torvalds's avatar
    Linus Torvalds committed

    	/*
    	 * If it's a COW mapping, write protect it both
    	 * in the parent and the child
    	 */
    	if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
    		ptep_set_wrprotect(src_mm, addr, src_pte);
    		pte = *src_pte;
    	}

    	/*
    	 * If it's a shared mapping, mark it clean in
    	 * the child
    	 */
    	if (vm_flags & VM_SHARED)
    		pte = pte_mkclean(pte);
    	pte = pte_mkold(pte);
    	get_page(page);
    	page_dup_rmap(page);

    	rss[!!PageAnon(page)]++;


    out_set_pte:
    	set_pte_at(dst_mm, addr, dst_pte, pte);

    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /*
     * Copy the ptes covering [addr, end) under one pmd from src_mm to
     * dst_mm, holding both pte locks and periodically dropping them
     * (progress counter) to bound latency; rss deltas are batched and
     * applied once per lock hold via add_mm_rss().
     * NOTE(review): the body of the "if (progress >= 32)" lock-break test
     * (presumably "break;" and its closing brace) is missing in this copy -
     * extraction artifact; verify against the original file.
     */
    static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pte_t *src_pte, *dst_pte;

    	spinlock_t *src_ptl, *dst_ptl;

    	int progress = 0;

    	int rss[2];

    Linus Torvalds's avatar
    Linus Torvalds committed

    again:

    	rss[1] = rss[0] = 0;

    	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (!dst_pte)
    		return -ENOMEM;
    	src_pte = pte_offset_map_nested(src_pmd, addr);

    	src_ptl = pte_lockptr(src_mm, src_pmd);

    	spin_lock(src_ptl);

    Linus Torvalds's avatar
    Linus Torvalds committed

    	do {
    		/*
    		 * We are holding two locks at this point - either of them
    		 * could generate latencies in another task on another CPU.
    		 */

    		if (progress >= 32) {
    			progress = 0;
    			if (need_resched() ||

    			    need_lockbreak(src_ptl) ||
    			    need_lockbreak(dst_ptl))

    Linus Torvalds's avatar
    Linus Torvalds committed
    		if (pte_none(*src_pte)) {
    			progress++;
    			continue;
    		}

    		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);

    Linus Torvalds's avatar
    Linus Torvalds committed
    		progress += 8;
    	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);


    	spin_unlock(src_ptl);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_unmap_nested(src_pte - 1);

    	add_mm_rss(dst_mm, rss[0], rss[1]);

    	pte_unmap_unlock(dst_pte - 1, dst_ptl);
    	cond_resched();

    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (addr != end)
    		goto again;
    	return 0;
    }
    
    /*
     * Mirror the pmd level of src_mm into dst_mm for [addr, end),
     * allocating destination pmds and copying the pte range beneath every
     * populated source entry.  Returns 0 on success, -ENOMEM on failure.
     */
    static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pmd_t *dst_pmd, *src_pmd;
    	unsigned long next_addr;

    	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
    	if (dst_pmd == NULL)
    		return -ENOMEM;
    	src_pmd = pmd_offset(src_pud, addr);
    	do {
    		next_addr = pmd_addr_end(addr, end);
    		if (!pmd_none_or_clear_bad(src_pmd)) {
    			int err = copy_pte_range(dst_mm, src_mm, dst_pmd,
    						 src_pmd, vma, addr, next_addr);
    			if (err)
    				return err;
    		}
    		dst_pmd++;
    		src_pmd++;
    		addr = next_addr;
    	} while (addr != end);
    	return 0;
    }
    
    /*
     * Mirror the pud level of src_mm into dst_mm for [addr, end),
     * allocating destination puds and recursing into the pmd level beneath
     * every populated source entry.  Returns 0 on success, -ENOMEM on
     * failure.
     */
    static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
    		unsigned long addr, unsigned long end)
    {
    	pud_t *dst_pud, *src_pud;
    	unsigned long next_addr;

    	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
    	if (dst_pud == NULL)
    		return -ENOMEM;
    	src_pud = pud_offset(src_pgd, addr);
    	do {
    		next_addr = pud_addr_end(addr, end);
    		if (!pud_none_or_clear_bad(src_pud)) {
    			int err = copy_pmd_range(dst_mm, src_mm, dst_pud,
    						 src_pud, vma, addr, next_addr);
    			if (err)
    				return err;
    		}
    		dst_pud++;
    		src_pud++;
    		addr = next_addr;
    	} while (addr != end);
    	return 0;
    }
    
    /*
     * Duplicate one vma's page tables from src_mm into dst_mm at fork
     * time.  Hugetlb vmas take their own path; plain file-backed vmas with
     * no anon pages are skipped entirely, since a page fault can refill
     * them on demand.  Returns 0 on success, -ENOMEM on failure.
     */
    int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
    		struct vm_area_struct *vma)
    {
    	pgd_t *dst_pgd, *src_pgd;
    	unsigned long next_addr;
    	unsigned long addr = vma->vm_start;
    	unsigned long end = vma->vm_end;

    	/*
    	 * Don't copy ptes where a page fault will fill them correctly.
    	 * Fork becomes much lighter when there are big shared or private
    	 * readonly mappings. The tradeoff is that copy_page_range is more
    	 * efficient than faulting.
    	 */
    	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED)) &&
    	    !vma->anon_vma)
    		return 0;

    	if (is_vm_hugetlb_page(vma))
    		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

    	dst_pgd = pgd_offset(dst_mm, addr);
    	src_pgd = pgd_offset(src_mm, addr);
    	do {
    		next_addr = pgd_addr_end(addr, end);
    		if (!pgd_none_or_clear_bad(src_pgd)) {
    			int err = copy_pud_range(dst_mm, src_mm, dst_pgd,
    						 src_pgd, vma, addr, next_addr);
    			if (err)
    				return err;
    		}
    		dst_pgd++;
    		src_pgd++;
    		addr = next_addr;
    	} while (addr != end);
    	return 0;
    }
    
    
    /*
     * Unmap the ptes covering [addr, end) under one pmd: present pages
     * are filtered by the optional zap_details (truncation/invalidation),
     * removed from rmap and handed to the mmu_gather; swap/file entries
     * are released unless details asks to keep them.  rss deltas are
     * batched and applied once via add_mm_rss().
     * NOTE(review): the brace closing the "else {" dirty/accessed branch
     * (before page_remove_rmap) and the function's closing brace are
     * missing in this copy - extraction artifacts; verify against the
     * original file.
     */
    static void zap_pte_range(struct mmu_gather *tlb,
    				struct vm_area_struct *vma, pmd_t *pmd,

    Linus Torvalds's avatar
    Linus Torvalds committed
    				unsigned long addr, unsigned long end,
    				struct zap_details *details)
    {

    	struct mm_struct *mm = tlb->mm;

    Linus Torvalds's avatar
    Linus Torvalds committed
    	pte_t *pte;

    	spinlock_t *ptl;

    	int file_rss = 0;
    	int anon_rss = 0;

    Linus Torvalds's avatar
    Linus Torvalds committed


    	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	do {
    		pte_t ptent = *pte;
    		if (pte_none(ptent))
    			continue;
    		if (pte_present(ptent)) {
    			struct page *page = NULL;

    			if (!(vma->vm_flags & VM_RESERVED)) {
    				unsigned long pfn = pte_pfn(ptent);
    				if (unlikely(!pfn_valid(pfn)))
    					print_bad_pte(vma, ptent, addr);
    				else
    					page = pfn_to_page(pfn);

    Linus Torvalds's avatar
    Linus Torvalds committed
    			}
    			if (unlikely(details) && page) {
    				/*
    				 * unmap_shared_mapping_pages() wants to
    				 * invalidate cache without truncating:
    				 * unmap shared but keep private pages.
    				 */
    				if (details->check_mapping &&
    				    details->check_mapping != page->mapping)
    					continue;
    				/*
    				 * Each page->index must be checked when
    				 * invalidating or truncating nonlinear.
    				 */
    				if (details->nonlinear_vma &&
    				    (page->index < details->first_index ||
    				     page->index > details->last_index))
    					continue;
    			}

    			ptent = ptep_get_and_clear_full(mm, addr, pte,

    							tlb->fullmm);

    Linus Torvalds's avatar
    Linus Torvalds committed
    			tlb_remove_tlb_entry(tlb, pte, addr);
    			if (unlikely(!page))
    				continue;
    			if (unlikely(details) && details->nonlinear_vma
    			    && linear_page_index(details->nonlinear_vma,
    						addr) != page->index)

    				set_pte_at(mm, addr, pte,

    Linus Torvalds's avatar
    Linus Torvalds committed
    					   pgoff_to_pte(page->index));
    			if (PageAnon(page))

    				anon_rss--;

    			else {
    				if (pte_dirty(ptent))
    					set_page_dirty(page);
    				if (pte_young(ptent))
    					mark_page_accessed(page);

    				file_rss--;

    Linus Torvalds's avatar
    Linus Torvalds committed
    			page_remove_rmap(page);
    			tlb_remove_page(tlb, page);
    			continue;
    		}
    		/*
    		 * If details->check_mapping, we leave swap entries;
    		 * if details->nonlinear_vma, we leave file entries.
    		 */
    		if (unlikely(details))
    			continue;
    		if (!pte_file(ptent))
    			free_swap_and_cache(pte_to_swp_entry(ptent));

    		pte_clear_full(mm, addr, pte, tlb->fullmm);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	} while (pte++, addr += PAGE_SIZE, addr != end);

    	add_mm_rss(mm, file_rss, anon_rss);

    	pte_unmap_unlock(pte - 1, ptl);
    
    /*
     * Walk the pmd entries covering [addr, end) under one pud and zap the
     * pte range beneath every populated entry.
     */
    static inline void zap_pmd_range(struct mmu_gather *tlb,
    				struct vm_area_struct *vma, pud_t *pud,
    				unsigned long addr, unsigned long end,
    				struct zap_details *details)
    {
    	pmd_t *entry = pmd_offset(pud, addr);
    	unsigned long next_addr;

    	do {
    		next_addr = pmd_addr_end(addr, end);
    		if (!pmd_none_or_clear_bad(entry))
    			zap_pte_range(tlb, vma, entry, addr, next_addr, details);
    		entry++;
    		addr = next_addr;
    	} while (addr != end);
    }
    
    
    /*
     * Walk the pud entries covering [addr, end) under one pgd and zap the
     * pmd level beneath every populated entry.
     */
    static inline void zap_pud_range(struct mmu_gather *tlb,
    				struct vm_area_struct *vma, pgd_t *pgd,
    				unsigned long addr, unsigned long end,
    				struct zap_details *details)
    {
    	pud_t *entry = pud_offset(pgd, addr);
    	unsigned long next_addr;

    	do {
    		next_addr = pud_addr_end(addr, end);
    		if (!pud_none_or_clear_bad(entry))
    			zap_pmd_range(tlb, vma, entry, addr, next_addr, details);
    		entry++;
    		addr = next_addr;
    	} while (addr != end);
    }
    
    /*
     * Unmap [addr, end) within one vma, walking down from the pgd level.
     * A details block that carries neither a mapping filter nor a
     * nonlinear vma imposes no restrictions, so it is dropped up front.
     */
    static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
    				unsigned long addr, unsigned long end,
    				struct zap_details *details)
    {
    	pgd_t *entry;
    	unsigned long next_addr;

    	if (details && !details->check_mapping && !details->nonlinear_vma)
    		details = NULL;

    	BUG_ON(addr >= end);
    	tlb_start_vma(tlb, vma);
    	entry = pgd_offset(vma->vm_mm, addr);
    	do {
    		next_addr = pgd_addr_end(addr, end);
    		if (!pgd_none_or_clear_bad(entry))
    			zap_pud_range(tlb, vma, entry, addr, next_addr, details);
    		entry++;
    		addr = next_addr;
    	} while (addr != end);
    	tlb_end_vma(tlb, vma);
    }
    
    #ifdef CONFIG_PREEMPT
    # define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
    #else
    /* No preempt: go for improved straight-line efficiency */
    # define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
    #endif
    
    /**
     * unmap_vmas - unmap a range of memory covered by a list of vma's
     * @tlbp: address of the caller's struct mmu_gather
     * @vma: the starting vma
     * @start_addr: virtual address at which to start unmapping
     * @end_addr: virtual address at which to end unmapping
     * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
     * @details: details of nonlinear truncation or shared cache invalidation
     *
    
     * Returns the end address of the unmapping (restart addr if interrupted).
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     *
    
     * Unmap all pages in the vma list.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     *
    
     * We aim to not hold locks for too long (for scheduling latency reasons).
     * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     * return the ending mmu_gather to the caller.
     *
     * Only addresses between `start' and `end' will be unmapped.
     *
     * The VMA list must be sorted in ascending virtual address order.
     *
     * unmap_vmas() assumes that the caller will flush the whole unmapped address
     * range after unmap_vmas() returns.  So the only responsibility here is to
     * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
     * drops the lock and schedules.
     */
    
    /* See the kernel-doc comment above for the full contract. */
    unsigned long unmap_vmas(struct mmu_gather **tlbp,

    Linus Torvalds's avatar
    Linus Torvalds committed
    		struct vm_area_struct *vma, unsigned long start_addr,
    		unsigned long end_addr, unsigned long *nr_accounted,
    		struct zap_details *details)
    {
    	/* Zap in ZAP_BLOCK_SIZE chunks, flushing and re-gathering the TLB
    	 * between chunks so locks are never held for too long. */
    	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
    	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
    	int tlb_start_valid = 0;

    	unsigned long start = start_addr;

    Linus Torvalds's avatar
    Linus Torvalds committed
    	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;

    	int fullmm = (*tlbp)->fullmm;

    Linus Torvalds's avatar
    Linus Torvalds committed

    	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
    		unsigned long end;

    		/* Clip this vma's span to [start_addr, end_addr). */
    		start = max(vma->vm_start, start_addr);
    		if (start >= vma->vm_end)
    			continue;
    		end = min(vma->vm_end, end_addr);
    		if (end <= vma->vm_start)
    			continue;

    		if (vma->vm_flags & VM_ACCOUNT)
    			*nr_accounted += (end - start) >> PAGE_SHIFT;

    		while (start != end) {
    			unsigned long block;

    			if (!tlb_start_valid) {
    				tlb_start = start;
    				tlb_start_valid = 1;
    			}

    			if (is_vm_hugetlb_page(vma)) {
    				/* hugetlb vmas are zapped whole, uncounted
    				 * against zap_bytes chunking */
    				block = end - start;
    				unmap_hugepage_range(vma, start, end);
    			} else {
    				block = min(zap_bytes, end - start);
    				unmap_page_range(*tlbp, vma, start,
    						start + block, details);
    			}

    			start += block;
    			zap_bytes -= block;
    			if ((long)zap_bytes > 0)
    				continue;

    			/* Chunk exhausted: flush, maybe yield, re-gather. */
    			tlb_finish_mmu(*tlbp, tlb_start, start);

    			if (need_resched() ||
    				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
    				if (i_mmap_lock) {

    					*tlbp = NULL;

    Linus Torvalds's avatar
    Linus Torvalds committed
    					goto out;
    				}
    				cond_resched();
    			}


    			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);

    Linus Torvalds's avatar
    Linus Torvalds committed
    			tlb_start_valid = 0;
    			zap_bytes = ZAP_BLOCK_SIZE;
    		}
    	}
    out:

    	return start;	/* which is now the end (or restart) address */

    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /**
     * zap_page_range - remove user pages in a given range
     * @vma: vm_area_struct holding the applicable pages
     * @address: starting address of pages to zap
     * @size: number of bytes to zap
     * @details: details of nonlinear truncation or shared cache invalidation
     */
    
    unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		unsigned long size, struct zap_details *details)
    {
    	struct mm_struct *mm = vma->vm_mm;
    	struct mmu_gather *tlb;
    	unsigned long end = address + size;
    	unsigned long nr_accounted = 0;
    
    	lru_add_drain();
    	tlb = tlb_gather_mmu(mm, 0);
    
    	update_hiwater_rss(mm);
    
    	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
    	if (tlb)
    		tlb_finish_mmu(tlb, address, end);
    
    	return end;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    /*
     * Do a quick page-table lookup for a single page.
     */
    
    /*
     * Look up the struct page mapped at address in mm, honouring the FOLL_*
     * flags (WRITE requires a writable pte; GET takes a reference; TOUCH
     * updates dirty/accessed; ANON substitutes the zero page for untouched
     * anonymous areas instead of allocating page tables).
     * NOTE(review): in this copy the brace closing the pmd_huge() block and
     * the "return page;" after the "out:" label are missing, so the
     * fall-through into no_page_table here cannot be the intended flow -
     * extraction artifacts; verify against the original file.
     */
    struct page *follow_page(struct mm_struct *mm, unsigned long address,
    			unsigned int flags)

    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	pgd_t *pgd;
    	pud_t *pud;
    	pmd_t *pmd;
    	pte_t *ptep, pte;

    	spinlock_t *ptl;

    Linus Torvalds's avatar
    Linus Torvalds committed
    	unsigned long pfn;
    	struct page *page;


    	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
    	if (!IS_ERR(page)) {
    		BUG_ON(flags & FOLL_GET);
    		goto out;
    	}

    Linus Torvalds's avatar
    Linus Torvalds committed


    Linus Torvalds's avatar
    Linus Torvalds committed
    	pgd = pgd_offset(mm, address);
    	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))

    		goto no_page_table;

    Linus Torvalds's avatar
    Linus Torvalds committed

    	pud = pud_offset(pgd, address);
    	if (pud_none(*pud) || unlikely(pud_bad(*pud)))

    		goto no_page_table;

    Linus Torvalds's avatar
    Linus Torvalds committed
    	
    	pmd = pmd_offset(pud, address);
    	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))

    		goto no_page_table;

    	if (pmd_huge(*pmd)) {
    		BUG_ON(flags & FOLL_GET);
    		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);

    Linus Torvalds's avatar
    Linus Torvalds committed
    		goto out;

    Linus Torvalds's avatar
    Linus Torvalds committed


    	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

    Linus Torvalds's avatar
    Linus Torvalds committed
    	if (!ptep)
    		goto out;

    	pte = *ptep;

    	if (!pte_present(pte))
    		goto unlock;
    	if ((flags & FOLL_WRITE) && !pte_write(pte))
    		goto unlock;
    	pfn = pte_pfn(pte);
    	if (!pfn_valid(pfn))
    		goto unlock;

    Linus Torvalds's avatar
    Linus Torvalds committed


    	page = pfn_to_page(pfn);
    	if (flags & FOLL_GET)
    		get_page(page);
    	if (flags & FOLL_TOUCH) {
    		if ((flags & FOLL_WRITE) &&
    		    !pte_dirty(pte) && !PageDirty(page))
    			set_page_dirty(page);
    		mark_page_accessed(page);
    	}
    unlock:
    	pte_unmap_unlock(ptep, ptl);

    Linus Torvalds's avatar
    Linus Torvalds committed
    out:

    Linus Torvalds's avatar
    Linus Torvalds committed


    no_page_table:
    	/*
    	 * When core dumping an enormous anonymous area that nobody
    	 * has touched so far, we don't want to allocate page tables.
    	 */
    	if (flags & FOLL_ANON) {
    		page = ZERO_PAGE(address);
    		if (flags & FOLL_GET)
    			get_page(page);
    		BUG_ON(flags & FOLL_WRITE);
    	}
    	return page;

    Linus Torvalds's avatar
    Linus Torvalds committed
    }
    
    int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
    		unsigned long start, int len, int write, int force,
    		struct page **pages, struct vm_area_struct **vmas)
    {
    	int i;
    
    	unsigned int vm_flags;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	/* 
    	 * Require read or write permissions.
    	 * If 'force' is set, we only require the "MAY" flags.
    	 */
    
    	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
    	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	i = 0;
    
    	do {
    
    		struct vm_area_struct *vma;
    		unsigned int foll_flags;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    		vma = find_extend_vma(mm, start);
    		if (!vma && in_gate_area(tsk, start)) {
    			unsigned long pg = start & PAGE_MASK;
    			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
    			pgd_t *pgd;
    			pud_t *pud;
    			pmd_t *pmd;
    			pte_t *pte;
    			if (write) /* user gate pages are read-only */
    				return i ? : -EFAULT;
    			if (pg > TASK_SIZE)
    				pgd = pgd_offset_k(pg);
    			else
    				pgd = pgd_offset_gate(mm, pg);
    			BUG_ON(pgd_none(*pgd));
    			pud = pud_offset(pgd, pg);
    			BUG_ON(pud_none(*pud));
    			pmd = pmd_offset(pud, pg);
    
    			if (pmd_none(*pmd))
    				return i ? : -EFAULT;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			pte = pte_offset_map(pmd, pg);
    
    			if (pte_none(*pte)) {
    				pte_unmap(pte);
    				return i ? : -EFAULT;
    			}
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			if (pages) {
    				pages[i] = pte_page(*pte);
    				get_page(pages[i]);
    			}
    			pte_unmap(pte);
    			if (vmas)
    				vmas[i] = gate_vma;
    			i++;
    			start += PAGE_SIZE;
    			len--;
    			continue;
    		}
    
    
    		if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
    
    				|| !(vm_flags & vma->vm_flags))
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			return i ? : -EFAULT;
    
    		if (is_vm_hugetlb_page(vma)) {
    			i = follow_hugetlb_page(mm, vma, pages, vmas,
    						&start, &len, i);
    			continue;
    		}
    
    
    		foll_flags = FOLL_TOUCH;
    		if (pages)
    			foll_flags |= FOLL_GET;
    		if (!write && !(vma->vm_flags & VM_LOCKED) &&
    		    (!vma->vm_ops || !vma->vm_ops->nopage))
    			foll_flags |= FOLL_ANON;
    
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		do {
    
    			struct page *page;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    			if (write)
    				foll_flags |= FOLL_WRITE;
    
    			cond_resched();
    			while (!(page = follow_page(mm, start, foll_flags))) {
    				int ret;
    				ret = __handle_mm_fault(mm, vma, start,
    						foll_flags & FOLL_WRITE);
    
    				/*
    				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
    				 * broken COW when necessary, even if maybe_mkwrite
    				 * decided not to set pte_write. We can thus safely do
    				 * subsequent page lookups as if they were reads.
    				 */
    				if (ret & VM_FAULT_WRITE)
    
    					foll_flags &= ~FOLL_WRITE;
    
    				
    				switch (ret & ~VM_FAULT_WRITE) {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    				case VM_FAULT_MINOR:
    					tsk->min_flt++;
    					break;
    				case VM_FAULT_MAJOR:
    					tsk->maj_flt++;
    					break;
    				case VM_FAULT_SIGBUS:
    					return i ? i : -EFAULT;
    				case VM_FAULT_OOM:
    					return i ? i : -ENOMEM;
    				default:
    					BUG();
    				}
    			}
    			if (pages) {