    					BUG();
    				}
    				spin_lock(&mm->page_table_lock);
    			}
    			if (pages) {
    
				pages[i] = page;
				flush_dcache_page(page);
				page_cache_get(page);
    			}
    			if (vmas)
    				vmas[i] = vma;
    			i++;
    			start += PAGE_SIZE;
    			len--;
    
    		} while (len && start < vma->vm_end);
    
    		spin_unlock(&mm->page_table_lock);
    
    	} while (len);
    
    	return i;
    }
    EXPORT_SYMBOL(get_user_pages);
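
/*
 * Illustrative sketch (not part of memory.c): a typical caller of
 * get_user_pages() in this era pins a user buffer while holding
 * mmap_sem for read, operates on the pages, then dirties and releases
 * them.  The function name and parameters below are hypothetical.
 */
#if 0
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int got, i;

	down_read(&current->mm->mmap_sem);
	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     nr_pages, 1 /* write */, 0 /* force */,
			     pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (got <= 0)
		return got;

	/* ... read or DMA into the pinned pages here ... */

	for (i = 0; i < got; i++) {
		if (!PageReserved(pages[i]))
			SetPageDirty(pages[i]);
		page_cache_release(pages[i]);
	}
	return got;
}
#endif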
    
    static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
    			unsigned long addr, unsigned long end, pgprot_t prot)
    {
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
    	if (!pte)
    		return -ENOMEM;
    	do {
    
    		struct page *page = ZERO_PAGE(addr);
    		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
    		page_cache_get(page);
    		page_add_file_rmap(page);
    		inc_mm_counter(mm, file_rss);
    
    		BUG_ON(!pte_none(*pte));
    		set_pte_at(mm, addr, pte, zero_pte);
    	} while (pte++, addr += PAGE_SIZE, addr != end);
    
    	pte_unmap_unlock(pte - 1, ptl);
    
    	return 0;
    }
    
    static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
    			unsigned long addr, unsigned long end, pgprot_t prot)
    {
    	pmd_t *pmd;
    	unsigned long next;
    
    	pmd = pmd_alloc(mm, pud, addr);
    	if (!pmd)
    		return -ENOMEM;
    	do {
    		next = pmd_addr_end(addr, end);
    		if (zeromap_pte_range(mm, pmd, addr, next, prot))
    			return -ENOMEM;
    	} while (pmd++, addr = next, addr != end);
    	return 0;
    }
    
    static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
    			unsigned long addr, unsigned long end, pgprot_t prot)
    {
    	pud_t *pud;
    	unsigned long next;
    
    	pud = pud_alloc(mm, pgd, addr);
    	if (!pud)
    		return -ENOMEM;
    	do {
    		next = pud_addr_end(addr, end);
    		if (zeromap_pmd_range(mm, pud, addr, next, prot))
    			return -ENOMEM;
    	} while (pud++, addr = next, addr != end);
    	return 0;
    }
    
    int zeromap_page_range(struct vm_area_struct *vma,
    			unsigned long addr, unsigned long size, pgprot_t prot)
    {
    	pgd_t *pgd;
    	unsigned long next;
    	unsigned long end = addr + size;
    	struct mm_struct *mm = vma->vm_mm;
    	int err;
    
    	BUG_ON(addr >= end);
    	pgd = pgd_offset(mm, addr);
    	flush_cache_range(vma, addr, end);
    	do {
    		next = pgd_addr_end(addr, end);
    		err = zeromap_pud_range(mm, pgd, addr, next, prot);
    		if (err)
    			break;
    	} while (pgd++, addr = next, addr != end);
    	return err;
    }
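
/*
 * Illustrative sketch (not part of memory.c): zeromap_page_range() is
 * the kind of helper a character driver's mmap method (e.g. the
 * /dev/zero path) can use to back a private mapping entirely with the
 * zero page.  The handler name below is hypothetical.
 */
#if 0
static int example_mmap_zero(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return -EINVAL;	/* shared anonymous memory needs real pages */
	return zeromap_page_range(vma, vma->vm_start,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif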
    
/*
 * Maps a range of physical memory into the requested pages.  The old
 * mappings are removed.  Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
    static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
    			unsigned long addr, unsigned long end,
    			unsigned long pfn, pgprot_t prot)
    {
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
    	if (!pte)
    		return -ENOMEM;
    	do {
    		BUG_ON(!pte_none(*pte));
    
    		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
    
    		pfn++;
    	} while (pte++, addr += PAGE_SIZE, addr != end);
    
    	pte_unmap_unlock(pte - 1, ptl);
    
    	return 0;
    }
    
    static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
    			unsigned long addr, unsigned long end,
    			unsigned long pfn, pgprot_t prot)
    {
    	pmd_t *pmd;
    	unsigned long next;
    
    	pfn -= addr >> PAGE_SHIFT;
    	pmd = pmd_alloc(mm, pud, addr);
    	if (!pmd)
    		return -ENOMEM;
    	do {
    		next = pmd_addr_end(addr, end);
    		if (remap_pte_range(mm, pmd, addr, next,
    				pfn + (addr >> PAGE_SHIFT), prot))
    			return -ENOMEM;
    	} while (pmd++, addr = next, addr != end);
    	return 0;
    }
    
    static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
    			unsigned long addr, unsigned long end,
    			unsigned long pfn, pgprot_t prot)
    {
    	pud_t *pud;
    	unsigned long next;
    
    	pfn -= addr >> PAGE_SHIFT;
    	pud = pud_alloc(mm, pgd, addr);
    	if (!pud)
    		return -ENOMEM;
    	do {
    		next = pud_addr_end(addr, end);
    		if (remap_pmd_range(mm, pud, addr, next,
    				pfn + (addr >> PAGE_SHIFT), prot))
    			return -ENOMEM;
    	} while (pud++, addr = next, addr != end);
    	return 0;
    }
    
    /*  Note: this is only safe if the mm semaphore is held when called. */
    int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
    		    unsigned long pfn, unsigned long size, pgprot_t prot)
    {
    	pgd_t *pgd;
    	unsigned long next;
    
	unsigned long end = addr + PAGE_ALIGN(size);
    	struct mm_struct *mm = vma->vm_mm;
    	int err;
    
    	/*
    	 * Physically remapped pages are special. Tell the
    	 * rest of the world about it:
    	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED tells the core MM not to "manage" these pages
	 *	(e.g. refcount, mapcount, try to swap them out).
    	 */
    	vma->vm_flags |= VM_IO | VM_RESERVED;
    
    	BUG_ON(addr >= end);
    	pfn -= addr >> PAGE_SHIFT;
    	pgd = pgd_offset(mm, addr);
    	flush_cache_range(vma, addr, end);
    	do {
    		next = pgd_addr_end(addr, end);
    		err = remap_pud_range(mm, pgd, addr, next,
    				pfn + (addr >> PAGE_SHIFT), prot);
    		if (err)
    			break;
    	} while (pgd++, addr = next, addr != end);
    	return err;
    }
    EXPORT_SYMBOL(remap_pfn_range);
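
/*
 * Illustrative sketch (not part of memory.c): remap_pfn_range() is
 * typically called from a driver's ->mmap method, with mmap_sem
 * already held as noted above.  The handler name is hypothetical;
 * vm_pgoff is taken as the physical page frame to map, as many
 * drivers do.
 */
#if 0
static int example_device_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* vm_pgoff carries the page frame number the caller asked for */
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#endif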
    
    
    /*
     * handle_pte_fault chooses page fault handler according to an entry
     * which was read non-atomically.  Before making any commitment, on
     * those architectures or configurations (e.g. i386 with PAE) which
     * might give a mix of unmatched parts, do_swap_page and do_file_page
     * must check under lock before unmapping the pte and proceeding
     * (but do_wp_page is only called after already making such a check;
     * and do_anonymous_page and do_no_page can safely check later on).
     */
    static inline int pte_unmap_same(struct mm_struct *mm,
    				pte_t *page_table, pte_t orig_pte)
    {
    	int same = 1;
    #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
    	if (sizeof(pte_t) > sizeof(unsigned long)) {
    		spin_lock(&mm->page_table_lock);
    		same = pte_same(*page_table, orig_pte);
    		spin_unlock(&mm->page_table_lock);
    	}
    #endif
    	pte_unmap(page_table);
    	return same;
    }
    
    
    /*
     * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
     * pte_mkwrite.  But get_user_pages can cause write faults for mappings
     * that do not have writing enabled, when used by access_process_vm.
     */
    static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
    {
    	if (likely(vma->vm_flags & VM_WRITE))
    		pte = pte_mkwrite(pte);
    	return pte;
    }
    
    /*
     * This routine handles present pages, when users try to write
     * to a shared page. It is done by copying the page to a new address
     * and decrementing the shared-page counter for the old page.
     *
     * Note that this routine assumes that the protection checks have been
     * done by the caller (the low-level page fault routine in most cases).
     * Thus we can safely just mark it writable once we've done any necessary
     * COW.
     *
     * We also mark the page dirty at this point even though the page will
     * change only once the write actually happens. This avoids a few races,
     * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
     */
    
    static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
    {
	struct page *old_page, *new_page;
	unsigned long pfn = pte_pfn(orig_pte);
	pte_t entry;
	int ret = VM_FAULT_MINOR;

	BUG_ON(vma->vm_flags & VM_RESERVED);

    	if (unlikely(!pfn_valid(pfn))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, orig_pte, address);
		ret = VM_FAULT_OOM;
		goto unlock;
    	}
    	old_page = pfn_to_page(pfn);
    
    
	if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
    		int reuse = can_share_swap_page(old_page);
    		unlock_page(old_page);
    		if (reuse) {
    			flush_cache_page(vma, address, pfn);
    
			entry = pte_mkyoung(orig_pte);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    			ptep_set_access_flags(vma, address, page_table, entry, 1);
    			update_mmu_cache(vma, address, entry);
    			lazy_mmu_prot_update(entry);
    
			ret |= VM_FAULT_WRITE;
			goto unlock;
    		}
    	}
    
    	/*
    	 * Ok, we need to copy. Oh, well..
    	 */
    
    	page_cache_get(old_page);
    
    	pte_unmap_unlock(page_table, ptl);
    
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
    	if (old_page == ZERO_PAGE(address)) {
    		new_page = alloc_zeroed_user_highpage(vma, address);
		if (!new_page)
			goto oom;
    	} else {
    		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
		if (!new_page)
			goto oom;
    		copy_user_highpage(new_page, old_page, address);
    	}
    
    	/*
    	 * Re-check the pte - we dropped the lock
    	 */
    
    	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
    
    	if (likely(pte_same(*page_table, orig_pte))) {
    
    		page_remove_rmap(old_page);
    		if (!PageAnon(old_page)) {
    
    			inc_mm_counter(mm, anon_rss);
    
			dec_mm_counter(mm, file_rss);
		}
    		flush_cache_page(vma, address, pfn);
    
    		entry = mk_pte(new_page, vma->vm_page_prot);
    		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    		ptep_establish(vma, address, page_table, entry);
    		update_mmu_cache(vma, address, entry);
    		lazy_mmu_prot_update(entry);
    
    		lru_cache_add_active(new_page);
    		page_add_anon_rmap(new_page, vma, address);
    
    		/* Free the old page.. */
    		new_page = old_page;
    
    		ret |= VM_FAULT_WRITE;
    
    	}
    	page_cache_release(new_page);
    	page_cache_release(old_page);
    
unlock:
	pte_unmap_unlock(page_table, ptl);
    	return ret;
    
oom:
    	page_cache_release(old_page);
    	return VM_FAULT_OOM;
    }
    
    /*
     * Helper functions for unmap_mapping_range().
     *
     * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
     *
     * We have to restart searching the prio_tree whenever we drop the lock,
     * since the iterator is only valid while the lock is held, and anyway
     * a later vma might be split and reinserted earlier while lock dropped.
     *
     * The list of nonlinear vmas could be handled more efficiently, using
     * a placeholder, but handle it in the same way until a need is shown.
     * It is important to search the prio_tree before nonlinear list: a vma
     * may become nonlinear and be shifted from prio_tree to nonlinear list
     * while the lock is dropped; but never shifted from list to prio_tree.
     *
     * In order to make forward progress despite restarting the search,
     * vm_truncate_count is used to mark a vma as now dealt with, so we can
     * quickly skip it next time around.  Since the prio_tree search only
     * shows us those vmas affected by unmapping the range in question, we
     * can't efficiently keep all vmas in step with mapping->truncate_count:
     * so instead reset them all whenever it wraps back to 0 (then go to 1).
     * mapping->truncate_count and vma->vm_truncate_count are protected by
     * i_mmap_lock.
     *
 * In order to make forward progress despite repeatedly restarting some
 * large vma, note the restart_addr from unmap_vmas when it breaks out:
     * and restart from that address when we reach that vma again.  It might
     * have been split or merged, shrunk or extended, but never shifted: so
     * restart_addr remains valid so long as it remains in the vma's range.
     * unmap_mapping_range forces truncate_count to leap over page-aligned
     * values so we can save vma's restart_addr in its truncate_count field.
     */
    #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
    
    static void reset_vma_truncate_counts(struct address_space *mapping)
    {
    	struct vm_area_struct *vma;
    	struct prio_tree_iter iter;
    
    	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
    		vma->vm_truncate_count = 0;
    	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
    		vma->vm_truncate_count = 0;
    }
    
    static int unmap_mapping_range_vma(struct vm_area_struct *vma,
    		unsigned long start_addr, unsigned long end_addr,
    		struct zap_details *details)
    {
    	unsigned long restart_addr;
    	int need_break;
    
    again:
    	restart_addr = vma->vm_truncate_count;
    	if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
    		start_addr = restart_addr;
    		if (start_addr >= end_addr) {
    			/* Top of vma has been split off since last time */
    			vma->vm_truncate_count = details->truncate_count;
    			return 0;
    		}
    	}
    
    
	restart_addr = zap_page_range(vma, start_addr,
					end_addr - start_addr, details);
    	need_break = need_resched() ||
    			need_lockbreak(details->i_mmap_lock);
    
    
	if (restart_addr >= end_addr) {
    		/* We have now completed this vma: mark it so */
    		vma->vm_truncate_count = details->truncate_count;
    		if (!need_break)
    			return 0;
    	} else {
    		/* Note restart_addr in vma's truncate_count field */
    
		vma->vm_truncate_count = restart_addr;
    		if (!need_break)
    			goto again;
    	}
    
    	spin_unlock(details->i_mmap_lock);
    	cond_resched();
    	spin_lock(details->i_mmap_lock);
    	return -EINTR;
    }
    
    static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
    					    struct zap_details *details)
    {
    	struct vm_area_struct *vma;
    	struct prio_tree_iter iter;
    	pgoff_t vba, vea, zba, zea;
    
    restart:
    	vma_prio_tree_foreach(vma, &iter, root,
    			details->first_index, details->last_index) {
    		/* Skip quickly over those we have already dealt with */
    		if (vma->vm_truncate_count == details->truncate_count)
    			continue;
    
    		vba = vma->vm_pgoff;
    		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
    		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
    		zba = details->first_index;
    		if (zba < vba)
    			zba = vba;
    		zea = details->last_index;
    		if (zea > vea)
    			zea = vea;
    
    		if (unmap_mapping_range_vma(vma,
    			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
    			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
    				details) < 0)
    			goto restart;
    	}
    }
    
    static inline void unmap_mapping_range_list(struct list_head *head,
    					    struct zap_details *details)
    {
    	struct vm_area_struct *vma;
    
    	/*
    	 * In nonlinear VMAs there is no correspondence between virtual address
    	 * offset and file offset.  So we must perform an exhaustive search
    	 * across *all* the pages in each nonlinear VMA, not just the pages
    	 * whose virtual address lies outside the file truncation point.
    	 */
    restart:
    	list_for_each_entry(vma, head, shared.vm_set.list) {
    		/* Skip quickly over those we have already dealt with */
    		if (vma->vm_truncate_count == details->truncate_count)
    			continue;
    		details->nonlinear_vma = vma;
    		if (unmap_mapping_range_vma(vma, vma->vm_start,
    					vma->vm_end, details) < 0)
    			goto restart;
    	}
    }
    
    /**
     * unmap_mapping_range - unmap the portion of all mmaps
     * in the specified address_space corresponding to the specified
 * page range in the underlying file.
 * @mapping: the address space containing mmaps to be unmapped.
     * @holebegin: byte in first page to unmap, relative to the start of
     * the underlying file.  This will be rounded down to a PAGE_SIZE
     * boundary.  Note that this is different from vmtruncate(), which
     * must keep the partial page.  In contrast, we must get rid of
     * partial pages.
     * @holelen: size of prospective hole in bytes.  This will be rounded
     * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
     * end of the file.
     * @even_cows: 1 when truncating a file, unmap even private COWed pages;
     * but 0 when invalidating pagecache, don't throw away private data.
     */
    void unmap_mapping_range(struct address_space *mapping,
    		loff_t const holebegin, loff_t const holelen, int even_cows)
    {
    	struct zap_details details;
    	pgoff_t hba = holebegin >> PAGE_SHIFT;
    	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
    
    	/* Check for overflow. */
    	if (sizeof(holelen) > sizeof(hlen)) {
    		long long holeend =
    			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
    		if (holeend & ~(long long)ULONG_MAX)
    			hlen = ULONG_MAX - hba + 1;
    	}
    
    	details.check_mapping = even_cows? NULL: mapping;
    	details.nonlinear_vma = NULL;
    	details.first_index = hba;
    	details.last_index = hba + hlen - 1;
    	if (details.last_index < details.first_index)
    		details.last_index = ULONG_MAX;
    	details.i_mmap_lock = &mapping->i_mmap_lock;
    
    	spin_lock(&mapping->i_mmap_lock);
    
    	/* serialize i_size write against truncate_count write */
    	smp_wmb();
    	/* Protect against page faults, and endless unmapping loops */
    	mapping->truncate_count++;
    	/*
	 * For archs where spin_lock has inclusive semantics, like ia64,
	 * this smp_mb() prevents page table contents from being read
	 * before the truncate_count increment is visible to
	 * other cpus.
    	 */
    	smp_mb();
    	if (unlikely(is_restart_addr(mapping->truncate_count))) {
    		if (mapping->truncate_count == 0)
    			reset_vma_truncate_counts(mapping);
    		mapping->truncate_count++;
    	}
    	details.truncate_count = mapping->truncate_count;
    
    	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
    		unmap_mapping_range_tree(&mapping->i_mmap, &details);
    	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
    		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
    	spin_unlock(&mapping->i_mmap_lock);
    }
    EXPORT_SYMBOL(unmap_mapping_range);
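
/*
 * Illustrative sketch (not part of memory.c): besides the truncate
 * path in vmtruncate() below, a caller invalidating part of its
 * pagecache can zap just the shared mappings of a byte range while
 * leaving private COWed copies alone by passing even_cows == 0.
 * "inode", "start" and "len" are hypothetical caller variables.
 */
#if 0
	unmap_mapping_range(inode->i_mapping, start, len, 0);
#endif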
    
    /*
     * Handle all mappings that got truncated by a "truncate()"
     * system call.
     *
     * NOTE! We have to be ready to update the memory sharing
     * between the file and the memory map for a potential last
     * incomplete page.  Ugly, but necessary.
     */
    int vmtruncate(struct inode * inode, loff_t offset)
    {
    	struct address_space *mapping = inode->i_mapping;
    	unsigned long limit;
    
    	if (inode->i_size < offset)
    		goto do_expand;
    	/*
    	 * truncation of in-use swapfiles is disallowed - it would cause
    	 * subsequent swapout to scribble on the now-freed blocks.
    	 */
    	if (IS_SWAPFILE(inode))
    		goto out_busy;
    	i_size_write(inode, offset);
    	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
    	truncate_inode_pages(mapping, offset);
    	goto out_truncate;
    
    do_expand:
    	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
    	if (limit != RLIM_INFINITY && offset > limit)
    		goto out_sig;
    	if (offset > inode->i_sb->s_maxbytes)
    		goto out_big;
    	i_size_write(inode, offset);
    
    out_truncate:
    	if (inode->i_op && inode->i_op->truncate)
    		inode->i_op->truncate(inode);
    	return 0;
    out_sig:
    	send_sig(SIGXFSZ, current, 0);
    out_big:
    	return -EFBIG;
    out_busy:
    	return -ETXTBSY;
    }
    
    EXPORT_SYMBOL(vmtruncate);
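
/*
 * Illustrative sketch (not part of memory.c): a filesystem that
 * handles ATTR_SIZE in its ->setattr method typically validates the
 * request and then calls vmtruncate() to shrink or extend the file.
 * The function below is hypothetical.
 */
#if 0
static int example_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err = inode_change_ok(inode, attr);

	if (err)
		return err;
	if (attr->ia_valid & ATTR_SIZE)
		err = vmtruncate(inode, attr->ia_size);
	return err;
}
#endif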
    
    /* 
     * Primitive swap readahead code. We simply read an aligned block of
     * (1 << page_cluster) entries in the swap area. This method is chosen
     * because it doesn't cost us any seek time.  We also make sure to queue
     * the 'original' request together with the readahead ones...  
     *
     * This has been extended to use the NUMA policies from the mm triggering
     * the readahead.
     *
     * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
     */
    void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
    {
    #ifdef CONFIG_NUMA
    	struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
    #endif
    	int i, num;
    	struct page *new_page;
    	unsigned long offset;
    
    	/*
    	 * Get the number of handles we should do readahead io to.
    	 */
    	num = valid_swaphandles(entry, &offset);
    	for (i = 0; i < num; offset++, i++) {
    		/* Ok, do the async read-ahead now */
    		new_page = read_swap_cache_async(swp_entry(swp_type(entry),
    							   offset), vma, addr);
    		if (!new_page)
    			break;
    		page_cache_release(new_page);
    #ifdef CONFIG_NUMA
    		/*
    		 * Find the next applicable VMA for the NUMA policy.
    		 */
    		addr += PAGE_SIZE;
    		if (addr == 0)
    			vma = NULL;
    		if (vma) {
    			if (addr >= vma->vm_end) {
    				vma = next_vma;
    				next_vma = vma ? vma->vm_next : NULL;
    			}
    			if (vma && addr < vma->vm_start)
    				vma = NULL;
    		} else {
    			if (next_vma && addr >= next_vma->vm_start) {
    				vma = next_vma;
    				next_vma = vma->vm_next;
    			}
    		}
    #endif
    	}
    	lru_add_drain();	/* Push any new pages onto the LRU now */
    }
    
/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
     */
    
    static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
    		unsigned long address, pte_t *page_table, pmd_t *pmd,
    		int write_access, pte_t orig_pte)
    
    {
    
	spinlock_t *ptl;
	struct page *page;
	swp_entry_t entry;
    	pte_t pte;
    	int ret = VM_FAULT_MINOR;
    
    
    	if (!pte_unmap_same(mm, page_table, orig_pte))
    		goto out;
    
    
    	entry = pte_to_swp_entry(orig_pte);
    
    	page = lookup_swap_cache(entry);
    	if (!page) {
     		swapin_readahead(entry, address, vma);
     		page = read_swap_cache_async(entry, vma, address);
    		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
    			 */
    
    			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
    
    			if (likely(pte_same(*page_table, orig_pte)))
    				ret = VM_FAULT_OOM;
    
    			goto unlock;
    
    		}
    
    		/* Had to read the page from swap area: Major fault */
    		ret = VM_FAULT_MAJOR;
    		inc_page_state(pgmajfault);
    		grab_swap_token();
    	}
    
    	mark_page_accessed(page);
    	lock_page(page);
    
	/*
	 * Back out if somebody else already faulted in this pte.
    	 */
    
    	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
    
    	if (unlikely(!pte_same(*page_table, orig_pte)))
    
    		goto out_nomap;
    
    	if (unlikely(!PageUptodate(page))) {
    		ret = VM_FAULT_SIGBUS;
    		goto out_nomap;
    
    	}
    
    	/* The page isn't present yet, go ahead with the fault. */
    
    
    	inc_mm_counter(mm, anon_rss);
    
    	pte = mk_pte(page, vma->vm_page_prot);
    	if (write_access && can_share_swap_page(page)) {
    		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
    		write_access = 0;
    	}
    
    	flush_icache_page(vma, page);
    	set_pte_at(mm, address, page_table, pte);
    	page_add_anon_rmap(page, vma, address);
    
    
    	swap_free(entry);
    	if (vm_swap_full())
    		remove_exclusive_swap_page(page);
    	unlock_page(page);
    
    
    	if (write_access) {
		if (do_wp_page(mm, vma, address,
				page_table, pmd, ptl, pte) == VM_FAULT_OOM)
    			ret = VM_FAULT_OOM;
    		goto out;
    	}
    
    	/* No need to invalidate - it was non-present before */
    	update_mmu_cache(vma, address, pte);
    	lazy_mmu_prot_update(pte);
    
unlock:
	pte_unmap_unlock(page_table, ptl);
    out:
    	return ret;
    
out_nomap:
	pte_unmap_unlock(page_table, ptl);
	unlock_page(page);
	page_cache_release(page);
	return ret;
}

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
    
    static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
    		unsigned long address, pte_t *page_table, pmd_t *pmd,
    		int write_access)
    
    {
    
    	struct page *page;
    	spinlock_t *ptl;
    
    	pte_t entry;
    
    	if (write_access) {
    		/* Allocate our own private page. */
    		pte_unmap(page_table);
    
		if (unlikely(anon_vma_prepare(vma)))
			goto oom;
		page = alloc_zeroed_user_highpage(vma, address);
		if (!page)
			goto oom;

    		entry = mk_pte(page, vma->vm_page_prot);
    		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    
    
    		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
    		if (!pte_none(*page_table))
    			goto release;
    		inc_mm_counter(mm, anon_rss);
    
    		lru_cache_add_active(page);
    		SetPageReferenced(page);
    
		page_add_anon_rmap(page, vma, address);
	} else {
    		/* Map the ZERO_PAGE - vm_page_prot is readonly */
    		page = ZERO_PAGE(address);
    		page_cache_get(page);
    		entry = mk_pte(page, vma->vm_page_prot);
    
    		ptl = &mm->page_table_lock;
    		spin_lock(ptl);
    		if (!pte_none(*page_table))
    			goto release;
    
    		inc_mm_counter(mm, file_rss);
		page_add_file_rmap(page);
	}

    	set_pte_at(mm, address, page_table, entry);
    
    
    	/* No need to invalidate - it was non-present before */
    
    	update_mmu_cache(vma, address, entry);
    
    	lazy_mmu_prot_update(entry);
    
unlock:
	pte_unmap_unlock(page_table, ptl);
    	return VM_FAULT_MINOR;
    
    release:
    	page_cache_release(page);
    	goto unlock;
    
oom:
    	return VM_FAULT_OOM;
    }
    
    /*
     * do_no_page() tries to create a new page mapping. It aggressively
     * tries to share with existing pages, but makes a separate copy if
     * the "write_access" parameter is true in order to avoid the next
     * page fault.
     *
     * As this is called only for pages that do not currently exist, we
     * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
     */
    
    static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
    		unsigned long address, pte_t *page_table, pmd_t *pmd,
    		int write_access)
    
    {
    
    	spinlock_t *ptl;
    
    	struct page *new_page;
    
    	struct address_space *mapping = NULL;
    	pte_t entry;
    	unsigned int sequence = 0;
    	int ret = VM_FAULT_MINOR;
    	int anon = 0;
    
    	pte_unmap(page_table);
    
    	if (vma->vm_file) {
    		mapping = vma->vm_file->f_mapping;
    		sequence = mapping->truncate_count;
    		smp_rmb(); /* serializes i_size against truncate_count */
    	}
    retry:
    	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
    	/*
    	 * No smp_rmb is needed here as long as there's a full
    	 * spin_lock/unlock sequence inside the ->nopage callback
    	 * (for the pagecache lookup) that acts as an implicit
    	 * smp_mb() and prevents the i_size read to happen
    	 * after the next truncate_count read.
    	 */
    
    	/* no page was available -- either SIGBUS or OOM */
    	if (new_page == NOPAGE_SIGBUS)
    		return VM_FAULT_SIGBUS;
    	if (new_page == NOPAGE_OOM)
    		return VM_FAULT_OOM;
    
    	/*
    	 * Should we do an early C-O-W break?
    	 */
    	if (write_access && !(vma->vm_flags & VM_SHARED)) {
    		struct page *page;
    
    		if (unlikely(anon_vma_prepare(vma)))
    			goto oom;
    		page = alloc_page_vma(GFP_HIGHUSER, vma, address);
    		if (!page)
    			goto oom;
    		copy_user_highpage(page, new_page, address);
    		page_cache_release(new_page);
    		new_page = page;
    		anon = 1;
    	}
    
    
    	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
    
    	/*
    	 * For a file-backed vma, someone could have truncated or otherwise
    	 * invalidated this page.  If unmap_mapping_range got called,
    	 * retry getting the page.
    	 */
    	if (mapping && unlikely(sequence != mapping->truncate_count)) {
    
    		pte_unmap_unlock(page_table, ptl);
    
    		page_cache_release(new_page);
    
    		cond_resched();
    		sequence = mapping->truncate_count;
    		smp_rmb();
    
    		goto retry;
    	}
    
    	/*
    	 * This silly early PAGE_DIRTY setting removes a race
    	 * due to the bad i386 page protection. But it's valid
    	 * for other architectures too.
    	 *
    	 * Note that if write_access is true, we either now have
    	 * an exclusive copy of the page, or this is a shared mapping,
    	 * so we can make it writable and dirty to avoid having to
    	 * handle that later.
    	 */
    	/* Only go through if we didn't race with anybody else... */
    	if (pte_none(*page_table)) {
    		flush_icache_page(vma, new_page);
    		entry = mk_pte(new_page, vma->vm_page_prot);
    		if (write_access)
    			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
    		set_pte_at(mm, address, page_table, entry);
    		if (anon) {
    
    			inc_mm_counter(mm, anon_rss);
    
    			lru_cache_add_active(new_page);
    			page_add_anon_rmap(new_page, vma, address);
    
    		} else if (!(vma->vm_flags & VM_RESERVED)) {
    
    			inc_mm_counter(mm, file_rss);
    
			page_add_file_rmap(new_page);
		}
    	} else {
    		/* One of our sibling threads was faster, back out. */
    		page_cache_release(new_page);
    
    		goto unlock;
    
    	}
    
    	/* no need to invalidate: a not-present page shouldn't be cached */
    	update_mmu_cache(vma, address, entry);
    	lazy_mmu_prot_update(entry);
    
unlock:
	pte_unmap_unlock(page_table, ptl);
    	return ret;
    oom:
    	page_cache_release(new_page);
    
    	return VM_FAULT_OOM;
    
    }
    
    /*
     * Fault of a previously existing named mapping. Repopulate the pte
     * from the encoded file_pte if possible. This enables swappable
     * nonlinear vmas.
    
     *
     * We enter with non-exclusive mmap_sem (to exclude vma changes,
     * but allow concurrent faults), and pte mapped but not yet locked.
     * We return with mmap_sem still held, but pte unmapped and unlocked.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
     */
    
    static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
    		unsigned long address, pte_t *page_table, pmd_t *pmd,
    		int write_access, pte_t orig_pte)
    
    {
    
    	pgoff_t pgoff;
    
    	int err;
    
    
    	if (!pte_unmap_same(mm, page_table, orig_pte))
    		return VM_FAULT_MINOR;
    
    
    
    	if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
    		/*
    		 * Page table corrupted: show pte and kill process.
    		 */
    
    		print_bad_pte(vma, orig_pte, address);
    
    		return VM_FAULT_OOM;
    	}
	/* We can then assume vma->vm_ops && vma->vm_ops->populate */
    
    	pgoff = pte_to_pgoff(orig_pte);
    	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
    					vma->vm_page_prot, pgoff, 0);
    
    	if (err == -ENOMEM)
    		return VM_FAULT_OOM;
    	if (err)
    		return VM_FAULT_SIGBUS;
    	return VM_FAULT_MAJOR;
    }
    
    /*
     * These routines also need to handle stuff like marking pages dirty
     * and/or accessed for architectures that don't do it in hardware (most
     * RISC architectures).  The early dirtying is also good on the i386.
     *
     * There is also a hook called "update_mmu_cache()" that architectures
     * with external mmu caches can use to update those (ie the Sparc or
     * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
     */
static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, pmd_t *pmd, int write_access)
    {
    	pte_t entry;
    
    	spinlock_t *ptl;
    
    
    	entry = *pte;
    	if (!pte_present(entry)) {
    
    		if (pte_none(entry)) {
    			if (!vma->vm_ops || !vma->vm_ops->nopage)
    				return do_anonymous_page(mm, vma, address,
    					pte, pmd, write_access);
    			return do_no_page(mm, vma, address,
    					pte, pmd, write_access);
    		}
    
    		if (pte_file(entry))
    
    			return do_file_page(mm, vma, address,
    					pte, pmd, write_access, entry);