/*
 *  arch/sparc64/mm/init.c
     *
     *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
     *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/string.h>
    #include <linux/init.h>
    #include <linux/bootmem.h>
    #include <linux/mm.h>
    #include <linux/hugetlb.h>
    #include <linux/initrd.h>
    #include <linux/swap.h>
    #include <linux/pagemap.h>
    
#include <linux/poison.h>
    #include <linux/fs.h>
    #include <linux/seq_file.h>
    
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>

    #include <asm/head.h>
    #include <asm/page.h>
    #include <asm/pgalloc.h>
    #include <asm/pgtable.h>
    #include <asm/oplib.h>
    #include <asm/iommu.h>
    #include <asm/io.h>
    #include <asm/uaccess.h>
    #include <asm/mmu_context.h>
    #include <asm/tlbflush.h>
    #include <asm/dma.h>
    #include <asm/starfire.h>
    #include <asm/tlb.h>
    #include <asm/spitfire.h>
    #include <asm/sections.h>
    
#include <asm/irq.h>

#include "init_64.h"

    unsigned long kern_linear_pte_xor[4] __read_mostly;
    
    /* A bitmap, two bits for every 256MB of physical memory.  These two
     * bits determine what page size we use for kernel linear
     * translations.  They form an index into kern_linear_pte_xor[].  The
     * value in the indexed slot is XOR'd with the TLB miss virtual
     * address to form the resulting TTE.  The mapping is:
     *
     *	0	==>	4MB
     *	1	==>	256MB
     *	2	==>	2GB
     *	3	==>	16GB
     *
     * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
     * support 2GB pages, and hopefully future cpus will support the 16GB
     * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
     * if these larger page sizes are not supported by the cpu.
     *
     * It would be nice to determine this from the machine description
     * 'cpu' properties, but we need to have this table setup before the
 * MDESC is initialized.
 */
    unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
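/* Illustrative sketch (not part of the original file): the real lookup is
 * done in assembly in the kernel TLB miss handlers, but conceptually the
 * two-bit slot for a physical address 'paddr' would be recovered like this:
 *
 *	unsigned long index = paddr >> 28;			   which 256MB chunk
 *	unsigned long word  = index / (BITS_PER_LONG / 2);	   bitmap word
 *	unsigned long shift = (index % (BITS_PER_LONG / 2)) * 2;  bit offset
 *	unsigned long slot  = (kpte_linear_bitmap[word] >> shift) & 0x3;
 *
 * 'slot' then indexes kern_linear_pte_xor[] to select the page size used
 * for that piece of the kernel linear mapping.
 */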
    
    
    #ifndef CONFIG_DEBUG_PAGEALLOC
    
    /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
     * Space is allocated for this right after the trap table in
 * arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

    static unsigned long cpu_pgsz_mask;
    
    
    static struct linux_prom64_registers pavail[MAX_BANKS];
    static int pavail_ents;
    
    
    static int cmp_p64(const void *a, const void *b)
    {
    	const struct linux_prom64_registers *x = a, *y = b;
    
    	if (x->phys_addr > y->phys_addr)
    		return 1;
    	if (x->phys_addr < y->phys_addr)
    		return -1;
    	return 0;
    }
    
    static void __init read_obp_memory(const char *property,
    				   struct linux_prom64_registers *regs,
    				   int *num_ents)
    {
    
    	phandle node = prom_finddevice("/memory");
    
    	int prop_size = prom_getproplen(node, property);
    	int ents, ret, i;
    
    	ents = prop_size / sizeof(struct linux_prom64_registers);
    	if (ents > MAX_BANKS) {
    		prom_printf("The machine has more %s property entries than "
    			    "this kernel can support (%d).\n",
    			    property, MAX_BANKS);
    		prom_halt();
    	}
    
    	ret = prom_getproperty(node, property, (char *) regs, prop_size);
    	if (ret == -1) {
    
    		prom_printf("Couldn't get %s property from /memory.\n",
    				property);
    
    		prom_halt();
    	}
    
    	/* Sanitize what we got from the firmware, by page aligning
    	 * everything.
    	 */
    	for (i = 0; i < ents; i++) {
    		unsigned long base, size;
    
    		base = regs[i].phys_addr;
    		size = regs[i].reg_size;
    
    		size &= PAGE_MASK;
    		if (base & ~PAGE_MASK) {
    			unsigned long new_base = PAGE_ALIGN(base);
    
    			size -= new_base - base;
    			if ((long) size < 0L)
    				size = 0UL;
    			base = new_base;
    		}
    
    		if (size == 0UL) {
    			/* If it is empty, simply get rid of it.
    			 * This simplifies the logic of the other
    			 * functions that process these arrays.
    			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
    
    
    unsigned long sparc64_valid_addr_bitmap[VALID_ADDR_BITMAP_BYTES /
    					sizeof(unsigned long)];
    
    EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
    
    /* Kernel physical address base and size in bytes.  */
    
    unsigned long kern_base __read_mostly;
    unsigned long kern_size __read_mostly;
    
    /* Initial ramdisk setup */
    extern unsigned long sparc_ramdisk_image64;
    extern unsigned int sparc_ramdisk_image;
    extern unsigned int sparc_ramdisk_size;
    
    
    struct page *mem_map_zero __read_mostly;
    
    EXPORT_SYMBOL(mem_map_zero);
    
    
    unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
    
    unsigned long sparc64_kern_pri_context __read_mostly;
    unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
    unsigned long sparc64_kern_sec_context __read_mostly;
    
    
    int num_kernel_image_mappings;
    
    #ifdef CONFIG_DEBUG_DCFLUSH
    atomic_t dcpage_flushes = ATOMIC_INIT(0);
    #ifdef CONFIG_SMP
    atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
    #endif
    #endif
    
    
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
    	atomic_inc(&dcpage_flushes);
    #endif
    
    #ifdef DCACHE_ALIASING_POSSIBLE
    	__flush_dcache_page(page_address(page),
    			    ((tlb_type == spitfire) &&
    			     page_mapping(page) != NULL));
    #else
    	if (page_mapping(page) != NULL &&
    	    tlb_type == spitfire)
    		__flush_icache_page(__pa(page_address(page)));
    #endif
    }
    
    #define PG_dcache_dirty		PG_arch_1
    
    #define PG_dcache_cpu_shift	32UL
    #define PG_dcache_cpu_mask	\
    	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
    
#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
    	unsigned long mask = this_cpu;
    
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

    	__asm__ __volatile__("1:\n\t"
    			     "ldx	[%2], %%g7\n\t"
    			     "and	%%g7, %1, %%g1\n\t"
    			     "or	%%g1, %0, %%g1\n\t"
    			     "casx	[%2], %%g7, %%g1\n\t"
    			     "cmp	%%g7, %%g1\n\t"
    			     "bne,pn	%%xcc, 1b\n\t"
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			     : /* no outputs */
    			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
    			     : "g1", "g7");
    }
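
/* Illustrative C equivalent of the cas loop above (a sketch, not part of the
 * original file): atomically merge this_cpu and the dirty bit into
 * page->flags without disturbing the unrelated flag bits.
 *
 *	do {
 *		old = page->flags;
 *		new = (old & non_cpu_bits) | mask;
 *	} while (cmpxchg(&page->flags, old, new) != old);
 */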
    
    
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
    	unsigned long mask = (1UL << PG_dcache_dirty);
    
    	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
    			     "1:\n\t"
    			     "ldx	[%2], %%g7\n\t"
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			     "and	%%g1, %3, %%g1\n\t"
    			     "cmp	%%g1, %0\n\t"
    			     "bne,pn	%%icc, 2f\n\t"
    			     " andn	%%g7, %1, %%g1\n\t"
    			     "casx	[%2], %%g7, %%g1\n\t"
    			     "cmp	%%g7, %%g1\n\t"
    			     "bne,pn	%%xcc, 1b\n\t"
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			     "2:"
    			     : /* no outputs */
    			     : "r" (cpu), "r" (mask), "r" (&page->flags),
    
    			       "i" (PG_dcache_cpu_mask),
    			       "i" (PG_dcache_cpu_shift)
    
    			     : "g1", "g7");
    }
    
    
    static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
    {
    	unsigned long tsb_addr = (unsigned long) ent;
    
    
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);
    
    	__tsb_insert(tsb_addr, tag, pte);
    }
    
    
    unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
    
    
    static void flush_dcache(unsigned long pfn)
    
{
    
    	struct page *page;
    
	page = pfn_to_page(pfn);
	if (page) {
		unsigned long pg_flags;

		pg_flags = page->flags;
    		if (pg_flags & (1UL << PG_dcache_dirty)) {
    
    			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
    				   PG_dcache_cpu_mask);
    			int this_cpu = get_cpu();
    
    			/* This is just to optimize away some function calls
    			 * in the SMP case.
    			 */
    			if (cpu == this_cpu)
    				flush_dcache_page_impl(page);
    			else
    				smp_flush_dcache_page_impl(page, cpu);
    
    			clear_dcache_dirty_cpu(page, cpu);
    
    			put_cpu();
    		}
    
	}
}

    /* mm->context.lock must be held */
    static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
    				    unsigned long tsb_hash_shift, unsigned long address,
    				    unsigned long tte)
    {
    	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
    	unsigned long tag;
    
    
    	if (unlikely(!tsb))
    		return;
    
    
    	tsb += ((address >> tsb_hash_shift) &
    		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
    	tag = (address >> 22UL);
    	tsb_insert(tsb, tag, tte);
    }
    
    
    #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    static inline bool is_hugetlb_pte(pte_t pte)
    {
    	if ((tlb_type == hypervisor &&
    	     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
    	    (tlb_type != hypervisor &&
    	     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
    		return true;
    	return false;
    }
    #endif
    
    
    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
    
    {
	struct mm_struct *mm;
	unsigned long flags;
	pte_t pte = *ptep;

    	if (tlb_type != hypervisor) {
    		unsigned long pfn = pte_pfn(pte);
    
    		if (pfn_valid(pfn))
    			flush_dcache(pfn);
	}

	mm = vma->vm_mm;

	spin_lock_irqsave(&mm->context.lock, flags);
    
    
    #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
    
    	if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
    		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, HPAGE_SHIFT,
    					address, pte_val(pte));
	else
#endif
    		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
    					address, pte_val(pte));
    
    
    	spin_unlock_irqrestore(&mm->context.lock, flags);
    
    }
    
    void flush_dcache_page(struct page *page)
    {
    
    	struct address_space *mapping;
    	int this_cpu;
    
	if (tlb_type == hypervisor)
		return;

    	/* Do not bother with the expensive D-cache flush if it
    	 * is merely the zero page.  The 'bigcore' testcase in GDB
    	 * causes this case to run millions of times.
    	 */
    	if (page == ZERO_PAGE(0))
    		return;
    
    	this_cpu = get_cpu();
    
    	mapping = page_mapping(page);
    
    	if (mapping && !mapping_mapped(mapping)) {
    
    		int dirty = test_bit(PG_dcache_dirty, &page->flags);
    
    		if (dirty) {
    
    			int dirty_cpu = dcache_dirty_cpu(page);
    
    
    			if (dirty_cpu == this_cpu)
    				goto out;
    			smp_flush_dcache_page_impl(page, dirty_cpu);
    		}
    		set_dcache_dirty(page, this_cpu);
    	} else {
    		/* We could delay the flush for the !page_mapping
    		 * case too.  But that case is for exec env/arg
    		 * pages and those are %99 certainly going to get
    		 * faulted into the tlb (and thus flushed) anyways.
    		 */
    		flush_dcache_page_impl(page);
    	}
    
    out:
    	put_cpu();
    }
    
    EXPORT_SYMBOL(flush_dcache_page);
    
    
    
    void __kprobes flush_icache_range(unsigned long start, unsigned long end)
    
{
    
    	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
    
    	if (tlb_type == spitfire) {
    		unsigned long kaddr;
    
    
    		/* This code only runs on Spitfire cpus so this is
    		 * why we can assume _PAGE_PADDR_4U.
    		 */
    		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
    			unsigned long paddr, mask = _PAGE_PADDR_4U;
    
    			if (kaddr >= PAGE_OFFSET)
    				paddr = kaddr & mask;
    			else {
    				pgd_t *pgdp = pgd_offset_k(kaddr);
    				pud_t *pudp = pud_offset(pgdp, kaddr);
    				pmd_t *pmdp = pmd_offset(pudp, kaddr);
    				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
    
    				paddr = pte_val(*ptep) & mask;
    			}
    			__flush_icache_page(paddr);
    		}
    
    	}
    }
    
    EXPORT_SYMBOL(flush_icache_range);
    
    void mmu_info(struct seq_file *m)
    {
    
    	static const char *pgsz_strings[] = {
    		"8K", "64K", "512K", "4MB", "32MB",
    		"256MB", "2GB", "16GB",
    	};
    	int i, printed;
    
    
    	if (tlb_type == cheetah)
    		seq_printf(m, "MMU Type\t: Cheetah\n");
    	else if (tlb_type == cheetah_plus)
    		seq_printf(m, "MMU Type\t: Cheetah+\n");
    	else if (tlb_type == spitfire)
    		seq_printf(m, "MMU Type\t: Spitfire\n");
    
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
    	else
    		seq_printf(m, "MMU Type\t: ???\n");
    
    
    	seq_printf(m, "MMU PGSZs\t: ");
    	printed = 0;
    	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
    		if (cpu_pgsz_mask & (1UL << i)) {
    			seq_printf(m, "%s%s",
    				   printed ? "," : "", pgsz_strings[i]);
    			printed++;
    		}
    	}
    	seq_putc(m, '\n');
    
    
    #ifdef CONFIG_DEBUG_DCFLUSH
    	seq_printf(m, "DCPageFlushes\t: %d\n",
    		   atomic_read(&dcpage_flushes));
    #ifdef CONFIG_SMP
    	seq_printf(m, "DCPageFlushesXC\t: %d\n",
    		   atomic_read(&dcpage_flushes_xcall));
    #endif /* CONFIG_SMP */
    #endif /* CONFIG_DEBUG_DCFLUSH */
    }
    
    
    struct linux_prom_translation prom_trans[512] __read_mostly;
    unsigned int prom_trans_ents __read_mostly;
    
    
    unsigned long kern_locked_tte_data;
    
    
    /* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
    static inline int in_obp_range(unsigned long vaddr)
    {
    	return (vaddr >= LOW_OBP_ADDRESS &&
    		vaddr < HI_OBP_ADDRESS);
    }
    
    
static int cmp_ptrans(const void *a, const void *b)
{
    	const struct linux_prom_translation *x = a, *y = b;
    
    	if (x->virt > y->virt)
    		return 1;
    	if (x->virt < y->virt)
    		return -1;
	return 0;
}

    /* Read OBP translations property into 'prom_trans[]'.  */
    
static void __init read_obp_translations(void)
{
    	int n, node, ents, first, last, i;
    
    	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
    		prom_halt();
    	}
    
    	if (unlikely(n > sizeof(prom_trans))) {
    
    		prom_printf("prom_mappings: Size %d is too big.\n", n);
    
    		prom_halt();
    	}
    
    	if ((n = prom_getproperty(node, "translations",
    
    				  (char *)&prom_trans[0],
    				  sizeof(prom_trans))) == -1) {
    
    		prom_printf("prom_mappings: Couldn't get property.\n");
    
    		prom_halt();
    	}
    
    	n = n / sizeof(struct linux_prom_translation);
    
    	ents = n;
    
    	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
    	     cmp_ptrans, NULL);
    
    	/* Now kick out all the non-OBP entries.  */
    	for (i = 0; i < ents; i++) {
    		if (in_obp_range(prom_trans[i].virt))
    			break;
    	}
    	first = i;
    	for (; i < ents; i++) {
    		if (!in_obp_range(prom_trans[i].virt))
    			break;
    	}
    	last = i;
    
    	for (i = 0; i < (last - first); i++) {
    		struct linux_prom_translation *src = &prom_trans[i + first];
    		struct linux_prom_translation *dest = &prom_trans[i];
    
    		*dest = *src;
    	}
    	for (; i < ents; i++) {
    		struct linux_prom_translation *dest = &prom_trans[i];
    		dest->virt = dest->size = dest->data = 0x0UL;
    	}
    
    	prom_trans_ents = last - first;
    
    	if (tlb_type == spitfire) {
    		/* Clear diag TTE bits. */
    		for (i = 0; i < prom_trans_ents; i++)
    			prom_trans[i].data &= ~0x0003fe0000000000UL;
    	}
    
    
    	/* Force execute bit on.  */
    	for (i = 0; i < prom_trans_ents; i++)
    		prom_trans[i].data |= (tlb_type == hypervisor ?
    				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
    
}
    
    
    static void __init hypervisor_tlb_lock(unsigned long vaddr,
    				       unsigned long pte,
    				       unsigned long mmu)
    {
    
    	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
    
    	if (ret != 0) {
    
    		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
    
    			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
    
    static unsigned long kern_large_tte(unsigned long paddr);
    
    
    static void __init remap_kernel(void)
    
    {
    	unsigned long phys_page, tte_vaddr, tte_data;
    
    	int i, tlb_ent = sparc64_highest_locked_tlbent();
    
    	tte_vaddr = (unsigned long) KERNBASE;
    
    	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
    
    	tte_data = kern_large_tte(phys_page);
    
    	kern_locked_tte_data = tte_data;
    
    
    	/* Now lock us into the TLBs via Hypervisor or OBP. */
    	if (tlb_type == hypervisor) {
    
    		for (i = 0; i < num_kernel_image_mappings; i++) {
    
    			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
    			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
    
    			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
    			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
    			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
    			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
    
    	}
    
    	if (tlb_type == cheetah_plus) {
    		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
    					    CTX_CHEETAH_PLUS_NUC);
    		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
    		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
    	}
    
}
    
    
static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
    }
    
    void prom_world(int enter)
    {
	if (!enter)
		set_fs(get_fs());

    	__asm__ __volatile__("flushw");
    
    }
    
    void __flush_dcache_range(unsigned long start, unsigned long end)
    {
    	unsigned long va;
    
    	if (tlb_type == spitfire) {
    		int n = 0;
    
    		for (va = start; va < end; va += 32) {
    			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
    			if (++n >= 512)
    				break;
    		}
    
    	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
    
    		start = __pa(start);
    		end = __pa(end);
    		for (va = start; va < end; va += 32)
    			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
    					     "membar #Sync"
    					     : /* no outputs */
    					     : "r" (va),
    					       "i" (ASI_DCACHE_INVALIDATE));
    	}
    }
    
    EXPORT_SYMBOL(__flush_dcache_range);
    
    
    /* get_new_mmu_context() uses "cache + 1".  */
    DEFINE_SPINLOCK(ctx_alloc_lock);
    unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
    #define MAX_CTX_NR	(1UL << CTX_NR_BITS)
    #define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
    DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
    
    
    /* Caller does TLB context flushing on local CPU if necessary.
     * The caller also ensures that CTX_VALID(mm->context) is false.
     *
     * We must be careful about boundary cases so that we never
     * let the user have CTX 0 (nucleus) or we ever use a CTX
     * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
     */
    void get_new_mmu_context(struct mm_struct *mm)
    {
    	unsigned long ctx, new_ctx;
    	unsigned long orig_pgsz_bits;
    
	unsigned long flags;
	int new_version;

    	spin_lock_irqsave(&ctx_alloc_lock, flags);
    
    	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
    	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
    	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
    
	new_version = 0;
    	if (new_ctx >= (1 << CTX_NR_BITS)) {
    		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
    		if (new_ctx >= ctx) {
    			int i;
    			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
    				CTX_FIRST_VERSION;
    			if (new_ctx == 1)
    				new_ctx = CTX_FIRST_VERSION;
    
    			/* Don't call memset, for 16 entries that's just
    			 * plain silly...
    			 */
    			mmu_context_bmap[0] = 3;
    			mmu_context_bmap[1] = 0;
    			mmu_context_bmap[2] = 0;
    			mmu_context_bmap[3] = 0;
    			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
    				mmu_context_bmap[i + 0] = 0;
    				mmu_context_bmap[i + 1] = 0;
    				mmu_context_bmap[i + 2] = 0;
    				mmu_context_bmap[i + 3] = 0;
    			}
    
			new_version = 1;
    			goto out;
    		}
    	}
    	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
    	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
    out:
    	tlb_context_cache = new_ctx;
    	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
    
    	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
    
    
    	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

    static int numa_enabled = 1;
    static int numa_debug;
    
static int __init early_numa(char *p)
{
    
    	if (!p)
    		return 0;
    
    	if (strstr(p, "off"))
    		numa_enabled = 0;
    
    	if (strstr(p, "debug"))
    		numa_debug = 1;
    
	return 0;
}

    early_param("numa", early_numa);
    
    #define numadbg(f, a...) \
    do {	if (numa_debug) \
    		printk(KERN_INFO f, ## a); \
    } while (0)
    
    static void __init find_ramdisk(unsigned long phys_base)
    {
    #ifdef CONFIG_BLK_DEV_INITRD
    	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
    		unsigned long ramdisk_image;
    
    		/* Older versions of the bootloader only supported a
    		 * 32-bit physical address for the ramdisk image
    		 * location, stored at sparc_ramdisk_image.  Newer
    		 * SILO versions set sparc_ramdisk_image to zero and
    		 * provide a full 64-bit physical address at
    		 * sparc_ramdisk_image64.
    		 */
    		ramdisk_image = sparc_ramdisk_image;
    		if (!ramdisk_image)
    			ramdisk_image = sparc_ramdisk_image64;
    
    		/* Another bootloader quirk.  The bootloader normalizes
    		 * the physical address to KERNBASE, so we have to
    		 * factor that back out and add in the lowest valid
    		 * physical page address to get the true physical address.
    		 */
    		ramdisk_image -= KERNBASE;
    		ramdisk_image += phys_base;
    
    
    		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
    			ramdisk_image, sparc_ramdisk_size);
    
    
    		initrd_start = ramdisk_image;
    		initrd_end = ramdisk_image + sparc_ramdisk_size;
    
    		memblock_reserve(initrd_start, sparc_ramdisk_size);
    
    
    		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

    struct node_mem_mask {
    	unsigned long mask;
    	unsigned long val;
    };
    static struct node_mem_mask node_masks[MAX_NUMNODES];
    static int num_node_masks;
    
    int numa_cpu_lookup_table[NR_CPUS];
    cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
    
    #ifdef CONFIG_NEED_MULTIPLE_NODES
    
    struct mdesc_mblock {
    	u64	base;
    	u64	size;
    	u64	offset; /* RA-to-PA */
    };
    static struct mdesc_mblock *mblocks;
    static int num_mblocks;
    
    static unsigned long ra_to_pa(unsigned long addr)
    {
    	int i;
    
    	for (i = 0; i < num_mblocks; i++) {
    		struct mdesc_mblock *m = &mblocks[i];
    
    		if (addr >= m->base &&
    		    addr < (m->base + m->size)) {
    			addr += m->offset;
    			break;
    		}
    	}
    	return addr;
    }
    
    static int find_node(unsigned long addr)
    {
    	int i;
    
    	addr = ra_to_pa(addr);
    	for (i = 0; i < num_node_masks; i++) {
    		struct node_mem_mask *p = &node_masks[i];
    
    		if ((addr & p->mask) == p->val)
    			return i;
    	}
    	return -1;
    }
    
    
    static u64 memblock_nid_range(u64 start, u64 end, int *nid)
    
    {
    	*nid = find_node(start);
    	start += PAGE_SIZE;
    	while (start < end) {
    		int n = find_node(start);
    
    		if (n != *nid)
    			break;
    		start += PAGE_SIZE;
    	}
    
    
    	if (start > end)
    		start = end;
    
    
    	return start;
    }
    #endif
    
/* This must be invoked after performing all of the necessary
 * memblock_set_node() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
    static void __init allocate_node_data(int nid)
    {
    	struct pglist_data *p;
    
    	unsigned long start_pfn, end_pfn;
    
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long paddr;

	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
    
    	if (!paddr) {
    		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
    		prom_halt();
    	}
    	NODE_DATA(nid) = __va(paddr);
    	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
    
    
    	NODE_DATA(nid)->node_id = nid;
    
    #endif
    
    	p = NODE_DATA(nid);
    
    	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
    	p->node_start_pfn = start_pfn;
    	p->node_spanned_pages = end_pfn - start_pfn;
    }
    
static void init_node_masks_nonnuma(void)
{
	int i;
    
    
    	numadbg("Initializing tables for non-numa.\n");
    
    	node_masks[0].mask = node_masks[0].val = 0;
    	num_node_masks = 1;
    
    	for (i = 0; i < NR_CPUS; i++)
    		numa_cpu_lookup_table[i] = 0;
    
    	cpumask_setall(&numa_cpumask_lookup_table[0]);
    
    }
    
    #ifdef CONFIG_NEED_MULTIPLE_NODES
    struct pglist_data *node_data[MAX_NUMNODES];
    
    EXPORT_SYMBOL(numa_cpu_lookup_table);
    EXPORT_SYMBOL(numa_cpumask_lookup_table);
    EXPORT_SYMBOL(node_data);
    
    struct mdesc_mlgroup {
    	u64	node;
    	u64	latency;
    	u64	match;
    	u64	mask;
    };
    static struct mdesc_mlgroup *mlgroups;
    static int num_mlgroups;
    
    static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
    				   u32 cfg_handle)
    {
    	u64 arc;
    
    	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
    		u64 target = mdesc_arc_target(md, arc);
    		const u64 *val;
    
    		val = mdesc_get_property(md, target,
    					 "cfg-handle", NULL);
    		if (val && *val == cfg_handle)
    			return 0;
    	}
    	return -ENODEV;
    }
    
    static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
    				    u32 cfg_handle)
    {
    	u64 arc, candidate, best_latency = ~(u64)0;
    
    	candidate = MDESC_NODE_NULL;
    	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
    		u64 target = mdesc_arc_target(md, arc);
    		const char *name = mdesc_node_name(md, target);
    		const u64 *val;
    
    		if (strcmp(name, "pio-latency-group"))
    			continue;
    
    		val = mdesc_get_property(md, target, "latency", NULL);
    		if (!val)
    			continue;
    
    		if (*val < best_latency) {
    			candidate = target;
    			best_latency = *val;
    		}
    	}
    
    	if (candidate == MDESC_NODE_NULL)
    		return -ENODEV;
    
    	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
    }
    
    int of_node_to_nid(struct device_node *dp)
    {
    	const struct linux_prom64_registers *regs;
    	struct mdesc_handle *md;
    	u32 cfg_handle;
    	int count, nid;
    	u64 grp;
    
    
    	/* This is the right thing to do on currently supported
    	 * SUN4U NUMA platforms as well, as the PCI controller does
    	 * not sit behind any particular memory controller.
    	 */
    
    	if (!mlgroups)
    		return -1;
    
    	regs = of_get_property(dp, "reg", NULL);
    	if (!regs)
    		return -1;
    
    	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
    
    	md = mdesc_grab();
    
    	count = 0;
    	nid = -1;
    	mdesc_for_each_node_by_name(md, grp, "group") {
    		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
    			nid = count;
    			break;
    		}
    		count++;
    	}
    
    	mdesc_release(md);