Skip to content
Snippets Groups Projects
io_apic.c 93.9 KiB
Newer Older
  • Learn to ignore specific revisions
  • 	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
    		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		io_apic_write(dev->id, 0, reg_00.raw);
    	}
    	spin_unlock_irqrestore(&ioapic_lock, flags);
    
    	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
    
    		ioapic_write_entry(dev->id, i, entry[i]);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	return 0;
    }
    
    static struct sysdev_class ioapic_sysdev_class = {
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	.suspend = ioapic_suspend,
    	.resume = ioapic_resume,
    };
    
    static int __init ioapic_init_sysfs(void)
    {
    
    	struct sys_device * dev;
    	int i, size, error;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	error = sysdev_class_register(&ioapic_sysdev_class);
    	if (error)
    		return error;
    
    
    	for (i = 0; i < nr_ioapics; i++ ) {
    
    		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			* sizeof(struct IO_APIC_route_entry);
    
    		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		if (!mp_ioapic_data[i]) {
    			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
    			continue;
    		}
    		dev = &mp_ioapic_data[i]->dev;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    		dev->cls = &ioapic_sysdev_class;
    		error = sysdev_register(dev);
    		if (error) {
    			kfree(mp_ioapic_data[i]);
    			mp_ioapic_data[i] = NULL;
    			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
    			continue;
    		}
    	}
    
    	return 0;
    }
    
    device_initcall(ioapic_init_sysfs);
    
    
     * Dynamic irq allocate and deallocation
    
    unsigned int create_irq_nr(unsigned int irq_want)
    
    	unsigned int irq;
    	unsigned int new;
    
    	unsigned long flags;
    
    	struct irq_cfg *cfg_new;
    
    #ifndef CONFIG_HAVE_SPARSE_IRQ
    
    	irq_want = nr_irqs - 1;
    
    	spin_lock_irqsave(&vector_lock, flags);
    
    	for (new = irq_want; new > 0; new--) {
    
    		cfg_new = irq_cfg(new);
    		if (cfg_new && cfg_new->vector != 0)
    
    		/* check if need to create one */
    
    		if (!cfg_new)
    			cfg_new = irq_cfg_alloc(new);
    
    		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
    
    			irq = new;
    		break;
    	}
    	spin_unlock_irqrestore(&vector_lock, flags);
    
    	if (irq > 0) {
    
    int create_irq(void)
    {
    
    	int irq;
    
    	irq = create_irq_nr(nr_irqs - 1);
    
    	if (irq == 0)
    		irq = -1;
    
    	return irq;
    
/*
 * Tear down an irq created by create_irq()/create_irq_nr(): clear the
 * generic irq_desc state, release any interrupt-remapping table entry,
 * and finally free the vector under vector_lock.
 */
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	/* Reset the descriptor (handler, chip data) before freeing the vector. */
	dynamic_irq_cleanup(irq);


#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif

	spin_lock_irqsave(&vector_lock, flags);

	/* Drop the per-cpu vector assignment; protected by vector_lock. */
	__clear_irq_vector(irq);

	spin_unlock_irqrestore(&vector_lock, flags);
}
    
    
    Simon Arlott's avatar
    Simon Arlott committed
     * MSI message composition
    
    static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
    
    	struct irq_cfg *cfg;
    	int err;
    
    	cpumask_t tmp;
    
    	tmp = TARGET_CPUS;
    	err = assign_irq_vector(irq, tmp);
    	if (err)
    		return err;
    
    	cfg = irq_cfg(irq);
    	cpus_and(tmp, cfg->domain, tmp);
    	dest = cpu_mask_to_apicid(tmp);
    
    
    #ifdef CONFIG_INTR_REMAP
    	if (irq_remapped(irq)) {
    		struct irte irte;
    		int ir_index;
    		u16 sub_handle;
    
    		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
    		BUG_ON(ir_index == -1);
    
    		memset (&irte, 0, sizeof(irte));
    
    		irte.present = 1;
    		irte.dst_mode = INT_DEST_MODE;
    		irte.trigger_mode = 0; /* edge */
    		irte.dlvry_mode = INT_DELIVERY_MODE;
    		irte.vector = cfg->vector;
    		irte.dest_id = IRTE_DEST(dest);
    
    		modify_irte(irq, &irte);
    
    		msg->address_hi = MSI_ADDR_BASE_HI;
    		msg->data = sub_handle;
    		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
    				  MSI_ADDR_IR_SHV |
    				  MSI_ADDR_IR_INDEX1(ir_index) |
    				  MSI_ADDR_IR_INDEX2(ir_index);
    	} else
    #endif
    	{
    		msg->address_hi = MSI_ADDR_BASE_HI;
    		msg->address_lo =
    			MSI_ADDR_BASE_LO |
    			((INT_DEST_MODE == 0) ?
    				MSI_ADDR_DEST_MODE_PHYSICAL:
    				MSI_ADDR_DEST_MODE_LOGICAL) |
    			((INT_DELIVERY_MODE != dest_LowestPrio) ?
    				MSI_ADDR_REDIRECTION_CPU:
    				MSI_ADDR_REDIRECTION_LOWPRI) |
    			MSI_ADDR_DEST_ID(dest);
    
    		msg->data =
    			MSI_DATA_TRIGGER_EDGE |
    			MSI_DATA_LEVEL_ASSERT |
    			((INT_DELIVERY_MODE != dest_LowestPrio) ?
    				MSI_DATA_DELIVERY_FIXED:
    				MSI_DATA_DELIVERY_LOWPRI) |
    			MSI_DATA_VECTOR(cfg->vector);
    	}
    
    	return err;
    
    #ifdef CONFIG_SMP
    static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
    
    	struct irq_cfg *cfg;
    
    	struct msi_msg msg;
    	unsigned int dest;
    	cpumask_t tmp;
    
    	struct irq_desc *desc;
    
    
    	cpus_and(tmp, mask, cpu_online_map);
    	if (cpus_empty(tmp))
    
    	if (assign_irq_vector(irq, mask))
    
    	cfg = irq_cfg(irq);
    	cpus_and(tmp, cfg->domain, mask);
    	dest = cpu_mask_to_apicid(tmp);
    
    
    	read_msi_msg(irq, &msg);
    
    	msg.data &= ~MSI_DATA_VECTOR_MASK;
    
    	msg.data |= MSI_DATA_VECTOR(cfg->vector);
    
    	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
    
    	write_msi_msg(irq, &msg);
    
    	desc = irq_to_desc(irq);
    	desc->affinity = mask;
    
    
    #ifdef CONFIG_INTR_REMAP
    /*
     * Migrate the MSI irq to another cpumask. This migration is
     * done in the process context using interrupt-remapping hardware.
     */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	struct irq_desc *desc;

	/* Ignore the request if none of the target cpus is online. */
	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	/* Fetch the current remap-table entry; bail if there is none. */
	if (get_irte(irq, &irte))
		return;

	/* Allocate a vector reachable from the new mask before touching HW. */
	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	/* Record the new affinity in the descriptor for /proc and callers. */
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
    #endif
    
    /*
     * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
     * which implement the MSI or MSI-X Capability Structure.
     */
    static struct irq_chip msi_chip = {
    	.name		= "PCI-MSI",
    	.unmask		= unmask_msi_irq,
    	.mask		= mask_msi_irq,
    
    	.ack		= ack_apic_edge,
    
    #ifdef CONFIG_SMP
    	.set_affinity	= set_msi_irq_affinity,
    #endif
    	.retrigger	= ioapic_retrigger_irq,
    
    #ifdef CONFIG_INTR_REMAP
/*
 * irq_chip for MSI interrupts that go through the interrupt-remapping
 * hardware: mask/unmask are the plain MSI operations, but edges are
 * acked via the x2apic path and affinity changes rewrite the IRTE.
 */
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
    
    /*
     * Map the PCI dev to the corresponding remapping hardware unit
     * and allocate 'nvec' consecutive interrupt-remapping table entries
     * in it.
     */
    static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
    {
    	struct intel_iommu *iommu;
    	int index;
    
    	iommu = map_dev_to_ir(dev);
    	if (!iommu) {
    		printk(KERN_ERR
    		       "Unable to map PCI %s to iommu\n", pci_name(dev));
    		return -ENOENT;
    	}
    
    	index = alloc_irte(iommu, irq, nvec);
    	if (index < 0) {
    		printk(KERN_ERR
    		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
    		        pci_name(dev));
    		return -ENOSPC;
    	}
    	return index;
    }
    #endif
    
    
    static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
    {
    	int ret;
    	struct msi_msg msg;
    
    	ret = msi_compose_msg(dev, irq, &msg);
    	if (ret < 0)
    		return ret;
    
    	set_irq_msi(irq, desc);
    	write_msi_msg(irq, &msg);
    
    
    #ifdef CONFIG_INTR_REMAP
    	if (irq_remapped(irq)) {
    		struct irq_desc *desc = irq_to_desc(irq);
    		/*
    		 * irq migration in process context
    		 */
    		desc->status |= IRQ_MOVE_PCNTXT;
    		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
    	} else
    #endif
    		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
    
    static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
    {
    	unsigned int irq;
    
    	irq = dev->bus->number;
    	irq <<= 8;
    	irq |= dev->devfn;
    	irq <<= 12;
    
    	return irq;
    }
    
    
    int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
    
    	unsigned int irq;
    	int ret;
    
    	unsigned int irq_want;
    
    	irq_want = build_irq_for_pci_dev(dev) + 0x100;
    
    	irq = create_irq_nr(irq_want);
    	if (irq == 0)
    		return -1;
    
    #ifdef CONFIG_INTR_REMAP
    	if (!intr_remapping_enabled)
    		goto no_ir;
    
    	ret = msi_alloc_irte(dev, irq, 1);
    	if (ret < 0)
    		goto error;
    no_ir:
    #endif
    
    	ret = setup_msi_irq(dev, desc, irq);
    
    	if (ret < 0) {
    		destroy_irq(irq);
    
    
    #ifdef CONFIG_INTR_REMAP
    error:
    	destroy_irq(irq);
    	return ret;
    #endif
    
    int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
    {
    
    	unsigned int irq;
    	int ret, sub_handle;
    	struct msi_desc *desc;
    	unsigned int irq_want;
    
    #ifdef CONFIG_INTR_REMAP
    	struct intel_iommu *iommu = 0;
    	int index = 0;
    #endif
    
    	irq_want = build_irq_for_pci_dev(dev) + 0x100;
    	sub_handle = 0;
    	list_for_each_entry(desc, &dev->msi_list, list) {
    		irq = create_irq_nr(irq_want--);
    		if (irq == 0)
    			return -1;
    #ifdef CONFIG_INTR_REMAP
    		if (!intr_remapping_enabled)
    			goto no_ir;
    
    		if (!sub_handle) {
    			/*
    			 * allocate the consecutive block of IRTE's
    			 * for 'nvec'
    			 */
    			index = msi_alloc_irte(dev, irq, nvec);
    			if (index < 0) {
    				ret = index;
    				goto error;
    			}
    		} else {
    			iommu = map_dev_to_ir(dev);
    			if (!iommu) {
    				ret = -ENOENT;
    				goto error;
    			}
    			/*
    			 * setup the mapping between the irq and the IRTE
    			 * base index, the sub_handle pointing to the
    			 * appropriate interrupt remap table entry.
    			 */
    			set_irte_irq(irq, iommu, index, sub_handle);
    		}
    no_ir:
    #endif
    		ret = setup_msi_irq(dev, desc, irq);
    		if (ret < 0)
    			goto error;
    		sub_handle++;
    	}
    	return 0;
    
    	destroy_irq(irq);
    	return ret;
    
    void arch_teardown_msi_irq(unsigned int irq)
    {
    
    	destroy_irq(irq);
    
    #ifdef CONFIG_DMAR
    #ifdef CONFIG_SMP
    static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
    {
    	struct irq_cfg *cfg;
    	struct msi_msg msg;
    	unsigned int dest;
    	cpumask_t tmp;
    	struct irq_desc *desc;
    
    	cpus_and(tmp, mask, cpu_online_map);
    	if (cpus_empty(tmp))
    		return;
    
    	if (assign_irq_vector(irq, mask))
    		return;
    
    	cfg = irq_cfg(irq);
    	cpus_and(tmp, cfg->domain, mask);
    	dest = cpu_mask_to_apicid(tmp);
    
    	dmar_msi_read(irq, &msg);
    
    	msg.data &= ~MSI_DATA_VECTOR_MASK;
    	msg.data |= MSI_DATA_VECTOR(cfg->vector);
    	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
    
    	dmar_msi_write(irq, &msg);
    	desc = irq_to_desc(irq);
    	desc->affinity = mask;
    }
    #endif /* CONFIG_SMP */
    
/*
 * irq_chip for the DMAR unit's MSI: masked/unmasked via the DMAR
 * registers, edge-acked at the local APIC.
 */
struct irq_chip dmar_msi_type = {
	.name = "DMAR_MSI",
	.unmask = dmar_msi_unmask,
	.mask = dmar_msi_mask,
	.ack = ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity = dmar_msi_set_affinity,
#endif
	.retrigger = ioapic_retrigger_irq,
};
    
    int arch_setup_dmar_msi(unsigned int irq)
    {
    	int ret;
    	struct msi_msg msg;
    
    	ret = msi_compose_msg(NULL, irq, &msg);
    	if (ret < 0)
    		return ret;
    	dmar_msi_write(irq, &msg);
    	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
    		"edge");
    	return 0;
    }
    #endif
    
    #endif /* CONFIG_PCI_MSI */
    
    /*
     * Hypertransport interrupt support
     */
    #ifdef CONFIG_HT_IRQ
    
    #ifdef CONFIG_SMP
    
    
    static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
    
    	struct ht_irq_msg msg;
    	fetch_ht_irq_msg(irq, &msg);
    
    	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
    
    	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
    
    	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
    
    	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
    
    }
    
    static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
    {
    
    	struct irq_cfg *cfg;
    
    	unsigned int dest;
    	cpumask_t tmp;
    
    	struct irq_desc *desc;
    
    
    	cpus_and(tmp, mask, cpu_online_map);
    	if (cpus_empty(tmp))
    
    	if (assign_irq_vector(irq, mask))
    		return;
    
    	cfg = irq_cfg(irq);
    	cpus_and(tmp, cfg->domain, mask);
    	dest = cpu_mask_to_apicid(tmp);
    
    	target_ht_irq(irq, dest, cfg->vector);
    
    	desc = irq_to_desc(irq);
    	desc->affinity = mask;
    
/* irq_chip for Hypertransport interrupts: HT-specific mask/unmask, edge ack. */
static struct irq_chip ht_irq_chip = {

	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,

	.ack		= ack_apic_edge,

#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
    
    int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
    {
    
    	struct irq_cfg *cfg;
    	int err;
    	cpumask_t tmp;
    
    	tmp = TARGET_CPUS;
    	err = assign_irq_vector(irq, tmp);
    
    		cfg = irq_cfg(irq);
    		cpus_and(tmp, cfg->domain, tmp);
    
    		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
    
    			HT_IRQ_LOW_VECTOR(cfg->vector) |
    
    			((INT_DEST_MODE == 0) ?
    				HT_IRQ_LOW_DM_PHYSICAL :
    				HT_IRQ_LOW_DM_LOGICAL) |
    			HT_IRQ_LOW_RQEOI_EDGE |
    			((INT_DELIVERY_MODE != dest_LowestPrio) ?
    				HT_IRQ_LOW_MT_FIXED :
    				HT_IRQ_LOW_MT_ARBITRATED) |
    			HT_IRQ_LOW_IRQ_MASKED;
    
    
    		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
    					      handle_edge_irq, "edge");
    
    	return err;
    
    int __init io_apic_get_redir_entries (int ioapic)
    {
    	union IO_APIC_reg_01	reg_01;
    	unsigned long flags;
    
    	spin_lock_irqsave(&ioapic_lock, flags);
    	reg_01.raw = io_apic_read(ioapic, 1);
    	spin_unlock_irqrestore(&ioapic_lock, flags);
    
    	return reg_01.bits.entries;
    }
    
    int __init probe_nr_irqs(void)
    {
    	int idx;
    	int nr = 0;
    
    #ifndef CONFIG_XEN
    	int nr_min = 32;
    #else
    	int nr_min = NR_IRQS;
    #endif
    
    
    	for (idx = 0; idx < nr_ioapics; idx++)
    
    		nr += io_apic_get_redir_entries(idx) + 1;
    
    
    	/* double it for hotplug and msi and nmi */
    	nr <<= 1;
    
    	/* something wrong ? */
    
    	if (nr < nr_min)
    		nr = nr_min;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    /* --------------------------------------------------------------------------
    
                              ACPI-based IOAPIC Configuration
    
    Linus Torvalds's avatar
    Linus Torvalds committed
       -------------------------------------------------------------------------- */
    
    
    #ifdef CONFIG_ACPI
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    #ifdef CONFIG_X86_32
    
    int __init io_apic_get_unique_id(int ioapic, int apic_id)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	union IO_APIC_reg_00 reg_00;
    	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
    	physid_mask_t tmp;
    	unsigned long flags;
    	int i = 0;
    
    	/*
    
    	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
    	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 * supports up to 16 on one shared APIC bus.
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
    	 *      advantage of new APIC bus architecture.
    	 */
    
    	if (physids_empty(apic_id_map))
    		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
    
    	spin_lock_irqsave(&ioapic_lock, flags);
    	reg_00.raw = io_apic_read(ioapic, 0);
    	spin_unlock_irqrestore(&ioapic_lock, flags);
    
    	if (apic_id >= get_physical_broadcast()) {
    		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
    			"%d\n", ioapic, apic_id, reg_00.bits.ID);
    		apic_id = reg_00.bits.ID;
    	}
    
    	/*
    
    	 * Every APIC in a system must have a unique ID or we get lots of nice
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	 * 'stuck on smp_invalidate_needed IPI wait' messages.
    	 */
    	if (check_apicid_used(apic_id_map, apic_id)) {
    
    		for (i = 0; i < get_physical_broadcast(); i++) {
    			if (!check_apicid_used(apic_id_map, i))
    				break;
    		}
    
    		if (i == get_physical_broadcast())
    			panic("Max apic_id exceeded!\n");
    
    		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
    			"trying %d\n", ioapic, apic_id, i);
    
    		apic_id = i;
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    	tmp = apicid_to_cpu_present(apic_id);
    	physids_or(apic_id_map, apic_id_map, tmp);
    
    	if (reg_00.bits.ID != apic_id) {
    		reg_00.bits.ID = apic_id;
    
    		spin_lock_irqsave(&ioapic_lock, flags);
    		io_apic_write(ioapic, 0, reg_00.raw);
    		reg_00.raw = io_apic_read(ioapic, 0);
    		spin_unlock_irqrestore(&ioapic_lock, flags);
    
    		/* Sanity check */
    
    		if (reg_00.bits.ID != apic_id) {
    			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
    			return -1;
    		}
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    	}
    
    	apic_printk(APIC_VERBOSE, KERN_INFO
    			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
    
    	return apic_id;
    }
    
    
    int __init io_apic_get_version(int ioapic)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	union IO_APIC_reg_01	reg_01;
    	unsigned long flags;
    
    	spin_lock_irqsave(&ioapic_lock, flags);
    	reg_01.raw = io_apic_read(ioapic, 1);
    	spin_unlock_irqrestore(&ioapic_lock, flags);
    
    	return reg_01.bits.version;
    }
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    
    
    int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    {
    	if (!IO_APIC_IRQ(irq)) {
    
    		apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
    
    Linus Torvalds's avatar
    Linus Torvalds committed
    			ioapic);
    		return -EINVAL;
    	}
    
    	/*
    	 * IRQs < 16 are already in the irq_2_pin[] map
    	 */
    	if (irq >= 16)
    		add_pin_to_irq(irq, ioapic, pin);
    
    
    	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
    
    int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
    {
    	int i;
    
    	if (skip_ioapic_setup)
    		return -1;
    
    	for (i = 0; i < mp_irq_entries; i++)
    
    		if (mp_irqs[i].mp_irqtype == mp_INT &&
    		    mp_irqs[i].mp_srcbusirq == bus_irq)
    
    			break;
    	if (i >= mp_irq_entries)
    		return -1;
    
    	*trigger = irq_trigger(i);
    	*polarity = irq_polarity(i);
    	return 0;
    }
    
    
    #endif /* CONFIG_ACPI */
    
    /*
     * This function currently is only a helper for the i386 smp boot process where
     * we need to reprogram the ioredtbls to cater for the cpus which have come online
     * so mask in all cases should simply be TARGET_CPUS
     */
    #ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_cfg *cfg;

	if (skip_ioapic_setup == 1)
		return;

	/* Walk every pin of every IOAPIC that has an MP interrupt entry. */
	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			cfg = irq_cfg(irq);
			/* No vector yet: first-time setup; otherwise just retarget. */
			if (!cfg->vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));

#ifdef CONFIG_INTR_REMAP
			else if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
#endif
			else

				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}

	}
}
    #endif
    
    
    #define IOAPIC_RESOURCE_NAME_SIZE 11
    
    static struct resource *ioapic_resources;
    
    static struct resource * __init ioapic_setup_resources(void)
    {
    	unsigned long n;
    	struct resource *res;
    	char *mem;
    	int i;
    
    	if (nr_ioapics <= 0)
    		return NULL;
    
    	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
    	n *= nr_ioapics;
    
    	mem = alloc_bootmem(n);
    	res = (void *)mem;
    
    	if (mem != NULL) {
    		mem += sizeof(struct resource) * nr_ioapics;
    
    		for (i = 0; i < nr_ioapics; i++) {
    			res[i].name = mem;
    			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    			sprintf(mem,  "IOAPIC %u", i);
    			mem += IOAPIC_RESOURCE_NAME_SIZE;
    		}
    	}
    
    	ioapic_resources = res;
    
    	return res;
    }
    
    
    void __init ioapic_init_mappings(void)
    {
    	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
    	int i;
    
    	struct resource *ioapic_res;
    
    	ioapic_res = ioapic_setup_resources();
    
    	for (i = 0; i < nr_ioapics; i++) {
    		if (smp_found_config) {
    			ioapic_phys = mp_ioapics[i].mp_apicaddr;
    
    #ifdef CONFIG_X86_32
                            if (!ioapic_phys) {
                                    printk(KERN_ERR
                                           "WARNING: bogus zero IO-APIC "
                                           "address found in MPTABLE, "
                                           "disabling IO/APIC support!\n");
                                    smp_found_config = 0;
                                    skip_ioapic_setup = 1;
                                    goto fake_ioapic_page;
                            }
    #endif
    
    #ifdef CONFIG_X86_32
    
    			ioapic_phys = (unsigned long)
    
    				alloc_bootmem_pages(PAGE_SIZE);
    
    			ioapic_phys = __pa(ioapic_phys);
    		}
    		set_fixmap_nocache(idx, ioapic_phys);
    
    		apic_printk(APIC_VERBOSE,
    			    "mapped IOAPIC to %08lx (%08lx)\n",
    			    __fix_to_virt(idx), ioapic_phys);
    
    
    		if (ioapic_res != NULL) {
    			ioapic_res->start = ioapic_phys;
    			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
    			ioapic_res++;
    		}
    
    static int __init ioapic_insert_resources(void)
    {
    	int i;
    	struct resource *r = ioapic_resources;
    
    	if (!r) {
    		printk(KERN_ERR
    		       "IO APIC resources could be not be allocated.\n");
    		return -1;
    	}
    
    	for (i = 0; i < nr_ioapics; i++) {
    		insert_resource(&iomem_resource, r);
    		r++;
    	}
    
    	return 0;
    }
    
/* Insert the IO APIC resources after PCI initialization has occurred to handle
 * IO APICS that are mapped in on a BAR in PCI space. */
    late_initcall(ioapic_insert_resources);