ioapics[ioapic_idx].mp_config.apicid = reg_00.bits.ID;
}
/*
* Sanity check, is the ID really free? Every APIC in a
* system must have a unique ID or we get lots of nice
* 'stuck on smp_invalidate_needed IPI wait' messages.
*/
	if (apic->check_apicid_used(&phys_id_present_map,
				    mpc_ioapic_id(ioapic_idx))) {
		printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
			ioapic_idx, mpc_ioapic_id(ioapic_idx));
for (i = 0; i < get_physical_broadcast(); i++)
if (!physid_isset(i, phys_id_present_map))
break;
if (i >= get_physical_broadcast())
panic("Max APIC ID exceeded!\n");
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
i);
physid_set(i, phys_id_present_map);
ioapics[ioapic_idx].mp_config.apicid = i;
	} else {
		physid_mask_t tmp;
		apic->apicid_to_cpu_present(mpc_ioapic_id(ioapic_idx), &tmp);
		apic_printk(APIC_VERBOSE, "Setting %d in the "
				"phys_id_present_map\n",
				mpc_ioapic_id(ioapic_idx));
		physids_or(phys_id_present_map, phys_id_present_map, tmp);
}
/*
* We need to adjust the IRQ routing table
* if the ID changed.
*/
	if (old_id != mpc_ioapic_id(ioapic_idx))
		for (i = 0; i < mp_irq_entries; i++)
			if (mp_irqs[i].dstapic == old_id)
				mp_irqs[i].dstapic
					= mpc_ioapic_id(ioapic_idx);
	/*
	 * Update the ID register according to the right value
	 * from the MPC table if they are different.
	 */
	if (mpc_ioapic_id(ioapic_idx) == reg_00.bits.ID)
		continue;

	apic_printk(APIC_VERBOSE, KERN_INFO
		"...changing IO-APIC physical APIC ID to %d ...",
		mpc_ioapic_id(ioapic_idx));

	reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
raw_spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(ioapic_idx, 0, reg_00.raw);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(ioapic_idx, 0);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
printk("could not set ID!\n");
else
apic_printk(APIC_VERBOSE, " ok.\n");
}
}
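/*
 * Worked example for the fixup above (hypothetical IDs): if the MP
 * table reports two IO-APICs that both claim ID 2, the second one
 * fails check_apicid_used(), the loop picks the lowest free physical
 * ID (say 3), patches mp_config.apicid and every mp_irqs[].dstapic
 * that pointed at the old ID, and finally writes the new ID back to
 * the chip, verifying it with a read-back.
 */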
void __init setup_ioapic_ids_from_mpc(void)
{
if (acpi_ioapic)
return;
/*
* Don't check I/O APIC IDs for xAPIC systems. They have
* no meaning without the serial APIC bus.
*/
if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
return;
setup_ioapic_ids_from_mpc_nocheck();
}
int no_timer_check __initdata;
static int __init notimercheck(char *s)
{
no_timer_check = 1;
return 1;
}
__setup("no_timer_check", notimercheck);
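/*
 * Usage note (hedged): adding "no_timer_check" to the kernel command
 * line, e.g.
 *
 *	linux ... no_timer_check
 *
 * sets the flag above, so timer_irq_works() below reports success
 * without actually measuring ticks.
 */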
/*
* There is a nasty bug in some older SMP boards, their mptable lies
* about the timer IRQ. We do the following to work around the situation:
*
* - timer IRQ defaults to IO-APIC IRQ
* - if this function detects that timer IRQs are defunct, then we fall
* back to ISA timer IRQs
*/
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;
if (no_timer_check)
return 1;
local_save_flags(flags);
local_irq_enable();
/* Let ten ticks pass... */
mdelay((10 * 1000) / HZ);
local_irq_restore(flags);
/*
* Expect a few ticks at least, to be sure some possible
* glue logic does not lock up after one or two first
* ticks in a non-ExtINT mode. Also the local APIC
* might have cached one ExtINT interrupt. Finally, at
* least one tick may be lost due to delays.
*/
if (time_after(jiffies, t1 + 4))
return 1;
return 0;
}
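/*
 * Worked example for the arithmetic above (assuming HZ=1000):
 * mdelay((10 * 1000) / HZ) busy-waits 10 ms, i.e. roughly ten timer
 * ticks, and requiring jiffies to advance past t1 + 4 then tolerates
 * a few cached or lost ticks, as the preceding comment explains.
 */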
/*
* In the SMP+IOAPIC case it might happen that there are an unspecified
* number of pending IRQ events unhandled. These cases are very rare,
* so we 'resend' these IRQs via IPIs, to the same CPU. It's much
* better to do it this way as thus we do not have to be aware of
* 'pending' interrupts in the IRQ path, except at this point.
*/
/*
* Edge triggered needs to resend any interrupt
* that was delayed but this is now handled in the device
* independent code.
*/
/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * to return 1 to indicate that it was pending.
*
* This is not complete - we should be able to fake
* an edge even if it isn't on the 8259A...
*/
static unsigned int startup_ioapic_irq(struct irq_data *data)
{
	int was_pending = 0, irq = data->irq;
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < legacy_pic->nr_legacy_irqs) {
		legacy_pic->mask(irq);
		if (legacy_pic->irq_pending(irq))
			was_pending = 1;
	}
	__unmask_ioapic(data->chip_data);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return was_pending;
}
static int ioapic_retrigger_irq(struct irq_data *data)
{
struct irq_cfg *cfg = data->chip_data;
unsigned long flags;
raw_spin_lock_irqsave(&vector_lock, flags);
apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
raw_spin_unlock_irqrestore(&vector_lock, flags);
return 1;
}
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
* handled with the level-triggered descriptor, but that one has slightly
* more overhead. Level-triggered interrupts cannot be handled with the
* edge-triggered handler, without risking IRQ storms and other ugly
* races.
*/
#ifdef CONFIG_SMP

void send_cleanup_vector(struct irq_cfg *cfg)
{
cpumask_var_t cleanup_mask;
if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
} else {
cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
free_cpumask_var(cleanup_mask);
}
cfg->move_in_progress = 0;
}
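/*
 * Design note on the fallback above: alloc_cpumask_var(GFP_ATOMIC)
 * can fail under memory pressure, so the slow path sends one IPI per
 * online CPU in cfg->old_domain instead of a single multicast IPI;
 * both paths end with move_in_progress cleared.
 */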
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
u8 vector = cfg->vector;
for_each_irq_pin(entry, cfg->irq_2_pin) {
unsigned int reg;
apic = entry->apic;
pin = entry->pin;
/*
* With interrupt-remapping, destination information comes
* from interrupt-remapping table entry.
*/
if (!irq_remapped(cfg))
io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
reg |= vector;
io_apic_modify(apic, 0x10 + pin*2, reg);
}
}
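/*
 * Illustrative sketch (an assumption, not part of this driver): each
 * redirection table entry spans two 32-bit registers, 0x10 + 2*pin
 * (low half: vector, trigger, mask) and 0x11 + 2*pin (high half:
 * destination), which is why the routine above writes "dest" to one
 * register and patches only the vector bits of the other. A
 * hypothetical helper reading back a whole entry, with ioapic_lock
 * held, might look like this:
 */
#if 0
static u64 ioapic_read_redir_entry(unsigned int apic, unsigned int pin)
{
	u64 lo = io_apic_read(apic, 0x10 + pin * 2);	/* vector etc. */
	u64 hi = io_apic_read(apic, 0x11 + pin * 2);	/* destination */

	return (hi << 32) | lo;
}
#endif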
/*
* Either sets data->affinity to a valid value, and returns
* ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
 * leaves data->affinity untouched.
 */
int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  unsigned int *dest_id)
{
	struct irq_cfg *cfg = data->chip_data;
if (!cpumask_intersects(mask, cpu_online_mask))
return -1;
if (assign_irq_vector(data->irq, data->chip_data, mask))
return -1;
cpumask_copy(data->affinity, mask);
*dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
	return 0;
}

static int
ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		    bool force)
{
	unsigned int dest, irq = data->irq;
	unsigned long flags;
	int ret;
raw_spin_lock_irqsave(&ioapic_lock, flags);
ret = __ioapic_set_affinity(data, mask, &dest);
	if (!ret) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, data->chip_data);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return ret;
}

asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
unsigned vector, me;
ack_APIC_irq();
irq_enter();
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irq;
unsigned int irr;
struct irq_desc *desc;
struct irq_cfg *cfg;
irq = __this_cpu_read(vector_irq[vector]);
if (irq == -1)
continue;
desc = irq_to_desc(irq);
if (!desc)
continue;
cfg = irq_cfg(irq);
raw_spin_lock(&desc->lock);
/*
* Check if the irq migration is in progress. If so, we
* haven't received the cleanup request yet for this irq.
*/
if (cfg->move_in_progress)
goto unlock;
		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
* next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
* to myself.
*/
if (irr & (1 << (vector % 32))) {
apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
goto unlock;
}
		__this_cpu_write(vector_irq[vector], -1);
unlock:
		raw_spin_unlock(&desc->lock);
}
irq_exit();
}
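/*
 * Worked example for the IRR lookup above: the IRR is a bitmap spread
 * over eight 32-bit APIC registers spaced 0x10 apart, so vector 0x61
 * (97) lives at APIC_IRR + (97 / 32) * 0x10 = APIC_IRR + 0x30,
 * bit 97 % 32 = 1.
 */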
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;
	me = smp_processor_id();
	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

static void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	if (!cfg)
		return;
	__irq_complete_move(cfg, cfg->vector);
}
#else
static inline void irq_complete_move(struct irq_cfg *cfg) { }
#endif
static void ack_apic_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	irq_move_irq(data);
	ack_APIC_irq();
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
struct irq_pin_list *entry;
unsigned long flags;
raw_spin_lock_irqsave(&ioapic_lock, flags);
for_each_irq_pin(entry, cfg->irq_2_pin) {
unsigned int reg;
int pin;
pin = entry->pin;
reg = io_apic_read(entry->apic, 0x10 + pin*2);
/* Is the remote IRR bit set? */
if (reg & IO_APIC_REDIR_REMOTE_IRR) {
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return true;
}
}
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return false;
}
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
/* If we are moving the irq we need to mask it */
	if (unlikely(irqd_is_setaffinity_pending(data))) {
		mask_ioapic(cfg);
		return true;
	}
return false;
}
static inline void ioapic_irqd_unmask(struct irq_data *data,
struct irq_cfg *cfg, bool masked)
{
if (unlikely(masked)) {
/* Only migrate the irq if the ack has been received.
*
* On rare occasions the broadcast level triggered ack gets
* delayed going to ioapics, and if we reprogram the
* vector while Remote IRR is still set the irq will never
* fire again.
*
* To prevent this scenario we read the Remote IRR bit
* of the ioapic. This has two effects.
* - On any sane system the read of the ioapic will
* flush writes (and acks) going to the ioapic from
* this cpu.
* - We get to see if the ACK has actually been delivered.
*
* Based on failed experiments of reprogramming the
* ioapic entry from outside of irq context starting
* with masking the ioapic entry and then polling until
* Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
*
* However there appears to be no other way to plug
* this race, so if the Remote IRR bit is not
* accurate and is causing problems then it is a hardware bug
* and you can go talk to the chipset vendor about it.
*/
if (!io_apic_level_ack_pending(cfg))
irq_move_masked_irq(data);
unmask_ioapic(cfg);
}
}
#else
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
return false;
}
static inline void ioapic_irqd_unmask(struct irq_data *data,
struct irq_cfg *cfg, bool masked)
{
}
#endif
static void ack_apic_level(struct irq_data *data)
{
struct irq_cfg *cfg = data->chip_data;
int i, irq = data->irq;
unsigned long v;
bool masked;
irq_complete_move(cfg);
masked = ioapic_irqd_mask(data, cfg);
	/*
	 * It appears there is an erratum which affects at least version 0x11
* of I/O APIC (that's the 82093AA and cores integrated into various
* chipsets). Under certain conditions a level-triggered interrupt is
* erroneously delivered as edge-triggered one but the respective IRR
* bit gets set nevertheless. As a result the I/O unit expects an EOI
* message but it will never arrive and further interrupts are blocked
* from the source. The exact reason is so far unknown, but the
* phenomenon was observed when two consecutive interrupt requests
* from a given source get delivered to the same CPU and the source is
* temporarily disabled in between.
*
* A workaround is to simulate an EOI message manually. We achieve it
* by setting the trigger mode to edge and then to level when the edge
* trigger mode gets detected in the TMR of a local APIC for a
* level-triggered interrupt. We mask the source for the time of the
* operation to prevent an edge-triggered interrupt escaping meanwhile.
* The idea is from Manfred Spraul. --macro
*
* Also in the case when cpu goes offline, fixup_irqs() will forward
* any unhandled interrupt on the offlined cpu to the new cpu
* destination that is handling the corresponding interrupt. This
* interrupt forwarding is done via IPI's. Hence, in this case also
* level-triggered io-apic interrupt will be seen as an edge
* interrupt in the IRR. And we can't rely on the cpu's EOI
* to be broadcasted to the IO-APIC's which will clear the remoteIRR
* corresponding to the level-triggered interrupt. Hence on IO-APIC's
* supporting EOI register, we do an explicit EOI to clear the
* remote IRR and on IO-APIC's which don't have an EOI register,
* we use the above logic (mask+edge followed by unmask+level) from
	 * Manfred Spraul to clear the remote IRR.
	 */
	i = cfg->vector;
	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
/*
* We must acknowledge the irq before we move it or the acknowledge will
* not propagate properly.
*/
ack_APIC_irq();
/*
* Tail end of clearing remote IRR bit (either by delivering the EOI
* message via io-apic EOI register write or simulating it using
	 * mask+edge followed by unmask+level logic) manually when the
* level triggered interrupt is seen as the edge triggered interrupt
* at the cpu.
	 */
if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		eoi_ioapic_irq(irq, cfg);
	}

	ioapic_irqd_unmask(data, cfg, masked);
}
#ifdef CONFIG_IRQ_REMAP
static void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

static void ir_ack_apic_level(struct irq_data *data)
{
	ack_APIC_irq();
	eoi_ioapic_irq(data->irq, data->chip_data);
}
static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
{
seq_printf(p, " IR-%s", data->chip->name);
}
static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
chip->irq_print_chip = ir_print_prefix;
chip->irq_ack = ir_ack_apic_edge;
chip->irq_eoi = ir_ack_apic_level;
#ifdef CONFIG_SMP
	chip->irq_set_affinity = set_remapped_irq_affinity;
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_IRQ_REMAP */
static struct irq_chip ioapic_chip __read_mostly = {
.name = "IO-APIC",
.irq_startup = startup_ioapic_irq,
.irq_mask = mask_ioapic_irq,
.irq_unmask = unmask_ioapic_irq,
.irq_ack = ack_apic_edge,
.irq_eoi = ack_apic_level,
#ifdef CONFIG_SMP
.irq_set_affinity = ioapic_set_affinity,
#endif
.irq_retrigger = ioapic_retrigger_irq,
};
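/*
 * Sketch (hedged, mirroring how this driver binds pins to the chip):
 * per the comment above, a level-triggered pin would be wired to the
 * fasteoi flow and an edge-triggered pin to the edge flow, roughly:
 *
 *	irq_set_chip_and_handler_name(irq, &ioapic_chip,
 *				      trigger ? handle_fasteoi_irq
 *					      : handle_edge_irq,
 *				      trigger ? "fasteoi" : "edge");
 */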
static inline void init_IO_APIC_traps(void)
{
	struct irq_cfg *cfg;
	unsigned int irq;
/*
* NOTE! The local APIC isn't very good at handling
* multiple interrupts at the same interrupt level.
* As the interrupt level is determined by taking the
* vector number and shifting that right by 4, we
* want to spread these out a bit so that they don't
* all fall in the same interrupt level.
*
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_active_irq(irq) {
		cfg = irq_get_chip_data(irq);
		if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < legacy_pic->nr_legacy_irqs)
				legacy_pic->make_irq(irq);
			else
				/* Strange. Oh, well.. */
				irq_set_chip(irq, &no_irq_chip);
		}
	}
}
/*
* The local APIC irq-chip implementation:
*/
static void mask_lapic_irq(struct irq_data *data)
{
unsigned long v;
v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(struct irq_data *data)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
static void ack_lapic_irq(struct irq_data *data)
{
ack_APIC_irq();
}
static struct irq_chip lapic_chip __read_mostly = {
.name = "local-APIC",
.irq_mask = mask_lapic_irq,
.irq_unmask = unmask_lapic_irq,
	.irq_ack = ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	irq_clear_status_flags(irq, IRQ_LEVEL);
	irq_set_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}
/*
* This looks a bit hackish but it's about the only one way of sending
* a few INTA cycles to 8259As and any associated glue logic. ICR does
* not support the ExtINT mode, unfortunately. We need to send these
* cycles as some i82489DX-based boards have glue logic that keeps the
* 8259A interrupt line asserted until INTA. --macro
*/
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
struct IO_APIC_route_entry entry0, entry1;
unsigned char save_control, save_freq_select;
pin = find_isa_irq_pin(8, mp_INT);
if (pin == -1) {
WARN_ON_ONCE(1);
return;
}
apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	entry0 = ioapic_read_entry(apic, pin);
clear_IO_APIC_pin(apic, pin);
memset(&entry1, 0, sizeof(entry1));
entry1.dest_mode = 0; /* physical delivery */
entry1.mask = 0; /* unmask IRQ now */
entry1.dest = hard_smp_processor_id();
entry1.delivery_mode = dest_ExtINT;
entry1.polarity = entry0.polarity;
entry1.trigger = 0;
entry1.vector = 0;
ioapic_write_entry(apic, pin, entry1);
save_control = CMOS_READ(RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
RTC_FREQ_SELECT);
CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
i = 100;
while (i-- > 0) {
mdelay(10);
if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
i -= 10;
}
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
clear_IO_APIC_pin(apic, pin);
	ioapic_write_entry(apic, pin, entry0);
}
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
disable_timer_pin_1 = 1;
return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);
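/*
 * Usage note (hedged): booting with "disable_timer_pin_1" sets the
 * flag above; check_timer() below then clears the IO-APIC timer pin
 * once the timer is verified to work through it.
 */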
int timer_through_8259 __initdata;
/*
* This code may look a bit paranoid, but it's supposed to cooperate with
* a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
*
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_get_chip_data(0);
int node = cpu_to_node(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	int no_pin1 = 0;
local_irq_save(flags);
	legacy_pic->mask(0);
	assign_irq_vector(0, cfg, apic->target_cpus());

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC. Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	legacy_pic->init(1);
pin1 = find_isa_irq_pin(0, mp_INT);
apic1 = find_isa_irq_apic(0, mp_INT);
pin2 = ioapic_i8259.pin;
apic2 = ioapic_i8259.apic;
apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
"apic1=%d pin1=%d apic2=%d pin2=%d\n",
cfg->vector, apic1, pin1, apic2, pin2);
/*
* Some BIOS writers are clueless and report the ExtINTA
* I/O APIC input from the cascaded 8259A as the timer
* interrupt input. So just in case, if only one pin
* was found above, try it both directly and through the
* 8259A.
*/
if (pin1 == -1) {
if (irq_remapping_enabled)
panic("BIOS bug: timer not connected to IO-APIC");
pin1 = pin2;
apic1 = apic2;
no_pin1 = 1;
} else if (pin2 == -1) {
pin2 = pin1;
apic2 = apic1;
}
if (pin1 != -1) {
/*
* Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq_node(cfg, node, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		} else {
/* for edge trigger, setup_ioapic_irq already
* leave it unmasked.
* so only need to unmask if it is level-trigger
* do we really have level trigger timer?
*/
int idx;
idx = find_irq_entry(apic1, pin1, mp_INT);
			if (idx != -1 && irq_trigger(idx))
				unmask_ioapic(cfg);
		}

		if (timer_irq_works()) {
if (disable_timer_pin_1 > 0)
clear_IO_APIC_pin(0, pin1);
			goto out;
		}
		if (irq_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
		local_irq_disable();
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
"8254 timer not connected to IO-APIC\n");
apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
"(IRQ0) through the 8259A ...\n");
apic_printk(APIC_QUIET, KERN_INFO
"..... (found apic %d pin %d) ...\n", apic2, pin2);
/*
* legacy devices should be connected to IO APIC #0
*/
replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		legacy_pic->unmask(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
timer_through_8259 = 1;
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		local_irq_disable();
		legacy_pic->mask(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as Virtual Wire IRQ...\n");
lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	legacy_pic->unmask(0);
	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	legacy_pic->mask(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
apic_printk(APIC_QUIET, KERN_INFO
"...trying to set up timer as ExtINT IRQ...\n");
legacy_pic->init(0);
legacy_pic->make_irq(0);
apic_write(APIC_LVT0, APIC_DM_EXTINT);
unlock_ExtINT_logic();
if (timer_irq_works()) {
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	local_irq_disable();
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
if (x2apic_preenabled)
apic_printk(APIC_QUIET, KERN_INFO
"Perhaps problem with the pre-enabled x2apic mode\n"
"Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
"report. Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}

/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
* to devices. However there may be an I/O APIC pin available for
* this interrupt regardless. The pin may be left unconnected, but
* typically it will be reused as an ExtINT cascade interrupt for
* the master 8259A. In the MPS case such a pin will normally be
* reported as an ExtINT interrupt in the MP table. With ACPI
* there is no provision for ExtINT interrupts, and in the absence
* of an override it would be treated as an ordinary ISA I/O APIC
* interrupt, that is edge-triggered and unmasked by default. We
* used to do this, but it caused problems on some systems because
* of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
* the same ExtINT cascade interrupt to drive the local APIC of the
* bootstrap processor. Therefore we refrain from routing IRQ2 to
* the I/O APIC in all cases now. No actual device should request
 * it anyway. --macro
 */

#define PIC_IRQS (1UL << PIC_CASCADE_IR)
void __init setup_IO_APIC(void)
{
	/*
* calling enable_IO_APIC() is moved to setup_local_APIC for BP
*/
io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/*
	 * Set up IO-APIC IRQ routing.
*/
x86_init.mpparse.setup_ioapic_ids();
sync_Arb_IDs();
setup_IO_APIC_irqs();
init_IO_APIC_traps();
	if (legacy_pic->nr_legacy_irqs)
		check_timer();
}

/*
 * Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */
static int __init io_apic_bug_finalize(void)
{
if (sis_apic_bug == -1)
sis_apic_bug = 0;
return 0;
}
late_initcall(io_apic_bug_finalize);
static void resume_ioapic_id(int ioapic_idx)
{
unsigned long flags;
union IO_APIC_reg_00 reg_00;
raw_spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(ioapic_idx, 0);
if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx)) {
reg_00.bits.ID = mpc_ioapic_id(ioapic_idx);
		io_apic_write(ioapic_idx, 0, reg_00.raw);
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void ioapic_resume(void)
{
	int ioapic_idx;
for (ioapic_idx = nr_ioapics - 1; ioapic_idx >= 0; ioapic_idx--)
resume_ioapic_id(ioapic_idx);
	restore_ioapic_entries();
}

static struct syscore_ops ioapic_syscore_ops = {
	.suspend = save_ioapic_entries,
	.resume = ioapic_resume,
};

static int __init ioapic_init_ops(void)
{
	register_syscore_ops(&ioapic_syscore_ops);

	return 0;
}
device_initcall(ioapic_init_ops);
/*
 * Dynamic irq allocate and deallocation
 */
unsigned int create_irq_nr(unsigned int from, int node)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int ret = 0;
	int irq;
if (from < nr_irqs_gsi)
from = nr_irqs_gsi;
irq = alloc_irq_from(from, node);
if (irq < 0)
return 0;
cfg = alloc_irq_cfg(irq, node);
if (!cfg) {
free_irq_at(irq, NULL);
return 0;
}
raw_spin_lock_irqsave(&vector_lock, flags);
if (!__assign_irq_vector(irq, cfg, apic->target_cpus()))
ret = irq;
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (ret) {
		irq_set_chip_data(irq, cfg);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	} else {
		free_irq_at(irq, cfg);
	}
	return ret;
}
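/*
 * Example (a sketch, not from this file): a caller allocating a
 * dynamic IRQ above the GSI range on the boot node, treating 0 as
 * failure per the convention above:
 *
 *	unsigned int irq = create_irq_nr(nr_irqs_gsi, cpu_to_node(0));
 *	if (!irq)
 *		return -ENOSPC;
 */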
int create_irq(void)
{
	int node = cpu_to_node(0);
	unsigned int irq_want = nr_irqs_gsi;
	int irq;

	irq = create_irq_nr(irq_want, node);
if (irq == 0)
irq = -1;