Commit e360adbe authored by Peter Zijlstra, committed by Ingo Molnar

irq_work: Add generic hardirq context callbacks


Provide a mechanism that allows running code in IRQ context. It is
most useful for NMI code that needs to interact with the rest of the
system -- like waking up a task to drain buffers.

Perf currently has such a mechanism, so extract that and provide it as
a generic feature, independent of perf so that others may also
benefit.
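
As a rough illustration of the resulting interface (a sketch only; it
assumes the init_irq_work()/irq_work_queue() API this commit
introduces, and the buffer-draining thread is hypothetical):

#include <linux/irq_work.h>
#include <linux/sched.h>

static struct task_struct *drain_task;	/* hypothetical consumer thread */
static struct irq_work drain_work;

/* The callback runs from hardirq context, where it is legal to take
 * runqueue locks and thus to wake a task -- unlike NMI context. */
static void drain_wakeup(struct irq_work *work)
{
	wake_up_process(drain_task);
}

static void buffers_init(void)
{
	init_irq_work(&drain_work, drain_wakeup);
}

/* Called from the NMI handler when the buffer fills up: queue the
 * work and return; the callback fires from the next IRQ (self-IPI,
 * decrementer, or timer tick, depending on the architecture). */
static void buffers_full_in_nmi(void)
{
	irq_work_queue(&drain_work);	/* no-op if already queued */
}

irq_work_queue() is NMI-safe because enqueueing is just an atomic
list push; the expensive work happens later, in IRQ context.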

The IRQ context callback is generated through self-IPIs where
possible, or, on architectures like powerpc, by programming the
decrementer (the built-in timer facility) to fire an interrupt
immediately.

Architectures that don't have anything like this make do with a
callback from the timer tick. These architectures can call
irq_work_run() at the tail of any IRQ handlers that might enqueue such
work (like the perf IRQ handler) to avoid undue latencies in
processing the work.
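
Conceptually, the drain side does something like the following (a
simplified sketch, not the actual kernel/irq_work.c, which packs
claim/pending flag bits into the low bits of ->next and uses cmpxchg
loops for lock-free enqueue):

#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

/* Per-cpu list of queued work items (simplified representation). */
static DEFINE_PER_CPU(struct irq_work *, irq_work_list);

/* Run all pending work; must be called from hardirq context, either
 * from the self-IPI / decrementer interrupt or from the timer tick on
 * architectures without an "interrupt myself" primitive. */
void irq_work_run(void)
{
	struct irq_work *entry, *next;

	BUG_ON(!in_irq());

	/* Atomically detach the whole per-cpu list, then walk it. */
	next = xchg(&__get_cpu_var(irq_work_list), NULL);

	while (next) {
		entry = next;
		next = entry->next;
		entry->func(entry);
	}
}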

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[ various fixes ]
Signed-off-by: Huang Ying <ying.huang@intel.com>
LKML-Reference: <1287036094.7768.291.camel@yhuang-dev>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8e5fc1a7
Showing 20 changed files with 53 additions and 93 deletions
arch/alpha/Kconfig
@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	help
arch/alpha/include/asm/perf_event.h
 #ifndef __ASM_ALPHA_PERF_EVENT_H
 #define __ASM_ALPHA_PERF_EVENT_H
-/* Alpha only supports software events through this interface. */
-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
 #else
arch/alpha/kernel/time.c
@@ -41,7 +41,7 @@
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/profile.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -83,25 +83,25 @@ static struct {
 unsigned long est_cycle_freq;

-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK

-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);

-#define set_perf_event_pending_flag()	__get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0

-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 }

-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */

-#define test_perf_event_pending()	0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()		0
+#define clear_irq_work_pending()

-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */

 static inline __u32 rpcc(void)
@@ -191,9 +191,9 @@ irqreturn_t timer_interrupt(int irq, void *dev)
 	write_sequnlock(&xtime_lock);

-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}

 #ifndef CONFIG_SMP
arch/arm/Kconfig
@@ -23,6 +23,7 @@ config ARM
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
 	select HAVE_KERNEL_LZMA
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
arch/arm/include/asm/perf_event.h
@@ -12,18 +12,6 @@
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__

-/*
- * NOP: on *most* (read: all supported) ARM platforms, the performance
- * counter interrupts are regular interrupts and not an NMI. This
- * means that when we receive the interrupt we can call
- * perf_event_do_pending() that handles all of the work with
- * interrupts disabled.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
 /* ARM performance counters start from 1 (in the cp15 accesses) so use the
  * same indexes here for consistency. */
 #define PERF_EVENT_INDEX_OFFSET 1
arch/arm/kernel/perf_event.c
@@ -1092,7 +1092,7 @@ armv6pmu_handle_irq(int irq_num,
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();

 	return IRQ_HANDLED;
 }
@@ -2068,7 +2068,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 * platforms that can have the PMU interrupts raised as an NMI, this
 	 * will not work.
 	 */
-	perf_event_do_pending();
+	irq_work_run();

 	return IRQ_HANDLED;
 }
@@ -2436,7 +2436,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 		armpmu->disable(hwc, idx);
 	}

-	perf_event_do_pending();
+	irq_work_run();

 	/*
 	 * Re-enable the PMU.
@@ -2763,7 +2763,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		armpmu->disable(hwc, idx);
 	}

-	perf_event_do_pending();
+	irq_work_run();

 	/*
 	 * Re-enable the PMU.
arch/frv/Kconfig
@@ -7,6 +7,7 @@ config FRV
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS

 config ZONE_DMA
arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
arch/frv/lib/perf_event.c (deleted)
-/* Performance event handling
- *
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/perf_event.h>
-
-/*
- * mark the performance event as pending
- */
-void set_perf_event_pending(void)
-{
-}
arch/parisc/Kconfig
@@ -16,6 +16,7 @@ config PARISC
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
 	select BUG
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	help
arch/parisc/include/asm/perf_event.h
 #ifndef __ASM_PARISC_PERF_EVENT_H
 #define __ASM_PARISC_PERF_EVENT_H

-/* parisc only supports software events through this interface. */
-static inline void set_perf_event_pending(void) { }
+/* Empty, just to avoid compiling error */

 #endif /* __ASM_PARISC_PERF_EVENT_H */
arch/powerpc/Kconfig
@@ -138,6 +138,7 @@ config PPC
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
arch/powerpc/include/asm/paca.h
@@ -129,7 +129,7 @@ struct paca_struct {
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
+	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */

 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <asm/trace.h>
 #include <asm/io.h>
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */

-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK

 /*
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
  */
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
+static inline unsigned long test_irq_work_pending(void)
 {
 	unsigned long x;

 	asm volatile("lbz %0,%1(13)"
 		: "=r" (x)
-		: "i" (offsetof(struct paca_struct, perf_event_pending)));
+		: "i" (offsetof(struct paca_struct, irq_work_pending)));
 	return x;
 }

-static inline void set_perf_event_pending_flag(void)
+static inline void set_irq_work_pending_flag(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }

-static inline void clear_perf_event_pending(void)
+static inline void clear_irq_work_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }

 #else /* 32-bit */

-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);

-#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0

 #endif /* 32 vs 64 bit */

-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
 	preempt_disable();
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 	set_dec(1);
 	preempt_enable();
 }

-#else  /* CONFIG_PERF_EVENTS */
+#else  /* CONFIG_IRQ_WORK */

-#define test_perf_event_pending()	0
-#define clear_perf_event_pending()
+#define test_irq_work_pending()	0
+#define clear_irq_work_pending()

-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */

 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs)
 	calculate_steal_time();

-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}

 #ifdef CONFIG_PPC_ISERIES
arch/s390/Kconfig
@@ -95,6 +95,7 @@ config S390
 	select HAVE_KVM if 64BIT
 	select HAVE_ARCH_TRACEHOOK
 	select INIT_ALL_POSSIBLE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
arch/s390/include/asm/perf_event.h
@@ -4,7 +4,6 @@
  * Copyright 2009 Martin Schwidefsky, IBM Corporation.
  */

-static inline void set_perf_event_pending(void) {}
-static inline void clear_perf_event_pending(void) {}
+/* Empty, just to avoid compiling error */

 #define PERF_EVENT_INDEX_OFFSET 0
arch/sh/Kconfig
@@ -16,6 +16,7 @@ config SUPERH
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_ATTRS
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_KERNEL_GZIP
arch/sh/include/asm/perf_event.h
@@ -26,11 +26,4 @@ extern int register_sh_pmu(struct sh_pmu *);
 extern int reserve_pmc_hardware(void);
 extern void release_pmc_hardware(void);

-static inline void set_perf_event_pending(void)
-{
-	/* Nothing to see here, move along. */
-}
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #endif /* __ASM_SH_PERF_EVENT_H */
arch/sparc/Kconfig
@@ -26,6 +26,7 @@ config SPARC
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select RTC_CLASS
 	select RTC_DRV_M48T59
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_DMA_ATTRS
@@ -54,6 +55,7 @@ config SPARC64
 	select RTC_DRV_BQ4802
 	select RTC_DRV_SUN4V
 	select RTC_DRV_STARFIRE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
arch/sparc/include/asm/perf_event.h
 #ifndef __ASM_SPARC_PERF_EVENT_H
 #define __ASM_SPARC_PERF_EVENT_H

-extern void set_perf_event_pending(void);
-
-#define PERF_EVENT_INDEX_OFFSET 0
-
 #ifdef CONFIG_PERF_EVENTS
 #include <asm/ptrace.h>