/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 * 	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts - 
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in kmem_cache_t and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
 *	The sem is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
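
/*
 * A minimal usage sketch of the cache API described above.  Illustrative
 * only and not part of this file; "struct my_obj" and my_ctor() are
 * hypothetical:
 *
 *	static kmem_cache_t *my_cachep;
 *
 *	static void my_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
 *	{
 *		memset(obj, 0, sizeof(struct my_obj));
 *	}
 *
 *	my_cachep = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *				      SLAB_HWCACHE_ALIGN, my_ctor, NULL);
 *	obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cachep, obj);	(object must be back in its
 *						 constructed state, see above)
 *	kmem_cache_destroy(my_cachep);		(caller must prevent
 *						 concurrent allocs)
 */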

#include	<linux/config.h>
#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/nodemask.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>

#include	<asm/uaccess.h>
#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
 *		  SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
 * Note that this flag disables some debug features.
 */
#define ARCH_KMALLOC_MINALIGN 0
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking free objs within a slab, via
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32-bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)

/* Max number of objs-per-slab for caches which use off-slab slabs.
 * Needed to avoid a possible looping condition in cache_grow().
 */
static unsigned long offslab_limit;

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	struct list_head list;
	unsigned long colouroff;
	void *s_mem;		/* including colour offset */
	unsigned int inuse;	/* num of objs active in slab */
	kmem_bufctl_t free;
	unsigned short nodeid;
};
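
/*
 * Illustrative sketch (not used by this file) of how the free list is
 * threaded through the kmem_bufctl_t array that follows struct slab in
 * the on-slab case: entry i holds the index of the next free object,
 * slabp->free names the first free index, and BUFCTL_END terminates
 * the chain.  Popping one object then looks roughly like:
 *
 *	kmem_bufctl_t *bufctl = (kmem_bufctl_t *)(slabp + 1);
 *	kmem_bufctl_t next = slabp->free;
 *	void *objp = slabp->s_mem + cachep->buffer_size * next;
 *	slabp->free = bufctl[next];	(BUFCTL_END once no free objs remain)
 */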

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * We assume struct slab_rcu can overlay struct slab when destroying.
 */
struct slab_rcu {
	struct rcu_head head;
	kmem_cache_t *cachep;
	void *addr;
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[0];		/*
				 * Must have this definition in here for the proper
				 * alignment of array_cache. Also simplifies accessing
				 * the entries.
				 * [0] is for gcc 2.95. It should really be [].
				 */
};
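
/*
 * Illustrative fast-path sketch, simplified from the allocation and free
 * paths further down in this file (details assumed): with local interrupts
 * disabled, allocation pops the most recently freed, cache-warm object and
 * freeing pushes the object back:
 *
 *	struct array_cache *ac = cpu_cache_get(cachep);
 *	if (likely(ac->avail)) {
 *		ac->touched = 1;
 *		objp = ac->entry[--ac->avail];
 *	} else
 *		objp = cache_alloc_refill(cachep, flags);
 *
 *	...and on free, while there is room below ac->limit:
 *	ac->entry[ac->avail++] = objp;
 */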

/* bootstrap: The caches do not work without cpuarrays anymore,
 * but the cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned long next_reap;
	int free_touched;
	unsigned int free_limit;
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC 1
#define	SIZE_L3 (1 + MAX_NUMNODES)

/*
 * This function must be completely optimized away if
 * a constant is passed to it. Mostly the same as
 * what is in linux/slab.h except it returns an
 * index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <= x) \
		return i; \
	else \
		i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)	\
	do {	\
		INIT_LIST_HEAD(listp);		\
		list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)			\
	do {					\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

/*
 * kmem_cache_t
 *
 * manages a cache.
 */
struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;
	unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	unsigned int flags;	/* constant flags */
	unsigned int num;	/* # of objs per slab */
	spinlock_t spinlock;

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;		/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	unsigned int colour_next;	/* cache colouring */
	kmem_cache_t *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;	/* dynamic flags */

	/* constructor func */
	void (*ctor) (void *, kmem_cache_t *, unsigned long);

	/* de-constructor func */
	void (*dtor) (void *, kmem_cache_t *, unsigned long);

/* 4) cache creation/removal */
	const char *name;
	struct list_head next;

/* 5) statistics */
#if STATS
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;
#endif
#if DEBUG
	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif
};

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/* Optimization question: fewer reaps means less
 * probability for unnecessary cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_CPUC	(2*HZ)
#define REAPTIMEOUT_LIST3	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_INC_REAPED(x)	((x)->reaped++)
#define	STATS_SET_HIGH(x)	do { if ((x)->num_active > (x)->high_mark) \
					(x)->high_mark = (x)->num_active; \
				} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define	STATS_SET_FREEABLE(x, i) \
				do { if ((x)->max_freeable < i) \
					(x)->max_freeable = i; \
				} while (0)

#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_INC_REAPED(x)	do { } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) \
				do { } while (0)

#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG
/* Magic nums for obj red zoning.
 * Placed in the first word before and the first word after an obj.
 */
#define	RED_INACTIVE	0x5A2CF071UL	/* when obj is inactive */
#define	RED_ACTIVE	0x170FC2A5UL	/* when obj is active */

/* ...and for poisoning */
#define	POISON_INUSE	0x5a	/* for use-uninitialised poisoning */
#define POISON_FREE	0x6b	/* for use-after-free poisoning */
#define	POISON_END	0xa5	/* end-byte of poisoning */

/* memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
 */
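
/*
 * Worked example (illustrative; assumes a 32-bit arch with
 * BYTES_PER_WORD == 4, a 20-byte object, word alignment, and both
 * SLAB_RED_ZONE and SLAB_STORE_USER set):
 *
 *	obj_offset  = 4
 *	bytes  0.. 3: first redzone word
 *	bytes  4..23: the real object
 *	bytes 24..27: second redzone word	(buffer_size - 2*BYTES_PER_WORD)
 *	bytes 28..31: last caller address	(buffer_size - 1*BYTES_PER_WORD)
 *	buffer_size = 32
 */
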
static int obj_offset(kmem_cache_t *cachep)
{
	return cachep->obj_offset;
}

static int obj_size(kmem_cache_t *cachep)
{
	return cachep->obj_size;
}

static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}

static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long *)(objp + cachep->buffer_size -
					 2 * BYTES_PER_WORD);
	return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

static void **dbg_userword(kmem_cache_t *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define obj_size(cachep)		(cachep->buffer_size)
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

/*
 * Maximum size of an obj (in 2^order pages)
 * and absolute limit for the gfp order.
 */
#if defined(CONFIG_LARGE_ALLOCS)
#define	MAX_OBJ_ORDER	13	/* up to 32Mb */
#define	MAX_GFP_ORDER	13	/* up to 32Mb */
#elif defined(CONFIG_MMU)
#define	MAX_OBJ_ORDER	5	/* 32 pages */
#define	MAX_GFP_ORDER	5	/* 32 pages */
#else
#define	MAX_OBJ_ORDER	8	/* up to 1Mb */
#define	MAX_GFP_ORDER	8	/* up to 1Mb */
#endif

/*
 * Do not go above this order unless 0 objects fit into the slab.
 */
#define	BREAK_GFP_ORDER_HI	1
#define	BREAK_GFP_ORDER_LO	0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;

/* Functions for storing/retrieving the cachep and/or slab from the
 * global 'mem_map'. These are used to find the slab an obj belongs to.
 * With kfree(), these are used to find the cache which an obj belongs to.
 */
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
	page->lru.next = (struct list_head *)cache;
}

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	return (struct kmem_cache *)page->lru.next;
}

static inline void page_set_slab(struct page *page, struct slab *slab)
{
	page->lru.prev = (struct list_head *)slab;
}

static inline struct slab *page_get_slab(struct page *page)
{
	return (struct slab *)page->lru.prev;
}

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_slab(page);
}

/* These are the default caches for kmalloc. Custom caches can have other sizes. */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);

/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
	char *name;
	char *name_dma;
};

static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
#undef CACHE
};

static struct arraycache_init initarray_cache __initdata =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static kmem_cache_t cache_cache = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.buffer_size = sizeof(kmem_cache_t),
	.flags = SLAB_NO_REAP,
	.spinlock = SPIN_LOCK_UNLOCKED,
	.name = "kmem_cache",
#if DEBUG
	.obj_size = sizeof(kmem_cache_t),
#endif
};

/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;

/*
 * vm_enough_memory() looks at this to determine how many
 * slab-allocated pages are possibly freeable under pressure
 *
 * SLAB_RECLAIM_ACCOUNT turns this on per-slab
 */
atomic_t slab_reclaim_pages;

/*
 * chicken and egg problem: delay the per-cpu array allocation
 * until the general caches are up.
 */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;

static DEFINE_PER_CPU(struct work_struct, reap_work);

static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
static void enable_cpucache(kmem_cache_t *cachep);
static void cache_reap(void *unused);
static int __node_shrink(kmem_cache_t *cachep, int node);

static inline struct array_cache *cpu_cache_get(kmem_cache_t *cachep)
{
	return cachep->array[smp_processor_id()];
}

static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
{
	struct cache_sizes *csizep = malloc_sizes;

#if DEBUG
	/* This happens if someone tries to call
	 * kmem_cache_create(), or __kmalloc(), before
	 * the generic caches are initialized.
	 */
	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
	while (size > csizep->cs_size)
		csizep++;

	/*
	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
	 * has cs_{dma,}cachep==NULL. Thus no special case
	 * for large kmalloc calls required.
	 */
	if (unlikely(gfpflags & GFP_DMA))
		return csizep->cs_dmacachep;
	return csizep->cs_cachep;
}

kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
	return __find_general_cachep(size, gfpflags);
}
EXPORT_SYMBOL(kmem_find_general_cachep);

static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}

/* Calculate the number of objects and left-over bytes for a given
   buffer size. */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - The struct slab
	 * - One kmem_bufctl_t for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;
	} else {
		/*
		 * Ignore padding for the initial guess. The padding
		 * is at most @align-1 bytes, and @buffer_size is at
		 * least @align. In the worst case, this result will
		 * be one greater than the number of objects that fit
		 * into the memory allocation when taking the padding
		 * into account.
		 */
		nr_objs = (slab_size - sizeof(struct slab)) /
			  (buffer_size + sizeof(kmem_bufctl_t));

		/*
		 * This calculated number will be either the right
		 * amount, or one greater than what we want.
		 */
		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
		       > slab_size)
			nr_objs--;

		if (nr_objs > SLAB_LIMIT)
			nr_objs = SLAB_LIMIT;

		mgmt_size = slab_mgmt_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
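
/*
 * Worked example for the on-slab case (illustrative; assumes a 4096-byte
 * slab, buffer_size = 256, sizeof(struct slab) = 32, 4-byte kmem_bufctl_t
 * and 4-byte alignment of the management structure):
 *
 *	initial guess:	nr_objs = (4096 - 32) / (256 + 4) = 15
 *	check:		slab_mgmt_size(15, 4) = ALIGN(32 + 15*4, 4) = 92
 *			92 + 15*256 = 3932 <= 4096, so 15 objects do fit
 *	result:		*num = 15, *left_over = 4096 - 15*256 - 92 = 164
 */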

#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)

static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
}

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void __devinit start_cpu_timer(int cpu)
{
	struct work_struct *reap_work = &per_cpu(reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->func == NULL) {
		INIT_WORK(reap_work, cache_reap, NULL);
		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount)
{
	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *nc = NULL;

	nc = kmalloc_node(memsize, GFP_KERNEL, node);
	if (nc) {
		nc->avail = 0;
		nc->limit = entries;
		nc->batchcount = batchcount;
		nc->touched = 0;
		spin_lock_init(&nc->lock);
	}
	return nc;
}

#ifdef CONFIG_NUMA
static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);

static struct array_cache **alloc_alien_cache(int node, int limit)
{
	struct array_cache **ac_ptr;
	int memsize = sizeof(void *) * MAX_NUMNODES;
	int i;

	if (limit > 1)
		limit = 12;
	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
	if (ac_ptr) {
		for_each_node(i) {
			if (i == node || !node_online(i)) {
				ac_ptr[i] = NULL;
				continue;
			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
					kfree(ac_ptr[i]);
				kfree(ac_ptr);
				return NULL;
			}
		}
	}
	return ac_ptr;
}

static void free_alien_cache(struct array_cache **ac_ptr)
{
	int i;

	if (!ac_ptr)
		return;

	for_each_node(i)
	    kfree(ac_ptr[i]);

	kfree(ac_ptr);
}

static void __drain_alien_cache(kmem_cache_t *cachep,
				struct array_cache *ac, int node)
{
	struct kmem_list3 *rl3 = cachep->nodelists[node];

	if (ac->avail) {
		spin_lock(&rl3->list_lock);
		free_block(cachep, ac->entry, ac->avail, node);
		ac->avail = 0;
		spin_unlock(&rl3->list_lock);
	}
}

static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
{
	int i = 0;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		ac = l3->alien[i];
		if (ac) {
			spin_lock_irqsave(&ac->lock, flags);
			__drain_alien_cache(cachep, ac, i);
			spin_unlock_irqrestore(&ac->lock, flags);
		}
	}
}
#else
#define alloc_alien_cache(node, limit) do { } while (0)
#define free_alien_cache(ac_ptr) do { } while (0)
#define drain_alien_cache(cachep, l3) do { } while (0)
#endif

static int __devinit cpuup_callback(struct notifier_block *nfb,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	kmem_cache_t *cachep;
	struct kmem_list3 *l3 = NULL;
	int node = cpu_to_node(cpu);
	int memsize = sizeof(struct kmem_list3);

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&cache_chain_mutex);
		/* we need to do this right in the beginning since
		 * alloc_arraycache's are going to use this list.
		 * kmalloc_node allows us to add the slab to the right
		 * kmem_list3 and not this cpu's kmem_list3
		 */

		list_for_each_entry(cachep, &cache_chain, next) {
			/* setup the size64 kmemlist for cpu before we can
			 * begin anything. Make sure some other cpu on this
			 * node has not already allocated this
			 */
			if (!cachep->nodelists[node]) {
				if (!(l3 = kmalloc_node(memsize,
							GFP_KERNEL, node)))
					goto bad;
				kmem_list3_init(l3);
				l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

				cachep->nodelists[node] = l3;
			}

			spin_lock_irq(&cachep->nodelists[node]->list_lock);
			cachep->nodelists[node]->free_limit =
			    (1 + nr_cpus_node(node)) *
			    cachep->batchcount + cachep->num;
			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
		}

		/* Now we can go ahead with allocating the shared arrays
		   & array caches */
		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;

			nc = alloc_arraycache(node, cachep->limit,
					      cachep->batchcount);
			if (!nc)
				goto bad;
			cachep->array[cpu] = nc;

			l3 = cachep->nodelists[node];
			BUG_ON(!l3);
			if (!l3->shared) {
				if (!(nc = alloc_arraycache(node,
							    cachep->shared *
							    cachep->batchcount,
							    0xbaadf00d)))
					goto bad;

				/* we are serialised from CPU_DEAD or
				   CPU_UP_CANCELLED by the cpucontrol lock */
				l3->shared = nc;
			}
		}
		mutex_unlock(&cache_chain_mutex);
		break;
	case CPU_ONLINE:
		start_cpu_timer(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		/* fall thru */
	case CPU_UP_CANCELED:
		mutex_lock(&cache_chain_mutex);

		list_for_each_entry(cachep, &cache_chain, next) {
			struct array_cache *nc;
			cpumask_t mask;

			mask = node_to_cpumask(node);
			spin_lock_irq(&cachep->spinlock);
			/* cpu is dead; no one can alloc from it. */
			nc = cachep->array[cpu];
			cachep->array[cpu] = NULL;
			l3 = cachep->nodelists[node];

			if (!l3)
				goto unlock_cache;

			spin_lock(&l3->list_lock);

			/* Free limit for this kmem_list3 */