/* memcontrol.c - Memory Controller
     *
     * Copyright IBM Corporation, 2007
     * Author Balbir Singh <balbir@linux.vnet.ibm.com>
     *
    
     * Copyright 2007 OpenVZ SWsoft Inc
     * Author: Pavel Emelianov <xemul@openvz.org>
     *
    
     * This program is free software; you can redistribute it and/or modify
     * it under the terms of the GNU General Public License as published by
     * the Free Software Foundation; either version 2 of the License, or
     * (at your option) any later version.
     *
     * This program is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     * GNU General Public License for more details.
     */
    
    #include <linux/res_counter.h>
    #include <linux/memcontrol.h>
    #include <linux/cgroup.h>
    
    #include <linux/mm.h>
    
    #include <linux/page-flags.h>
    
    #include <linux/backing-dev.h>
    
    #include <linux/bit_spinlock.h>
    #include <linux/rcupdate.h>
    
    #include <linux/swap.h>
    #include <linux/spinlock.h>
    #include <linux/fs.h>
    
    #include <linux/seq_file.h>
    
    #include <linux/vmalloc.h>
    
    #include <linux/mm_inline.h>
    
    struct cgroup_subsys mem_cgroup_subsys __read_mostly;
    static struct kmem_cache *page_cgroup_cache __read_mostly;
    #define MEM_CGROUP_RECLAIM_RETRIES	5
    
    /*
     * Statistics for memory cgroup.
     */
    enum mem_cgroup_stat_index {
    	/*
    	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
    	 */
    	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
    	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
    
    	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
    	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
    
    
    	MEM_CGROUP_STAT_NSTATS,
    };
    
    struct mem_cgroup_stat_cpu {
    	s64 count[MEM_CGROUP_STAT_NSTATS];
    } ____cacheline_aligned_in_smp;
    
    struct mem_cgroup_stat {
    	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
    };
    
/*
 * Statistics are updated with irqs disabled, so there is no need to
 * bump the preempt count.
 */
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}
    
    static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
    		enum mem_cgroup_stat_index idx)
    {
    	int cpu;
    	s64 ret = 0;
    	for_each_possible_cpu(cpu)
    		ret += stat->cpustat[cpu].count[idx];
    	return ret;
    }
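
/*
 * Illustrative sketch (not part of the original file): a writer updates
 * only the current CPU's slot and relies on interrupts being disabled
 * rather than preempt_disable() or atomics; a reader sums every possible
 * CPU's slot and tolerates a slightly stale total.
 *
 *	local_irq_save(flags);
 *	cpustat = &mem->stat.cpustat[smp_processor_id()];
 *	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, 1);
 *	local_irq_restore(flags);
 *
 *	total_rss = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 */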
    
    
    /*
     * per-zone information in memory controller.
     */
    struct mem_cgroup_per_zone {
    
    	/*
    	 * spin_lock to protect the per cgroup LRU
    	 */
    	spinlock_t		lru_lock;
    
    	struct list_head	lists[NR_LRU_LISTS];
    	unsigned long		count[NR_LRU_LISTS];
    
    };
    /* Macro for accessing counter */
    #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
    
    struct mem_cgroup_per_node {
    	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
    };
    
    struct mem_cgroup_lru_info {
    	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
    };
    
    
    /*
     * The memory controller data structure. The memory controller controls both
     * page cache and RSS per cgroup. We would eventually like to provide
     * statistics based on the statistics developed by Rik Van Riel for clock-pro,
     * to help the administrator determine what knobs to tune.
     *
     * TODO: Add a water mark for the memory controller. Reclaim will begin when
    
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
     * a feature that will be implemented much later in the future.
    
     */
    struct mem_cgroup {
    	struct cgroup_subsys_state css;
    	/*
    	 * the counter to account for memory usage
    	 */
    	struct res_counter res;
    
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;
    
    /*
     * We use the lower bit of the page->page_cgroup pointer as a bit spin
    
     * lock.  We need to ensure that page->page_cgroup is at least two
     * byte aligned (based on comments from Nick Piggin).  But since
     * bit_spin_lock doesn't actually set that lock bit in a non-debug
     * uniprocessor kernel, we should avoid setting it here too.
    
     */
    #define PAGE_CGROUP_LOCK_BIT 	0x0
    
    #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
    #define PAGE_CGROUP_LOCK 	(1 << PAGE_CGROUP_LOCK_BIT)
    #else
    #define PAGE_CGROUP_LOCK	0x0
    #endif
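
/*
 * Illustrative sketch (not part of the original file): a page_cgroup
 * allocated from the slab cache is at least word aligned, so bit 0 of
 * the pointer value is always clear and can double as the lock bit:
 *
 *	page->page_cgroup = (unsigned long)pc | PAGE_CGROUP_LOCK;
 *	pc = (struct page_cgroup *)(page->page_cgroup & ~PAGE_CGROUP_LOCK);
 *
 * page_assign_page_cgroup() and page_get_page_cgroup() below perform
 * exactly this packing and unpacking while the bit lock is held.
 */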
    
    /*
     * A page_cgroup page is associated with every page descriptor. The
     * page_cgroup helps us identify information about the cgroup
     */
    struct page_cgroup {
    	struct list_head lru;		/* per cgroup LRU list */
    	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	   (0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE    (0x2)	/* page is active in this cgroup */
#define PAGE_CGROUP_FLAG_FILE	   (0x4)	/* page is file system backed */
#define PAGE_CGROUP_FLAG_UNEVICTABLE (0x8)	/* page is unevictable */
    
static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}
    
    enum charge_type {
    	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
    	MEM_CGROUP_CHARGE_TYPE_MAPPED,
    
    	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
    
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
};
    
    /*
 * Always modified under the lru lock, so preempt_disable() is not necessary.
     */
    static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
    					bool charge)
    {
    	int val = (charge)? 1 : -1;
    	struct mem_cgroup_stat *stat = &mem->stat;
    
    	struct mem_cgroup_stat_cpu *cpustat;
    
    	VM_BUG_ON(!irqs_disabled());
    
    
	cpustat = &stat->cpustat[smp_processor_id()];
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}
    
    static struct mem_cgroup_per_zone *
    
    mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
    {
    	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
    }
    
    
    static struct mem_cgroup_per_zone *
    
    page_cgroup_zoneinfo(struct page_cgroup *pc)
    {
    	struct mem_cgroup *mem = pc->mem_cgroup;
    	int nid = page_cgroup_nid(pc);
    	int zid = page_cgroup_zid(pc);
    
    	return mem_cgroup_zoneinfo(mem, nid, zid);
    }
    
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
    	int nid, zid;
    	struct mem_cgroup_per_zone *mz;
    	u64 total = 0;
    
    	for_each_online_node(nid)
    		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
    			mz = mem_cgroup_zoneinfo(mem, nid, zid);
    			total += MEM_CGROUP_ZSTAT(mz, idx);
    		}
	return total;
}
    
    static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
    
    {
    	return container_of(cgroup_subsys_state(cont,
    				mem_cgroup_subsys_id), struct mem_cgroup,
    				css);
    }
    
    
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
    
    	/*
    	 * mm_update_next_owner() may clear mm->owner to NULL
    	 * if it races with swapoff, page migration, etc.
    	 * So this can be called with p == NULL.
    	 */
    	if (unlikely(!p))
    		return NULL;
    
    
    	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
    				struct mem_cgroup, css);
    }
    
    
    static inline int page_cgroup_locked(struct page *page)
    {
    
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
    
    	VM_BUG_ON(!page_cgroup_locked(page));
    	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
    
    }
    
    struct page_cgroup *page_get_page_cgroup(struct page *page)
    {
    
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}
    
    static void lock_page_cgroup(struct page *page)
    
    {
    	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
    }
    
    
    static int try_lock_page_cgroup(struct page *page)
    {
    	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
    }
    
    
    static void unlock_page_cgroup(struct page *page)
    
    {
    	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
    }
    
    
    static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int lru = LRU_BASE;

	if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
    		lru = LRU_UNEVICTABLE;
    	else {
    		if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
    			lru += LRU_ACTIVE;
    		if (pc->flags & PAGE_CGROUP_FLAG_FILE)
    			lru += LRU_FILE;
    	}
    
    	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
    
    
    	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
    
	list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
    	int lru = LRU_BASE;
    
    	if (pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE)
    		lru = LRU_UNEVICTABLE;
    	else {
    		if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
    			lru += LRU_ACTIVE;
    		if (pc->flags & PAGE_CGROUP_FLAG_FILE)
    			lru += LRU_FILE;
    	}
    
    
    	MEM_CGROUP_ZSTAT(mz, lru) += 1;
    	list_add(&pc->lru, &mz->lists[lru]);
    
    
    	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
    }
    
    
static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
{
    	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
    
    	int active    = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
    	int file      = pc->flags & PAGE_CGROUP_FLAG_FILE;
    	int unevictable = pc->flags & PAGE_CGROUP_FLAG_UNEVICTABLE;
    	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
    				(LRU_FILE * !!file + !!active);
    
    	if (lru == from)
    		return;
    
    	MEM_CGROUP_ZSTAT(mz, from) -= 1;
    
	if (is_unevictable_lru(lru)) {
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		pc->flags |= PAGE_CGROUP_FLAG_UNEVICTABLE;
    	} else {
    		if (is_active_lru(lru))
    			pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
    		else
    			pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
    		pc->flags &= ~PAGE_CGROUP_FLAG_UNEVICTABLE;
    	}
    
    
    	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_move(&pc->lru, &mz->lists[lru]);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
    {
    	int ret;
    
    	task_lock(task);
    
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

    /*
     * This routine assumes that the appropriate zone's lru lock is already held
     */
    
void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
{
    
    	struct page_cgroup *pc;
    
    	struct mem_cgroup_per_zone *mz;
    	unsigned long flags;
    
    
    	if (mem_cgroup_subsys.disabled)
    		return;
    
    
    	/*
    	 * We cannot lock_page_cgroup while holding zone's lru_lock,
    	 * because other holders of lock_page_cgroup can be interrupted
    	 * with an attempt to rotate_reclaimable_page.  But we cannot
    	 * safely get to page_cgroup without it, so just try_lock it:
    	 * mem_cgroup_isolate_pages allows for page left on wrong list.
    	 */
	if (!try_lock_page_cgroup(page))
		return;

    	pc = page_get_page_cgroup(page);
    	if (pc) {
    		mz = page_cgroup_zoneinfo(pc);
    		spin_lock_irqsave(&mz->lru_lock, flags);
    
    		__mem_cgroup_move_lists(pc, lru);
    
    		spin_unlock_irqrestore(&mz->lru_lock, flags);
    
    	}
	unlock_page_cgroup(page);
}

    /*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
     */
    int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
    {
    	long total, rss;
    
    	/*
    	 * usage is recorded in bytes. But, here, we assume the number of
    	 * physical pages can be represented by "long" on any arch.
    	 */
    	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
    	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
    	return (int)((rss * 100L) / total);
    }
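
/*
 * Worked example (illustrative only): with 256 MiB charged to the group
 * (65536 pages of 4 KiB, so total = 65537 after the +1) and 16384 of
 * those pages accounted as RSS, the result is (16384 * 100) / 65537 = 24,
 * i.e. roughly a quarter of the cgroup's usage is mapped memory.
 */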
    
    /*
     * prev_priority control...this will be used in memory reclaim path.
     */
    int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
    {
    	return mem->prev_priority;
    }
    
    void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
    {
    	if (priority < mem->prev_priority)
    		mem->prev_priority = priority;
    }
    
    void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
    {
    	mem->prev_priority = priority;
    }
    
    
    /*
     * Calculate # of pages to be scanned in this priority/zone.
     * See also vmscan.c
     *
     * priority starts from "DEF_PRIORITY" and decremented in each loop.
     * (see include/linux/mmzone.h)
     */
    
    
    long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru)
{
	unsigned long nr_pages;
	int nid = zone->zone_pgdat->node_id;
    	int zid = zone_idx(zone);
    	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
    
    
    	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
    
	return (nr_pages >> priority);
}
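
/*
 * Worked example (illustrative only): with 8192 pages on this cgroup's
 * per-zone inactive file list, the first reclaim pass (priority ==
 * DEF_PRIORITY == 12) requests 8192 >> 12 = 2 pages; as priority drops
 * toward 0, the whole list becomes eligible for scanning.
 */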
    
    unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
    					struct list_head *dst,
    					unsigned long *scanned, int order,
    					int mode, struct zone *z,
    					struct mem_cgroup *mem_cont,
    
    					int active, int file)
    
    {
    	unsigned long nr_taken = 0;
    	struct page *page;
    	unsigned long scan;
    	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
    	int nid = z->zone_pgdat->node_id;
    	int zid = zone_idx(z);
    	struct mem_cgroup_per_zone *mz;
    
    	int lru = LRU_FILE * !!file + !!active;
    
    	BUG_ON(!mem_cont);
    
    	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
    
    	src = &mz->lists[lru];
    
	spin_lock(&mz->lru_lock);
	scan = 0;
    	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
    
		if (scan >= nr_to_scan)
			break;

		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

    		/*
    		 * TODO: play better with lumpy reclaim, grabbing anything.
    		 */
    
    		if (PageUnevictable(page) ||
    		    (PageActive(page) && !active) ||
    		    (!PageActive(page) && active)) {
			__mem_cgroup_move_lists(pc, page_lru(page));
			continue;
		}

    		scan++;
    		list_move(&pc->lru, &pc_list);
    
    		if (__isolate_lru_page(page, mode, file) == 0) {
    
    			list_move(&page->lru, dst);
    			nr_taken++;
    		}
    	}
    
	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}

    /*
     * Charge the memory controller for page usage.
     * Return
     * 0 if the charge was successful
     * < 0 if the cgroup is over its limit
     */
    
    static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
    
    				gfp_t gfp_mask, enum charge_type ctype,
    				struct mem_cgroup *memcg)
    
    {
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;
    
    	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
    
	if (unlikely(pc == NULL))
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
    	 * The mm_struct's mem_cgroup changes on task migration if the
    
    	 * thread group leader migrates. It's possible that mm is not
    	 * set, if so charge the init_mm (happens for pagecache usage).
    	 */
    
    	if (likely(!memcg)) {
    
    		rcu_read_lock();
    		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
    
    		if (unlikely(!mem)) {
    			rcu_read_unlock();
    			kmem_cache_free(page_cgroup_cache, pc);
    			return 0;
    		}
    
    		/*
    		 * For every charge from the cgroup, increment reference count
    		 */
    		css_get(&mem->css);
    		rcu_read_unlock();
    	} else {
    		mem = memcg;
    		css_get(&memcg->css);
    	}
    
    	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
    
    		if (!(gfp_mask & __GFP_WAIT))
    			goto out;
    
    
		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
    		 * try_to_free_mem_cgroup_pages() might not give us a full
    		 * picture of reclaim. Some pages are reclaimed and might be
    		 * moved to swap cache or just unmapped from the cgroup.
    		 * Check the limit again to see if the reclaim reduced the
    		 * current usage of the cgroup before giving up
    		 */
    
    		if (res_counter_check_under_limit(&mem->res))
    			continue;
    
    
    		if (!nr_retries--) {
    			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
    	}
    
    	pc->mem_cgroup = mem;
    	pc->page = page;
    
    	/*
    	 * If a page is accounted as a page cache, insert to inactive list.
    	 * If anon, insert to active list.
    	 */
    
    	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) {
    
    		pc->flags = PAGE_CGROUP_FLAG_CACHE;
    
    		if (page_is_file_cache(page))
    			pc->flags |= PAGE_CGROUP_FLAG_FILE;
    		else
    			pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
    	} else if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
    
    		pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
    
    	else /* MEM_CGROUP_CHARGE_TYPE_SHMEM */
    		pc->flags = PAGE_CGROUP_FLAG_CACHE | PAGE_CGROUP_FLAG_ACTIVE;
    
    	lock_page_cgroup(page);
    
    	if (unlikely(page_get_page_cgroup(page))) {
    
    		unlock_page_cgroup(page);
    
    		res_counter_uncharge(&mem->res, PAGE_SIZE);
    		css_put(&mem->css);
    
    		kmem_cache_free(page_cgroup_cache, pc);
    
		goto done;
	}

    	page_assign_page_cgroup(page, pc);
    
    	mz = page_cgroup_zoneinfo(pc);
    	spin_lock_irqsave(&mz->lru_lock, flags);
    
    	__mem_cgroup_add_list(mz, pc);
    
    	spin_unlock_irqrestore(&mz->lru_lock, flags);
    
    	unlock_page_cgroup(page);
    
    done:
    	return 0;
    
    out:
    	css_put(&mem->css);
    
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
    
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    
    	/*
    	 * If already mapped, we don't have to account.
    	 * If page cache, page->mapping has address_space.
	 * But page->mapping may hold a stale anon_vma pointer; detect that
	 * case with the PageAnon() check. A newly mapped anonymous page has
	 * a NULL page->mapping.
	 */
    	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
    		return 0;
    	if (unlikely(!mm))
    		mm = &init_mm;
    
    	return mem_cgroup_charge_common(page, mm, gfp_mask,
    
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

    int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
    
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    
    	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(), but some filesystems (shmem) precharge the
	 * page before calling it and then call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * In the GFP_NOWAIT case the page may already have been charged
	 * (see shmem.c), so check for an existing page_cgroup here to avoid
	 * charging it twice. (It works, but costs an extra lookup.)
    	 */
    	if (!(gfp_mask & __GFP_WAIT)) {
    		struct page_cgroup *pc;
    
    		lock_page_cgroup(page);
    		pc = page_get_page_cgroup(page);
    		if (pc) {
    			VM_BUG_ON(pc->page != page);
    			VM_BUG_ON(!pc->mem_cgroup);
    			unlock_page_cgroup(page);
    			return 0;
    		}
    		unlock_page_cgroup(page);
    	}
    
    
	if (unlikely(!mm))
		mm = &init_mm;

    	return mem_cgroup_charge_common(page, mm, gfp_mask,
    
    				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
    }
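
/*
 * Illustrative sketch (not part of the original file) of the shmem corner
 * case handled above: the filesystem may charge the page first and only
 * then insert it into the page cache with GFP_NOWAIT, so the second,
 * nested charge attempt must notice the existing page_cgroup and return
 * early instead of charging the page twice:
 *
 *	mem_cgroup_cache_charge(page, current->mm, gfp_mask);
 *	...
 *	add_to_page_cache(page, mapping, index, GFP_NOWAIT);
 *		-> mem_cgroup_cache_charge(page, current->mm, GFP_NOWAIT)
 *		   sees page_get_page_cgroup(page) != NULL and returns 0.
 */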
    
    
/*
 * uncharge if !page_mapped(page)
 */
    static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid
	 */
    	lock_page_cgroup(page);
    	pc = page_get_page_cgroup(page);
    
    	if (unlikely(!pc))
    
    		goto unlock;
    
    	VM_BUG_ON(pc->page != page);
    
    
    	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
    	    && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
    		|| page_mapped(page)))
    		goto unlock;
    
    	mz = page_cgroup_zoneinfo(pc);
    	spin_lock_irqsave(&mz->lru_lock, flags);
    	__mem_cgroup_remove_list(mz, pc);
    	spin_unlock_irqrestore(&mz->lru_lock, flags);
    
    	page_assign_page_cgroup(page, NULL);
    	unlock_page_cgroup(page);
    
    	mem = pc->mem_cgroup;
    	res_counter_uncharge(&mem->res, PAGE_SIZE);
    	css_put(&mem->css);
    
    	kmem_cache_free(page_cgroup_cache, pc);
	return;
unlock:
    	unlock_page_cgroup(page);
    }
    
    
    void mem_cgroup_uncharge_page(struct page *page)
    {
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
    }
    
    void mem_cgroup_uncharge_cache_page(struct page *page)
    {
    	VM_BUG_ON(page_mapped(page));
    
    	VM_BUG_ON(page->mapping);
    
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
    }
    
    
/*
 * Before starting migration, account against new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
    	enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
    	int ret = 0;
    
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    
    	lock_page_cgroup(page);
    	pc = page_get_page_cgroup(page);
    
    	if (pc) {
    		mem = pc->mem_cgroup;
    		css_get(&mem->css);
    
    		if (pc->flags & PAGE_CGROUP_FLAG_CACHE) {
    			if (page_is_file_cache(page))
    				ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
    			else
    				ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
		}
	}
	unlock_page_cgroup(page);
	if (mem) {
    		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
    			ctype, mem);
    		css_put(&mem->css);
    	}
	return ret;
}

/* remove redundant charge if migration failed */
    
void mem_cgroup_end_migration(struct page *newpage)
{
    
    	/*
    	 * At success, page->mapping is not NULL.
    	 * special rollback care is necessary when
    	 * 1. at migration failure. (newpage->mapping is cleared in this case)
    	 * 2. the newpage was moved but not remapped again because the task
    	 *    exits and the newpage is obsolete. In this case, the new page
	 *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
	 *    unconditionally to avoid a mess; the page_cgroup will be removed
	 *    only if it is unnecessary. File cache pages are still on the
	 *    radix-tree and need no special care.
    	 */
    	if (!newpage->mapping)
    		__mem_cgroup_uncharge_common(newpage,
    					 MEM_CGROUP_CHARGE_TYPE_FORCE);
    	else if (PageAnon(newpage))
		mem_cgroup_uncharge_page(newpage);
}
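
/*
 * Illustrative sketch (not part of the original file) of how the two
 * migration helpers above are intended to be used by the page migration
 * code:
 *
 *	mem_cgroup_prepare_migration(page, newpage);
 *		charges newpage against the cgroup that currently owns page
 *	... copy contents, try to remap newpage ...
 *	mem_cgroup_end_migration(newpage);
 *		drops the extra charge again if newpage was never remapped
 *		(newpage->mapping == NULL) or ended up as an obsolete
 *		anonymous page.
 */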
    
    /*
 * A call to try to shrink memory usage under a specified resource controller.
 * This is typically used to reclaim shmem pages, to reduce the side effect
 * of shmem page allocations on whatever mem_cgroup they are charged to.
     */
    int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
    {
    	struct mem_cgroup *mem;
    	int progress = 0;
    	int retry = MEM_CGROUP_RECLAIM_RETRIES;
    
    
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    	if (!mm)
    		return 0;
    
    	rcu_read_lock();
    	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
    
    	if (unlikely(!mem)) {
    		rcu_read_unlock();
    		return 0;
    	}
    
    	css_get(&mem->css);
    	rcu_read_unlock();
    
    	do {
    		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
    
    		progress += res_counter_check_under_limit(&mem->res);
    
    	} while (!progress && --retry);
    
    	css_put(&mem->css);
    	if (!retry)
    		return -ENOMEM;
    	return 0;
    }
    
    
    int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
    {
    
    	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
    	int progress;
    	int ret = 0;
    
    	while (res_counter_set_limit(&memcg->res, val)) {
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			break;
    		}
    		if (!retry_count) {
    			ret = -EBUSY;
    			break;
    		}
    		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
    		if (!progress)
    			retry_count--;
    	}
    	return ret;
    }
    
    
    
    /*
     * This routine traverse page_cgroup in given list and drop them all.
     * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
     */
    #define FORCE_UNCHARGE_BATCH	(128)
    
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    enum lru_list lru)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	list = &mz->lists[lru];

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
    
    		pc = list_entry(list->prev, struct page_cgroup, lru);
    		page = pc->page;
    
    		get_page(page);
    		spin_unlock_irqrestore(&mz->lru_lock, flags);
    
    		/*
    		 * Check if this page is on LRU. !LRU page can be found
    		 * if it's under page migration.
    		 */
    		if (PageLRU(page)) {
    
    			__mem_cgroup_uncharge_common(page,
    					MEM_CGROUP_CHARGE_TYPE_FORCE);
    
    			put_page(page);
    			if (--count <= 0) {
    				count = FORCE_UNCHARGE_BATCH;
    				cond_resched();
    			}
    		} else
    
    			cond_resched();
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
    	spin_unlock_irqrestore(&mz->lru_lock, flags);
    
    }
    
    /*
     * make mem_cgroup's charge to be 0 if there is no task.
     * This enables deleting this mem_cgroup.
     */
    
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * page reclaim code (kswapd etc..) will move pages between
    
    	 * active_list <-> inactive_list while we don't take a lock.
    
	 * So, we have to loop here until all the lists are empty.
    	 */
    
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
    			goto out;
    
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				enum lru_list l;

				mz = mem_cgroup_zoneinfo(mem, node, zid);
				for_each_lru(l)
					mem_cgroup_force_empty_list(mem, mz, l);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
    	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

    /*
     * The user of this function is...
     * RES_LIMIT.
     */
    
    static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
    
    	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
    	unsigned long long val;
    	int ret;
    
    	switch (cft->private) {
    	case RES_LIMIT:
    		/* This function does all necessary parse...reuse it */
    		ret = res_counter_memparse_write_strategy(buffer, &val);
    		if (!ret)
    			ret = mem_cgroup_resize_limit(memcg, val);
    		break;
    	default:
    		ret = -EINVAL; /* should be BUG() ? */
    		break;
    	}
	return ret;
}
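
/*
 * Illustrative usage (not part of the original file): from userspace the
 * limit is set by writing a human-readable value to the cgroup control
 * file, e.g.
 *
 *	echo 64M > <cgroup mount>/<group>/memory.limit_in_bytes
 *
 * res_counter_memparse_write_strategy() parses the suffixed value and
 * mem_cgroup_resize_limit() retries direct reclaim until the new limit
 * can be applied (or gives up with -EBUSY/-EINTR).
 */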
    
    static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
    
    {
    	struct mem_cgroup *mem;
    
    	mem = mem_cgroup_from_cont(cont);
    
    	switch (event) {
    	case RES_MAX_USAGE:
    		res_counter_reset_max(&mem->res);
    		break;
    	case RES_FAILCNT:
    		res_counter_reset_failcnt(&mem->res);
    		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}
    
    static const struct mem_cgroup_stat_desc {
    	const char *msg;
    	u64 unit;
    } mem_cgroup_stat_desc[] = {
    	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
    	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
    
    	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
};

    static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
    				 struct cgroup_map_cb *cb)
    
    {
    	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
    	struct mem_cgroup_stat *stat = &mem_cont->stat;
    	int i;