/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
     * This program is free software; you can redistribute it and/or modify
     * it under the terms of the GNU General Public License as published by
     * the Free Software Foundation; either version 2 of the License, or
     * (at your option) any later version.
     *
     * This program is distributed in the hope that it will be useful,
     * but WITHOUT ANY WARRANTY; without even the implied warranty of
     * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     * GNU General Public License for more details.
     */
    
    #include <linux/res_counter.h>
    #include <linux/memcontrol.h>
    #include <linux/cgroup.h>
    
    #include <linux/mm.h>
    
    #include <linux/page-flags.h>
    
    #include <linux/backing-dev.h>
    
    #include <linux/bit_spinlock.h>
    #include <linux/rcupdate.h>
    
    #include <linux/swap.h>
    #include <linux/spinlock.h>
    #include <linux/fs.h>
    
    #include <linux/seq_file.h>
    
    #include <linux/vmalloc.h>
    
    #include <linux/mm_inline.h>
    
    #include <linux/page_cgroup.h>
    
    struct cgroup_subsys mem_cgroup_subsys __read_mostly;
    #define MEM_CGROUP_RECLAIM_RETRIES	5
    
    /*
     * Statistics for memory cgroup.
     */
    enum mem_cgroup_stat_index {
    	/*
    	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
    	 */
    	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
    	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as rss */
    
    	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
    	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
    
    
    	MEM_CGROUP_STAT_NSTATS,
    };
    
    struct mem_cgroup_stat_cpu {
    	s64 count[MEM_CGROUP_STAT_NSTATS];
    } ____cacheline_aligned_in_smp;
    
    struct mem_cgroup_stat {
    
    	struct mem_cgroup_stat_cpu cpustat[0];
    
    };
    
    /*
 * For accounting with irqs disabled, there is no need to increment the preempt count.
     */
    
static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	stat->count[idx] += val;
}
    
    static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
    		enum mem_cgroup_stat_index idx)
    {
    	int cpu;
    	s64 ret = 0;
    	for_each_possible_cpu(cpu)
    		ret += stat->cpustat[cpu].count[idx];
    	return ret;
    }
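
/*
 * Illustrative note (not part of the original file): because each CPU owns
 * its own counter array, a +1 recorded on one CPU and a -1 recorded later on
 * another CPU still sum to zero here; readers always walk all possible CPUs.
 */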
    
    
    /*
     * per-zone information in memory controller.
     */
    struct mem_cgroup_per_zone {
    
    	/*
    	 * spin_lock to protect the per cgroup LRU
    	 */
    	spinlock_t		lru_lock;
    
    	struct list_head	lists[NR_LRU_LISTS];
    	unsigned long		count[NR_LRU_LISTS];
    
    };
    /* Macro for accessing counter */
    #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
    
    struct mem_cgroup_per_node {
    	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
    };
    
    struct mem_cgroup_lru_info {
    	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
    };
    
    
    /*
     * The memory controller data structure. The memory controller controls both
     * page cache and RSS per cgroup. We would eventually like to provide
     * statistics based on the statistics developed by Rik Van Riel for clock-pro,
     * to help the administrator determine what knobs to tune.
     *
     * TODO: Add a water mark for the memory controller. Reclaim will begin when
    
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
    
     */
    struct mem_cgroup {
    	struct cgroup_subsys_state css;
    	/*
    	 * the counter to account for memory usage
    	 */
    	struct res_counter res;
    
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */

	/*
	 * statistics. This must be placed at the end of memcg.
	 */
	struct mem_cgroup_stat stat;
};

    enum charge_type {
    	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
    	MEM_CGROUP_CHARGE_TYPE_MAPPED,
    
    	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
    
    	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
    	NR_CHARGE_TYPE,
    };
    
    
/* only used in this file (for easy reading) */
    #define PCGF_CACHE	(1UL << PCG_CACHE)
    #define PCGF_USED	(1UL << PCG_USED)
    #define PCGF_ACTIVE	(1UL << PCG_ACTIVE)
    #define PCGF_LOCK	(1UL << PCG_LOCK)
    #define PCGF_FILE	(1UL << PCG_FILE)
    
    static const unsigned long
    pcg_default_flags[NR_CHARGE_TYPE] = {
    
    	PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
    	PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
    	PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
	0, /* FORCE */
};

    /*
 * Always modified under the lru lock, so there is no need to preempt_disable().
     */
    
    static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
    					 struct page_cgroup *pc,
    					 bool charge)
    
    {
    	int val = (charge)? 1 : -1;
    	struct mem_cgroup_stat *stat = &mem->stat;
    
    	struct mem_cgroup_stat_cpu *cpustat;
    
    	VM_BUG_ON(!irqs_disabled());
    
    
    	cpustat = &stat->cpustat[smp_processor_id()];
    
	if (PageCgroupCache(pc))
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);

	if (charge)
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
	else
		__mem_cgroup_stat_add_safe(cpustat,
				MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}
    
    static struct mem_cgroup_per_zone *
    
    mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
    {
    	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
    }
    
    
    static struct mem_cgroup_per_zone *
    
    page_cgroup_zoneinfo(struct page_cgroup *pc)
    {
    	struct mem_cgroup *mem = pc->mem_cgroup;
    	int nid = page_cgroup_nid(pc);
    	int zid = page_cgroup_zid(pc);
    
    	return mem_cgroup_zoneinfo(mem, nid, zid);
    }
    
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
    {
    	int nid, zid;
    	struct mem_cgroup_per_zone *mz;
    	u64 total = 0;
    
    	for_each_online_node(nid)
    		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
    			mz = mem_cgroup_zoneinfo(mem, nid, zid);
    			total += MEM_CGROUP_ZSTAT(mz, idx);
    		}
	return total;
}

    static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
    
    {
    	return container_of(cgroup_subsys_state(cont,
    				mem_cgroup_subsys_id), struct mem_cgroup,
    				css);
    }
    
    
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
    	/*
    	 * mm_update_next_owner() may clear mm->owner to NULL
    	 * if it races with swapoff, page migration, etc.
    	 * So this can be called with p == NULL.
    	 */
    	if (unlikely(!p))
    		return NULL;
    
    
    	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
    				struct mem_cgroup, css);
    }
    
    
    static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int lru = LRU_BASE;

    	if (PageCgroupUnevictable(pc))
    
    		lru = LRU_UNEVICTABLE;
    	else {
    
    		if (PageCgroupActive(pc))
    
    			lru += LRU_ACTIVE;
    
    		if (PageCgroupFile(pc))
    
    			lru += LRU_FILE;
    	}
    
    	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
    
    	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
    
	list_del(&pc->lru);
}
    
    static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
    
				struct page_cgroup *pc, bool hot)
{
    	int lru = LRU_BASE;
    
    	if (PageCgroupUnevictable(pc))
    
    		lru = LRU_UNEVICTABLE;
    	else {
    
    		if (PageCgroupActive(pc))
    
    			lru += LRU_ACTIVE;
    
    		if (PageCgroupFile(pc))
    
    			lru += LRU_FILE;
    	}
    
    
    	MEM_CGROUP_ZSTAT(mz, lru) += 1;
    
    	if (hot)
    		list_add(&pc->lru, &mz->lists[lru]);
    	else
    		list_add_tail(&pc->lru, &mz->lists[lru]);
    
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
}
    
static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
{
    	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
    
    	int active    = PageCgroupActive(pc);
    	int file      = PageCgroupFile(pc);
    	int unevictable = PageCgroupUnevictable(pc);
    
    	enum lru_list from = unevictable ? LRU_UNEVICTABLE :
    				(LRU_FILE * !!file + !!active);
    
    	if (lru == from)
    		return;
    
    	MEM_CGROUP_ZSTAT(mz, from) -= 1;
    
    	/*
	 * However, this is done under mz->lru_lock; other flags, which
	 * are not related to the LRU, may be modified from outside the lock.
	 * We have to use atomic set/clear flags.
    	 */
    
    	if (is_unevictable_lru(lru)) {
    
    		ClearPageCgroupActive(pc);
    		SetPageCgroupUnevictable(pc);
    
    	} else {
    		if (is_active_lru(lru))
    
			SetPageCgroupActive(pc);
		else
			ClearPageCgroupActive(pc);
		ClearPageCgroupUnevictable(pc);
	}
    
    	MEM_CGROUP_ZSTAT(mz, lru) += 1;
	list_move(&pc->lru, &mz->lists[lru]);
}
    
    int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
    {
    	int ret;
    
    	task_lock(task);
    
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

    /*
     * This routine assumes that the appropriate zone's lru lock is already held
     */
    
void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
{
    	struct page_cgroup *pc;
    
    	struct mem_cgroup_per_zone *mz;
    	unsigned long flags;
    
    
    	if (mem_cgroup_subsys.disabled)
    		return;
    
    
    	/*
    	 * We cannot lock_page_cgroup while holding zone's lru_lock,
    	 * because other holders of lock_page_cgroup can be interrupted
    	 * with an attempt to rotate_reclaimable_page.  But we cannot
    	 * safely get to page_cgroup without it, so just try_lock it:
    	 * mem_cgroup_isolate_pages allows for page left on wrong list.
    	 */
    
    	pc = lookup_page_cgroup(page);
	if (!trylock_page_cgroup(pc))
		return;

    	if (pc && PageCgroupUsed(pc)) {
    
    		mz = page_cgroup_zoneinfo(pc);
    		spin_lock_irqsave(&mz->lru_lock, flags);
    
    		__mem_cgroup_move_lists(pc, lru);
    
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(pc);
}
    
    /*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
     */
    int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
    {
    	long total, rss;
    
    	/*
    	 * usage is recorded in bytes. But, here, we assume the number of
    	 * physical pages can be represented by "long" on any arch.
    	 */
    	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
    	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
    	return (int)((rss * 100L) / total);
    }
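
/*
 * Worked example (illustrative, not part of the original file): with a usage
 * of 1024 pages (mem->res.usage == 1024 << PAGE_SHIFT), of which 256 pages
 * are RSS, total = 1024 + 1 and the function returns (256 * 100) / 1025,
 * i.e. 24 (about 24% of this cgroup's pages are mapped).
 */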
    
    /*
     * prev_priority control...this will be used in memory reclaim path.
     */
    int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
    {
    	return mem->prev_priority;
    }
    
    void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
    {
    	if (priority < mem->prev_priority)
    		mem->prev_priority = priority;
    }
    
    void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
    {
    	mem->prev_priority = priority;
    }
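
/*
 * Illustrative sketch (not part of the original file): the reclaim path in
 * vmscan.c is expected to use these hooks roughly as follows (exact call
 * sites may differ):
 *
 *	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 *		mem_cgroup_note_reclaim_priority(mem, priority);
 *		... scan this cgroup's LRU lists at this priority ...
 *	}
 *	mem_cgroup_record_reclaim_priority(mem, priority);
 *
 * mem_cgroup_get_reclaim_priority() then lets a later pass start from the
 * pressure level that was previously reached instead of from DEF_PRIORITY.
 */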
    
    
    /*
     * Calculate # of pages to be scanned in this priority/zone.
     * See also vmscan.c
     *
     * priority starts from "DEF_PRIORITY" and decremented in each loop.
     * (see include/linux/mmzone.h)
     */
    
    
    long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
					int priority, enum lru_list lru)
{
	long nr_pages;
    	int nid = zone->zone_pgdat->node_id;
    	int zid = zone_idx(zone);
    	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
    
    
    	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
    
	return (nr_pages >> priority);
}
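
/*
 * Worked example (illustrative, not part of the original file): with 4096
 * pages on the chosen LRU list, MEM_CGROUP_ZSTAT(mz, lru) == 4096, so this
 * returns 4096 >> 12 = 1 page at DEF_PRIORITY (12) and 4096 >> 0 = 4096
 * pages once priority has dropped to 0; the scan target grows as reclaim
 * becomes more aggressive.
 */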
    
    unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
    					struct list_head *dst,
    					unsigned long *scanned, int order,
    					int mode, struct zone *z,
    					struct mem_cgroup *mem_cont,
    
    					int active, int file)
    
    {
    	unsigned long nr_taken = 0;
    	struct page *page;
    	unsigned long scan;
    	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
    	int nid = z->zone_pgdat->node_id;
    	int zid = zone_idx(z);
    	struct mem_cgroup_per_zone *mz;
    
    	int lru = LRU_FILE * !!file + !!active;
    
    	BUG_ON(!mem_cont);
    
    	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
    
    	src = &mz->lists[lru];
    
	spin_lock(&mz->lru_lock);
	scan = 0;
    	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
    
		if (scan >= nr_to_scan)
			break;
    		if (unlikely(!PageCgroupUsed(pc)))
    			continue;
    
		page = pc->page;
		if (unlikely(!PageLRU(page)))
			continue;

    		/*
    		 * TODO: play better with lumpy reclaim, grabbing anything.
    		 */
    
    		if (PageUnevictable(page) ||
    		    (PageActive(page) && !active) ||
    		    (!PageActive(page) && active)) {
			__mem_cgroup_move_lists(pc, page_lru(page));
			continue;
		}

    		scan++;
    		list_move(&pc->lru, &pc_list);
    
    		if (__isolate_lru_page(page, mode, file) == 0) {
    
    			list_move(&page->lru, dst);
    			nr_taken++;
    		}
    	}
    
	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}
    
    /*
 * Unlike the exported interface, an "oom" parameter is added. If oom == true,
 * the oom-killer can be invoked.
 */
    static int __mem_cgroup_try_charge(struct mm_struct *mm,
    			gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
    
    {
    	struct mem_cgroup *mem;
    
    	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
    
	/*
	 * We always charge the cgroup the mm_struct belongs to.
    	 * The mm_struct's mem_cgroup changes on task migration if the
    
    	 * thread group leader migrates. It's possible that mm is not
    	 * set, if so charge the init_mm (happens for pagecache usage).
    	 */
    
	if (likely(!*memcg)) {
		rcu_read_lock();
    		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
    
    		if (unlikely(!mem)) {
    			rcu_read_unlock();
    			return 0;
    		}
    
    		/*
    		 * For every charge from the cgroup, increment reference count
    		 */
		css_get(&mem->css);
		*memcg = mem;
		rcu_read_unlock();
    	} else {
    
    		mem = *memcg;
		css_get(&mem->css);
	}

    	while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
    
		if (!(gfp_mask & __GFP_WAIT))
			goto nomem;
    
		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
    		 * try_to_free_mem_cgroup_pages() might not give us a full
    		 * picture of reclaim. Some pages are reclaimed and might be
    		 * moved to swap cache or just unmapped from the cgroup.
    		 * Check the limit again to see if the reclaim reduced the
    		 * current usage of the cgroup before giving up
    		 */
    
    		if (res_counter_check_under_limit(&mem->res))
    			continue;
    
    
    		if (!nr_retries--) {
    
    			if (oom)
				mem_cgroup_out_of_memory(mem, gfp_mask);
			goto nomem;
		}
	}
	return 0;
    nomem:
    	css_put(&mem->css);
    	return -ENOMEM;
    }
    
    /**
     * mem_cgroup_try_charge - get charge of PAGE_SIZE.
     * @mm: an mm_struct which is charged against. (when *memcg is NULL)
     * @gfp_mask: gfp_mask for reclaim.
     * @memcg: a pointer to memory cgroup which is charged against.
     *
 * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
 * the memory cgroup is looked up from @mm and stored in *memcg.
 *
 * Returns 0 on success, -ENOMEM on failure.
     * This call can invoke OOM-Killer.
     */
    
    int mem_cgroup_try_charge(struct mm_struct *mm,
    			  gfp_t mask, struct mem_cgroup **memcg)
    {
    	return __mem_cgroup_try_charge(mm, mask, memcg, true);
    }
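
/*
 * Illustrative usage sketch (not part of the original file): callers pair
 * the try/commit steps and cancel the pre-charge on failure. Assuming a
 * hypothetical swap-in style caller:
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &mem))
 *		return -ENOMEM;
 *	...
 *	if (mapping_failed)
 *		mem_cgroup_cancel_charge_swapin(mem);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, mem);
 *
 * mem_cgroup_commit_charge_swapin() and mem_cgroup_cancel_charge_swapin()
 * are defined later in this file; "mapping_failed" is only a placeholder.
 */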
    
    
    /*
 * Commit a charge obtained by mem_cgroup_try_charge() and make the
 * page_cgroup USED. If the page_cgroup is already USED, uncharge and return.
     */
    
    static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
    				     struct page_cgroup *pc,
    				     enum charge_type ctype)
    {
    	struct mem_cgroup_per_zone *mz;
    	unsigned long flags;
    
    	/* try_charge() can return NULL to *memcg, taking care of it. */
    	if (!mem)
    		return;
    
    
    	lock_page_cgroup(pc);
    	if (unlikely(PageCgroupUsed(pc))) {
    		unlock_page_cgroup(pc);
    		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		return;
	}

    	pc->mem_cgroup = mem;
    
    	/*
    	 * If a page is accounted as a page cache, insert to inactive list.
    	 * If anon, insert to active list.
    	 */
    
    	pc->flags = pcg_default_flags[ctype];
    
	mz = page_cgroup_zoneinfo(pc);

	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc, true);
    
    	spin_unlock_irqrestore(&mz->lru_lock, flags);
    
	unlock_page_cgroup(pc);
}
    
    /**
     * mem_cgroup_move_account - move account of the page
     * @pc:	page_cgroup of the page.
     * @from: mem_cgroup which the page is moved from.
     * @to:	mem_cgroup which the page is moved to. @from != @to.
     *
     * The caller must confirm following.
     * 1. disable irq.
     * 2. lru_lock of old mem_cgroup(@from) should be held.
     *
 * Returns 0 on success,
 * returns -EBUSY when the lock is busy or "pc" is unstable.
     *
     * This function does "uncharge" from old cgroup but doesn't do "charge" to
     * new cgroup. It should be done by a caller.
     */
    
    static int mem_cgroup_move_account(struct page_cgroup *pc,
    	struct mem_cgroup *from, struct mem_cgroup *to)
    {
    	struct mem_cgroup_per_zone *from_mz, *to_mz;
    	int nid, zid;
    	int ret = -EBUSY;
    
    	VM_BUG_ON(!irqs_disabled());
    	VM_BUG_ON(from == to);
    
    	nid = page_cgroup_nid(pc);
    	zid = page_cgroup_zid(pc);
    	from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
    	to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
    
    
    	if (!trylock_page_cgroup(pc))
    		return ret;
    
    	if (!PageCgroupUsed(pc))
    		goto out;
    
    	if (pc->mem_cgroup != from)
    		goto out;
    
    	if (spin_trylock(&to_mz->lru_lock)) {
    		__mem_cgroup_remove_list(from_mz, pc);
    		css_put(&from->css);
    		res_counter_uncharge(&from->res, PAGE_SIZE);
    		pc->mem_cgroup = to;
    		css_get(&to->css);
    		__mem_cgroup_add_list(to_mz, pc, false);
    		ret = 0;
    		spin_unlock(&to_mz->lru_lock);
    	}
    out:
    	unlock_page_cgroup(pc);
    	return ret;
    }
    
    /*
     * move charges to its parent.
     */
    
    static int mem_cgroup_move_parent(struct page_cgroup *pc,
    				  struct mem_cgroup *child,
    				  gfp_t gfp_mask)
    {
    	struct cgroup *cg = child->css.cgroup;
    	struct cgroup *pcg = cg->parent;
    	struct mem_cgroup *parent;
    	struct mem_cgroup_per_zone *mz;
    	unsigned long flags;
    	int ret;
    
    	/* Is ROOT ? */
    	if (!pcg)
    		return -EINVAL;
    
    	parent = mem_cgroup_from_cont(pcg);
    
    	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
    	if (ret)
    		return ret;
    
    	mz = mem_cgroup_zoneinfo(child,
    			page_cgroup_nid(pc), page_cgroup_zid(pc));
    
    	spin_lock_irqsave(&mz->lru_lock, flags);
    	ret = mem_cgroup_move_account(pc, child, parent);
    	spin_unlock_irqrestore(&mz->lru_lock, flags);
    
    	/* drop extra refcnt */
    	css_put(&parent->css);
    	/* uncharge if move fails */
    	if (ret)
    		res_counter_uncharge(&parent->res, PAGE_SIZE);
    
    	return ret;
    }
    
    
    /*
     * Charge the memory controller for page usage.
     * Return
     * 0 if the charge was successful
     * < 0 if the cgroup is over its limit
     */
    static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
    				gfp_t gfp_mask, enum charge_type ctype,
    				struct mem_cgroup *memcg)
    {
    	struct mem_cgroup *mem;
    	struct page_cgroup *pc;
    	int ret;
    
    	pc = lookup_page_cgroup(page);
    	/* can happen at boot */
    	if (unlikely(!pc))
    		return 0;
    	prefetchw(pc);
    
    	mem = memcg;
    
    	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
    
    	if (ret)
    		return ret;
    
	__mem_cgroup_commit_charge(mem, pc, ctype);
	return 0;
}

    int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    	if (PageCompound(page))
    		return 0;
    
    	/*
    	 * If already mapped, we don't have to account.
    	 * If page cache, page->mapping has address_space.
	 * But page->mapping may have an out-of-use anon_vma pointer;
	 * detect it with the PageAnon() check. A newly-mapped anonymous
	 * page's page->mapping is NULL.
	 */
    	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
    		return 0;
    	if (unlikely(!mm))
    		mm = &init_mm;
    
    	return mem_cgroup_charge_common(page, mm, gfp_mask,
    
				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

    int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    	if (PageCompound(page))
    		return 0;
    
    	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some filesystems (shmem) pre-charge the
	 * page before calling it and then call add_to_page_cache() with
	 * GFP_NOWAIT.
	 *
	 * For the GFP_NOWAIT case, the page may already have been charged
	 * before calling add_to_page_cache() (see shmem.c). Check that here
	 * and avoid charging twice. (It works, but costs a little more.)
    	 */
    	if (!(gfp_mask & __GFP_WAIT)) {
    		struct page_cgroup *pc;
    
    
    
    		pc = lookup_page_cgroup(page);
    		if (!pc)
    			return 0;
    		lock_page_cgroup(pc);
    		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}

	if (unlikely(!mm))
		mm = &init_mm;

    	if (page_is_file_cache(page))
    		return mem_cgroup_charge_common(page, mm, gfp_mask,
    
    				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
    
    	else
    		return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
}

    void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
    {
    	struct page_cgroup *pc;
    
    	if (mem_cgroup_subsys.disabled)
    		return;
    	if (!ptr)
    		return;
    	pc = lookup_page_cgroup(page);
    	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
    }
    
    void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
    {
    	if (mem_cgroup_subsys.disabled)
    		return;
    	if (!mem)
    		return;
    	res_counter_uncharge(&mem->res, PAGE_SIZE);
    	css_put(&mem->css);
    }
    
    
    
/*
 * uncharge if !page_mapped(page)
 */
    static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
    	struct page_cgroup *pc;
    
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;
    
	/*
	 * Check if our page_cgroup is valid
	 */
    	pc = lookup_page_cgroup(page);
    	if (unlikely(!pc || !PageCgroupUsed(pc)))
    		return;
    
    	lock_page_cgroup(pc);
    	if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
    	     || !PageCgroupUsed(pc)) {
    		/* This happens at race in zap_pte_range() and do_swap_page()*/
    		unlock_page_cgroup(pc);
    		return;
    	}
    	ClearPageCgroupUsed(pc);
    	mem = pc->mem_cgroup;
    
    	mz = page_cgroup_zoneinfo(pc);
    	spin_lock_irqsave(&mz->lru_lock, flags);
    	__mem_cgroup_remove_list(mz, pc);
    	spin_unlock_irqrestore(&mz->lru_lock, flags);
    
    	unlock_page_cgroup(pc);
    
    	res_counter_uncharge(&mem->res, PAGE_SIZE);
	css_put(&mem->css);
}

    void mem_cgroup_uncharge_page(struct page *page)
    {
    
    	/* early check. */
    	if (page_mapped(page))
    		return;
    	if (page->mapping && !PageAnon(page))
    		return;
    
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
    }
    
    void mem_cgroup_uncharge_cache_page(struct page *page)
    {
    	VM_BUG_ON(page_mapped(page));
    
    	VM_BUG_ON(page->mapping);
    
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
    }
    
    
/*
 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the old
 * page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	struct page_cgroup *pc;
    	struct mem_cgroup *mem = NULL;
    	int ret = 0;
    
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    
    	pc = lookup_page_cgroup(page);
    	lock_page_cgroup(pc);
    	if (PageCgroupUsed(pc)) {
    
    		mem = pc->mem_cgroup;
    		css_get(&mem->css);
    	}
    
    	unlock_page_cgroup(pc);
    
	if (mem) {
		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
    
    		css_put(&mem->css);
    	}
    
    	*ptr = mem;
    
	return ret;
}

    /* remove redundant charge if migration failed*/
    
    void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage)
{
    	struct page *target, *unused;
    	struct page_cgroup *pc;
    	enum charge_type ctype;
    
    	if (!mem)
    		return;
    
    	/* at migration success, oldpage->mapping is NULL. */
    	if (oldpage->mapping) {
    		target = oldpage;
    		unused = NULL;
    	} else {
    		target = newpage;
    		unused = oldpage;
    	}
    
    	if (PageAnon(target))
    		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
    	else if (page_is_file_cache(target))
    		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
    	else
    		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
    
    	/* unused page is not on radix-tree now. */
    	if (unused && ctype != MEM_CGROUP_CHARGE_TYPE_MAPPED)
    		__mem_cgroup_uncharge_common(unused, ctype);
    
    	pc = lookup_page_cgroup(target);
    
	/*
	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
    	 * So, double-counting is effectively avoided.
    	 */
    	__mem_cgroup_commit_charge(mem, pc, ctype);
    
    	/*
    	 * Both of oldpage and newpage are still under lock_page().
    	 * Then, we don't have to care about race in radix-tree.
    	 * But we have to be careful that this page is unmapped or not.
    	 *
    	 * There is a case for !page_mapped(). At the start of
    	 * migration, oldpage was mapped. But now, it's zapped.
    	 * But we know *target* page is not freed/reused under us.
	 * mem_cgroup_uncharge_page() does all necessary checks.
	 */
    	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
		mem_cgroup_uncharge_page(target);
}

    /*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used for shmem page reclaim, to reduce the
 * side effects of page allocation from shmem, which may be charged to some
 * mem_cgroup.
     */
    int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
    {
    	struct mem_cgroup *mem;
    	int progress = 0;
    	int retry = MEM_CGROUP_RECLAIM_RETRIES;
    
    
    	if (mem_cgroup_subsys.disabled)
    		return 0;
    
    	if (!mm)
    		return 0;
    
    	rcu_read_lock();
    	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
    
    	if (unlikely(!mem)) {
    		rcu_read_unlock();
    		return 0;
    	}
    
    	css_get(&mem->css);
    	rcu_read_unlock();
    
    	do {
    		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
    
    		progress += res_counter_check_under_limit(&mem->res);
    
    	} while (!progress && --retry);
    
    	css_put(&mem->css);
    	if (!retry)
    		return -ENOMEM;
    	return 0;
    }
    
    
    static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
    				   unsigned long long val)
    
    {
    
    	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
    	int progress;
    	int ret = 0;
    
    	while (res_counter_set_limit(&memcg->res, val)) {
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			break;
    		}
    		if (!retry_count) {
    			ret = -EBUSY;
    			break;
    		}
    
    		progress = try_to_free_mem_cgroup_pages(memcg,