    		return vm_swappiness;
    
    	spin_lock(&memcg->reclaim_param_lock);
    	swappiness = memcg->swappiness;
    	spin_unlock(&memcg->reclaim_param_lock);
    
    	return swappiness;
    }
    
    
    static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
    {
    	int *val = data;
    	(*val)++;
    	return 0;
    }
    
    
    /**
      * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
     * @memcg: The memory cgroup that went over limit
     * @p: Task that is going to be killed
     *
     * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
     * enabled
     */
    void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
    {
    	struct cgroup *task_cgrp;
    	struct cgroup *mem_cgrp;
    	/*
    	 * Need a buffer in BSS, can't rely on allocations. The code relies
    	 * on the assumption that OOM is serialized for memory controller.
    	 * If this assumption is broken, revisit this code.
    	 */
    	static char memcg_name[PATH_MAX];
    	int ret;
    
    
     	if (!memcg || !p)
     		return;
    
    
    	rcu_read_lock();
    
    	mem_cgrp = memcg->css.cgroup;
    	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
    
    	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
    	if (ret < 0) {
    		/*
     		 * Unfortunately, we are unable to convert to a useful name,
     		 * but we'll still print out the usage information.
    		 */
    		rcu_read_unlock();
    		goto done;
    	}
    	rcu_read_unlock();
    
    	printk(KERN_INFO "Task in %s killed", memcg_name);
    
    	rcu_read_lock();
    	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
    	if (ret < 0) {
    		rcu_read_unlock();
    		goto done;
    	}
    	rcu_read_unlock();
    
    	/*
     	 * Continues from above, so we don't need a KERN_ level
    	 */
    	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
    done:
    
    	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
    		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
    		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
    		res_counter_read_u64(&memcg->res, RES_FAILCNT));
    	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
    		"failcnt %llu\n",
    		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
    		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
    		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
    }
    
    
     /*
      * This function returns the number of memcgs under the hierarchy tree.
      * Returns 1 (self count) if there are no children.
      */
    static int mem_cgroup_count_children(struct mem_cgroup *mem)
    {
    	int num = 0;
     	mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
    	return num;
    }
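
     /*
      * Illustrative usage sketch (an assumption, not taken from this file):
      * mem_cgroup_walk_tree() applies the callback to @mem and to every
      * descendant, passing @data through untouched, so per-hierarchy
      * aggregates all follow the shape of mem_cgroup_count_children() above.
      * A hypothetical walker that records the most recent last_oom_jiffies
      * in the tree could look like:
      *
      *	static int latest_oom_cb(struct mem_cgroup *mem, void *data)
      *	{
      *		unsigned long *latest = data;
      *
      *		if (time_after(mem->last_oom_jiffies, *latest))
      *			*latest = mem->last_oom_jiffies;
      *		return 0;
      *	}
      *
      * used as mem_cgroup_walk_tree(mem, &latest, latest_oom_cb).
      */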
    
    
     /*
     * Visit the first child (need not be the first child as per the ordering
     * of the cgroup list, since we track last_scanned_child) of @mem and use
     * that to reclaim free pages from.
     */
    static struct mem_cgroup *
    mem_cgroup_select_victim(struct mem_cgroup *root_mem)
    {
    	struct mem_cgroup *ret = NULL;
    	struct cgroup_subsys_state *css;
    	int nextid, found;
    
    	if (!root_mem->use_hierarchy) {
    		css_get(&root_mem->css);
    		ret = root_mem;
    	}
    
    	while (!ret) {
    		rcu_read_lock();
    		nextid = root_mem->last_scanned_child + 1;
    		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
    				   &found);
    		if (css && css_tryget(css))
    			ret = container_of(css, struct mem_cgroup, css);
    
    		rcu_read_unlock();
    		/* Updates scanning parameter */
    		spin_lock(&root_mem->reclaim_param_lock);
    		if (!css) {
    			/* this means start scan from ID:1 */
    			root_mem->last_scanned_child = 0;
    		} else
    			root_mem->last_scanned_child = found;
    		spin_unlock(&root_mem->reclaim_param_lock);
    	}
    
    	return ret;
    }
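
     /*
      * Illustrative usage sketch (an assumption, not taken from this file):
      * callers are expected to loop, reclaim from the returned group, and drop
      * the reference taken by mem_cgroup_select_victim(), roughly as
      * mem_cgroup_hierarchical_reclaim() below does:
      *
      *	victim = mem_cgroup_select_victim(root_mem);
      *	ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
      *					   get_swappiness(victim));
      *	css_put(&victim->css);
      *
      * Because each call advances root_mem->last_scanned_child, repeated calls
      * rotate through the hierarchy instead of always hitting the same child.
      */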
    
    /*
     * Scan the hierarchy if needed to reclaim memory. We remember the last child
     * we reclaimed from, so that we don't end up penalizing one child extensively
      * based on its position in the children list.
      *
      * root_mem is the original ancestor that we've been reclaiming from.
      *
      * We give up and return to the caller when we visit root_mem twice.
      * (other groups can be removed while we're walking....)
      *
      * If shrink==true, this returns immediately, to avoid freeing too much.
      */
     static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
     						struct zone *zone,
     						gfp_t gfp_mask,
     						unsigned long reclaim_options)
     {
     	struct mem_cgroup *victim;
    	int ret, total = 0;
    	int loop = 0;
    
    	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
    	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
    
    	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
    	unsigned long excess = mem_cgroup_get_excess(root_mem);
    
     	/* If memsw_is_minimum==1, swap-out is of no use. */
    	if (root_mem->memsw_is_minimum)
    		noswap = true;
    
    
     	while (1) {
     		victim = mem_cgroup_select_victim(root_mem);
    
    		if (victim == root_mem) {
    
    			loop++;
    
    			if (loop >= 1)
    				drain_all_stock_async();
    
    			if (loop >= 2) {
    				/*
    				 * If we have not been able to reclaim
    				 * anything, it might because there are
    				 * no reclaimable pages under this hierarchy
    				 */
    				if (!check_soft || !total) {
    					css_put(&victim->css);
    					break;
    				}
    				/*
     				 * We want to do more targeted reclaim.
     				 * excess >> 2 is not so large that we reclaim
     				 * too much, nor so small that we keep coming
     				 * back to reclaim from this cgroup.
    				 */
    				if (total >= (excess >> 2) ||
    					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
    					css_put(&victim->css);
    					break;
    				}
    			}
    		}
    
    		if (!mem_cgroup_local_usage(&victim->stat)) {
    			/* this cgroup's local usage == 0 */
     			css_put(&victim->css);
     			continue;
     		}
    		/* we use swappiness of local cgroup */
    
    		if (check_soft)
    			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
    				noswap, get_swappiness(victim), zone,
    				zone->zone_pgdat->node_id);
    		else
    			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
    						noswap, get_swappiness(victim));
    
    		css_put(&victim->css);
    
     		/*
     		 * When shrinking usage, we can't check whether we should stop
     		 * here or reclaim more. That depends on the caller.
     		 * last_scanned_child is enough to keep fairness under the tree.
     		 */
    		if (shrink)
    			return ret;
    
    		total += ret;
    
    		if (check_soft) {
    			if (res_counter_check_under_soft_limit(&root_mem->res))
    				return total;
     		} else if (mem_cgroup_check_under_limit(root_mem))
     			return 1 + total;
    
     	}
     	return total;
     }
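
     /*
      * Illustrative sketch (an assumption, not taken from this file):
      * reclaim_options is a bitmask of MEM_CGROUP_RECLAIM_* flags. A
      * limit-shrinking caller (such as a limit-resize path) would ask for a
      * single bounded pass, for example:
      *
      *	ret = mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
      *					      MEM_CGROUP_RECLAIM_SHRINK);
      *
      * while soft-limit reclaim passes MEM_CGROUP_RECLAIM_SOFT plus the zone
      * it is working on, and uses the returned count of reclaimed pages.
      */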
    
    bool mem_cgroup_oom_called(struct task_struct *task)
    {
    	bool ret = false;
    	struct mem_cgroup *mem;
    	struct mm_struct *mm;
    
    	rcu_read_lock();
    	mm = task->mm;
    	if (!mm)
    		mm = &init_mm;
    	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
    	if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
    		ret = true;
    	rcu_read_unlock();
    	return ret;
    }
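
     /*
      * Illustrative sketch (an assumption, not taken from this file): the
      * global pagefault OOM path can use this to notice that a memcg OOM kill
      * already fired within the last HZ/10 window and back off instead of
      * killing again:
      *
      *	if (mem_cgroup_oom_called(current))
      *		schedule_timeout_uninterruptible(1);
      */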
    
    
    static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
    {
    	mem->last_oom_jiffies = jiffies;
    	return 0;
    }
    
    static void record_last_oom(struct mem_cgroup *mem)
    {
    	mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
    }
    
    
    /*
     * Currently used to update mapped file statistics, but the routine can be
     * generalized to update other statistics as well.
     */
    
    void mem_cgroup_update_file_mapped(struct page *page, int val)
    
    {
    	struct mem_cgroup *mem;
    	struct mem_cgroup_stat *stat;
    	struct mem_cgroup_stat_cpu *cpustat;
    	int cpu;
    	struct page_cgroup *pc;
    
    	pc = lookup_page_cgroup(page);
    	if (unlikely(!pc))
    		return;
    
    	lock_page_cgroup(pc);
    	mem = pc->mem_cgroup;
    	if (!mem)
    		goto done;
    
    	if (!PageCgroupUsed(pc))
    		goto done;
    
    	/*
    	 * Preemption is already disabled, we don't need get_cpu()
    	 */
    	cpu = smp_processor_id();
    	stat = &mem->stat;
    	cpustat = &stat->cpustat[cpu];
    
    
    	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
    
    done:
    	unlock_page_cgroup(pc);
    }
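
     /*
      * Illustrative sketch (an assumption, not taken from this file): the rmap
      * code is the expected caller, adjusting the counter when a file page
      * gains or loses a mapping, along the lines of:
      *
      *	void page_add_file_rmap(struct page *page)
      *	{
      *		if (atomic_inc_and_test(&page->_mapcount)) {
      *			__inc_zone_page_state(page, NR_FILE_MAPPED);
      *			mem_cgroup_update_file_mapped(page, 1);
      *		}
      *	}
      *
      * with a matching mem_cgroup_update_file_mapped(page, -1) on unmap.
      */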
    
     /*
      * size of first charge trial. "32" comes from vmscan.c's magic value.
      * TODO: bigger numbers may be needed on big-iron machines.
      */
    #define CHARGE_SIZE	(32 * PAGE_SIZE)
    struct memcg_stock_pcp {
     	struct mem_cgroup *cached; /* this is never the root cgroup */
    	int charge;
    	struct work_struct work;
    };
    static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
    static atomic_t memcg_drain_count;
    
     /*
      * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is
      * consumed from the local stock and true is returned. If the stock is
      * empty or holds charges from a cgroup other than the current target,
      * false is returned and the caller must charge the res_counter directly;
      * the stock will be refilled later.
      */
    static bool consume_stock(struct mem_cgroup *mem)
    {
    	struct memcg_stock_pcp *stock;
    	bool ret = true;
    
    	stock = &get_cpu_var(memcg_stock);
    	if (mem == stock->cached && stock->charge)
    		stock->charge -= PAGE_SIZE;
    	else /* need to call res_counter_charge */
    		ret = false;
    	put_cpu_var(memcg_stock);
    	return ret;
    }
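
     /*
      * Illustrative sketch (an assumption, not taken from this file): the
      * charge fast path in __mem_cgroup_try_charge() below consults the percpu
      * stock first and only falls back to the res_counter on a miss:
      *
      *	if (consume_stock(mem))
      *		goto charged;
      *	ret = res_counter_charge(&mem->res, csize, &fail_res);
      *
      * so a page-sized charge normally touches only this cpu's cached surplus
      * and avoids the shared res_counter lock entirely.
      */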
    
     /*
      * Return the charges cached in this percpu stock to the res_counter and
      * reset the cached information.
      */
    static void drain_stock(struct memcg_stock_pcp *stock)
    {
    	struct mem_cgroup *old = stock->cached;
    
    	if (stock->charge) {
    		res_counter_uncharge(&old->res, stock->charge);
    		if (do_swap_account)
    			res_counter_uncharge(&old->memsw, stock->charge);
    	}
    	stock->cached = NULL;
    	stock->charge = 0;
    }
    
     /*
      * This must be called with preemption disabled, or by a thread pinned to
      * the local cpu.
      */
    static void drain_local_stock(struct work_struct *dummy)
    {
    	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
    	drain_stock(stock);
    }
    
     /*
      * Cache charges (val) obtained from the res_counter in the local per-cpu
      * area. They will be consumed by consume_stock() later.
      */
    static void refill_stock(struct mem_cgroup *mem, int val)
    {
    	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
    
    	if (stock->cached != mem) { /* reset if necessary */
    		drain_stock(stock);
    		stock->cached = mem;
    	}
    	stock->charge += val;
    	put_cpu_var(memcg_stock);
    }
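
     /*
      * Illustrative sketch (an assumption, not taken from this file):
      * refill_stock() is the other half of the batching scheme. try_charge()
      * charges CHARGE_SIZE (32 pages) against the res_counter in one go, uses
      * a single page, and parks the remainder here for later consume_stock()
      * hits:
      *
      *	if (csize > PAGE_SIZE)
      *		refill_stock(mem, csize - PAGE_SIZE);
      *
      * If the cpu's stock currently belongs to another memcg, the old surplus
      * is first returned to that memcg's res_counter via drain_stock().
      */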
    
     /*
      * Try to drain stocked charges on other cpus. This function is asynchronous
      * and just queues a work item per cpu to drain locally on each cpu. The
      * caller can expect some charges to be returned to the res_counter later,
      * but cannot wait for that to happen.
      */
    static void drain_all_stock_async(void)
    {
    	int cpu;
     	/* This function schedules "drain" asynchronously.
     	 * The result of the "drain" is not directly handled by callers, so if
     	 * someone is already draining, we don't have to schedule another drain.
     	 * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch
     	 * any race; this is just a loose check.
     	 */
    	if (atomic_read(&memcg_drain_count))
    		return;
    	/* Notify other cpus that system-wide "drain" is running */
    	atomic_inc(&memcg_drain_count);
    	get_online_cpus();
    	for_each_online_cpu(cpu) {
    		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
    		schedule_work_on(cpu, &stock->work);
    	}
     	put_online_cpus();
    	atomic_dec(&memcg_drain_count);
    	/* We don't wait for flush_work */
    }
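
     /*
      * Illustrative sketch (an assumption, not taken from this file):
      * hierarchical reclaim calls this once it has walked the whole hierarchy
      * without progress (the "loop >= 1" test above), on the theory that the
      * missing charges may just be sitting in other cpus' stocks:
      *
      *	if (loop >= 1)
      *		drain_all_stock_async();
      *
      * The caller does not wait; charges reappear in the res_counter whenever
      * the per-cpu work items get to run.
      */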
    
    /* This is a synchronous drain interface. */
    static void drain_all_stock_sync(void)
    {
    	/* called when force_empty is called */
    	atomic_inc(&memcg_drain_count);
    	schedule_on_each_cpu(drain_local_stock);
    	atomic_dec(&memcg_drain_count);
    }
    
    static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
    					unsigned long action,
    					void *hcpu)
    {
    	int cpu = (unsigned long)hcpu;
    	struct memcg_stock_pcp *stock;
    
    	if (action != CPU_DEAD)
    		return NOTIFY_OK;
    	stock = &per_cpu(memcg_stock, cpu);
    	drain_stock(stock);
    	return NOTIFY_OK;
    }
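
     /*
      * Illustrative sketch (an assumption, not taken from this file): the
      * callback has to be registered once at init time so a dying cpu's stock
      * is flushed back to the res_counter, e.g. something along the lines of:
      *
      *	hotcpu_notifier(memcg_stock_cpu_callback, 0);
      *
      * in the subsystem's create/init path. Without it, charges cached on an
      * offlined cpu would stay stranded until that cpu came back online.
      */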
    
    
    /*
      * Unlike the exported interface, an "oom" parameter is added. If oom==true,
      * the oom-killer can be invoked.
      */
     static int __mem_cgroup_try_charge(struct mm_struct *mm,
    
    			gfp_t gfp_mask, struct mem_cgroup **memcg,
    
     			bool oom, struct page *page)
     {
     	struct mem_cgroup *mem, *mem_over_limit;
    
     	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
     	struct res_counter *fail_res;
     	int csize = CHARGE_SIZE;
    
    
    	if (unlikely(test_thread_flag(TIF_MEMDIE))) {
    		/* Don't account this! */
    		*memcg = NULL;
    		return 0;
    	}
    
    
     	/*
     	 * We always charge the cgroup the mm_struct belongs to.
     	 * The mm_struct's mem_cgroup changes on task migration if the
     	 * thread group leader migrates. It's possible that mm is not
     	 * set, if so charge the init_mm (happens for pagecache usage).
     	 */
    
    	mem = *memcg;
    	if (likely(!mem)) {
     		mem = try_get_mem_cgroup_from_mm(mm);
     		*memcg = mem;
     	} else {
     		css_get(&mem->css);
     	}
    	if (unlikely(!mem))
    		return 0;
    
    
    	if (mem_cgroup_is_root(mem))
    		goto done;
    
    	while (1) {
    
     		int ret = 0;
     		unsigned long flags = 0;
    
    		if (consume_stock(mem))
    			goto charged;
    
    		ret = res_counter_charge(&mem->res, csize, &fail_res);
    
    		if (likely(!ret)) {
    			if (!do_swap_account)
    				break;
    
    			ret = res_counter_charge(&mem->memsw, csize, &fail_res);
    
    			if (likely(!ret))
    				break;
    			/* mem+swap counter fails */
    
    			res_counter_uncharge(&mem->res, csize);
    
    			flags |= MEM_CGROUP_RECLAIM_NOSWAP;
    
    			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
    									memsw);
    		} else
    			/* mem counter fails */
    			mem_over_limit = mem_cgroup_from_res_counter(fail_res,
    									res);
    
    
    		/* reduce request size and retry */
    		if (csize > PAGE_SIZE) {
    			csize = PAGE_SIZE;
    			continue;
    		}
    
     		if (!(gfp_mask & __GFP_WAIT))
     			goto nomem;

    		ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
    						gfp_mask, flags);
    
    		if (ret)
    			continue;
    
     		/*
     		 * try_to_free_mem_cgroup_pages() might not give us a full
     		 * picture of reclaim. Some pages are reclaimed and might be
     		 * moved to swap cache or just unmapped from the cgroup.
     		 * Check the limit again to see if the reclaim reduced the
     		 * current usage of the cgroup before giving up.
     		 */
    		if (mem_cgroup_check_under_limit(mem_over_limit))
    			continue;
    
    
     		if (!nr_retries--) {
     			if (oom) {
     				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
     				record_last_oom(mem_over_limit);
     			}
     			goto nomem;
     		}
     	}
    	if (csize > PAGE_SIZE)
    		refill_stock(mem, csize - PAGE_SIZE);
    charged:
    
     	/*
     	 * Insert the ancestor (and the ancestor's ancestors) into the softlimit
     	 * RB-tree if they exceed the softlimit.
     	 */
    	if (mem_cgroup_soft_limit_check(mem))
     		mem_cgroup_update_tree(mem, page);
     done:
     	return 0;
    nomem:
    	css_put(&mem->css);
    	return -ENOMEM;
    }
    
    /*
      * Sometimes we have to undo a charge we got by try_charge().
      * This function does the uncharge and puts the css refcount
      * taken by try_charge().
     */
    static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
    {
    	if (!mem_cgroup_is_root(mem)) {
    		res_counter_uncharge(&mem->res, PAGE_SIZE);
    		if (do_swap_account)
    			res_counter_uncharge(&mem->memsw, PAGE_SIZE);
    	}
    	css_put(&mem->css);
    }
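
     /*
      * Illustrative sketch (an assumption, not taken from this file):
      * try_charge, commit and cancel form a two-phase protocol. A caller that
      * charges speculatively and then fails to install the page undoes the
      * charge:
      *
      *	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
      *	if (ret || !mem)
      *		return ret;
      *	if (could_not_install)			(hypothetical failure test)
      *		mem_cgroup_cancel_charge(mem);	(uncharge + css_put)
      *	else
      *		__mem_cgroup_commit_charge(mem, pc, ctype);
      *
      * mem_cgroup_cancel_charge_swapin() below is the wrapper used on the
      * swap-in path.
      */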
    
    
    /*
      * A helper function to get a mem_cgroup from an ID. Must be called under
      * rcu_read_lock(). The caller must check css_is_removed() or something
      * similar if that is a concern (dropping a refcnt from swap can be called
      * against a removed memcg).
     */
    static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
    {
    	struct cgroup_subsys_state *css;
    
    	/* ID 0 is unused ID */
    	if (!id)
    		return NULL;
    	css = css_lookup(&mem_cgroup_subsys, id);
    	if (!css)
    		return NULL;
    	return container_of(css, struct mem_cgroup, css);
    }
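
     /*
      * Illustrative sketch (an assumption, not taken from this file): the id
      * comes from the swap_cgroup record attached to a swap entry, and the
      * lookup-plus-tryget pattern is the one used by
      * try_get_mem_cgroup_from_page() below:
      *
      *	id = lookup_swap_cgroup(ent);
      *	rcu_read_lock();
      *	mem = mem_cgroup_lookup(id);
      *	if (mem && !css_tryget(&mem->css))
      *		mem = NULL;	(the memcg is being removed)
      *	rcu_read_unlock();
      */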
    
    
     struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
     {
     	struct mem_cgroup *mem = NULL;
     	struct page_cgroup *pc;
     	unsigned short id;
     	swp_entry_t ent;
    
    	VM_BUG_ON(!PageLocked(page));
    
    	pc = lookup_page_cgroup(page);
    
     	lock_page_cgroup(pc);
     	if (PageCgroupUsed(pc)) {
     		mem = pc->mem_cgroup;
    
    		if (mem && !css_tryget(&mem->css))
    			mem = NULL;
    
    	} else if (PageSwapCache(page)) {
    
    		ent.val = page_private(page);
    
    		id = lookup_swap_cgroup(ent);
    		rcu_read_lock();
    		mem = mem_cgroup_lookup(id);
    		if (mem && !css_tryget(&mem->css))
    			mem = NULL;
    		rcu_read_unlock();
    
     	}
     	unlock_page_cgroup(pc);
     	return mem;
     }

     /*
      * commit a charge got by __mem_cgroup_try_charge() and make the page_cgroup
      * USED. If it is already USED, uncharge and return.
      */
    
    static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
    				     struct page_cgroup *pc,
    				     enum charge_type ctype)
    {
    	/* try_charge() can return NULL to *memcg, taking care of it. */
    	if (!mem)
    		return;
    
    
    	lock_page_cgroup(pc);
    	if (unlikely(PageCgroupUsed(pc))) {
    		unlock_page_cgroup(pc);
    
     		mem_cgroup_cancel_charge(mem);
     		return;
     	}

    	pc->mem_cgroup = mem;
    
    	/*
    	 * We access a page_cgroup asynchronously without lock_page_cgroup().
    	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
    	 * is accessed after testing USED bit. To make pc->mem_cgroup visible
    	 * before USED bit, we need memory barrier here.
    	 * See mem_cgroup_add_lru_list(), etc.
     	 */
    
    	smp_wmb();
    
    	switch (ctype) {
    	case MEM_CGROUP_CHARGE_TYPE_CACHE:
    	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
    		SetPageCgroupCache(pc);
    		SetPageCgroupUsed(pc);
    		break;
    	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
    		ClearPageCgroupCache(pc);
    		SetPageCgroupUsed(pc);
    		break;
    	default:
    		break;
    	}
    
    	mem_cgroup_charge_statistics(mem, pc, true);
    
    
     	unlock_page_cgroup(pc);
     }

     /**
      * __mem_cgroup_move_account - move account of the page
    
     * @pc:	page_cgroup of the page.
     * @from: mem_cgroup which the page is moved from.
     * @to:	mem_cgroup which the page is moved to. @from != @to.
     *
      * The caller must confirm the following:
      * - page is not on LRU (isolate_page() is useful.)
      * - the pc is locked, used, and ->mem_cgroup points to @from.
     *
     * This function does "uncharge" from old cgroup but doesn't do "charge" to
     * new cgroup. It should be done by a caller.
     */
    
    
    static void __mem_cgroup_move_account(struct page_cgroup *pc,
    
    	struct mem_cgroup *from, struct mem_cgroup *to)
    {
    
    	struct page *page;
    	int cpu;
    	struct mem_cgroup_stat *stat;
    	struct mem_cgroup_stat_cpu *cpustat;
    
    	VM_BUG_ON(PageLRU(pc->page));
    
    	VM_BUG_ON(!PageCgroupLocked(pc));
    	VM_BUG_ON(!PageCgroupUsed(pc));
    	VM_BUG_ON(pc->mem_cgroup != from);
    
    	if (!mem_cgroup_is_root(from))
    
    		res_counter_uncharge(&from->res, PAGE_SIZE);
    
    	mem_cgroup_charge_statistics(from, pc, false);
    
    
    	page = pc->page;
    
    	if (page_mapped(page) && !PageAnon(page)) {
    
    		cpu = smp_processor_id();
    		/* Update mapped_file data for mem_cgroup "from" */
    		stat = &from->stat;
    		cpustat = &stat->cpustat[cpu];
    
    		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
    
    						-1);
    
    		/* Update mapped_file data for mem_cgroup "to" */
    		stat = &to->stat;
    		cpustat = &stat->cpustat[cpu];
    
     		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
     						1);
     	}

    	if (do_swap_account && !mem_cgroup_is_root(from))
    
    		res_counter_uncharge(&from->memsw, PAGE_SIZE);
    
    	css_put(&from->css);
    
    	css_get(&to->css);
    
    	pc->mem_cgroup = to;
    	mem_cgroup_charge_statistics(to, pc, true);
    
     	/*
     	 * We charge against "to", which may not have any tasks. Then "to"
     	 * can be under rmdir(). But in the current implementation, the caller
     	 * of this function is just force_empty() and it is guaranteed that
     	 * "to" is never removed. So, we don't check the rmdir status here.
     	 */
    
    }
    
    /*
     * check whether the @pc is valid for moving account and call
     * __mem_cgroup_move_account()
     */
    static int mem_cgroup_move_account(struct page_cgroup *pc,
    				struct mem_cgroup *from, struct mem_cgroup *to)
    {
    	int ret = -EINVAL;
    	lock_page_cgroup(pc);
    	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
    		__mem_cgroup_move_account(pc, from, to);
    		ret = 0;
    	}
    	unlock_page_cgroup(pc);
    
    	return ret;
    }
    
    /*
     * move charges to its parent.
     */
    
    static int mem_cgroup_move_parent(struct page_cgroup *pc,
    				  struct mem_cgroup *child,
    				  gfp_t gfp_mask)
    {
    
    	struct page *page = pc->page;
    
    	struct cgroup *cg = child->css.cgroup;
    	struct cgroup *pcg = cg->parent;
    	struct mem_cgroup *parent;
    	int ret;
    
    	/* Is ROOT ? */
    	if (!pcg)
    		return -EINVAL;
    
    
    	ret = -EBUSY;
    	if (!get_page_unless_zero(page))
    		goto out;
    	if (isolate_lru_page(page))
    		goto put;
    
    	parent = mem_cgroup_from_cont(pcg);
    
     	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
     	if (ret || !parent)
     		goto put_back;

     	ret = mem_cgroup_move_account(pc, child, parent);
    
    	if (!ret)
    		css_put(&parent->css);	/* drop extra refcnt by try_charge() */
    	else
    		mem_cgroup_cancel_charge(parent);	/* does css_put */
    put_back:
    
     	putback_lru_page(page);
     put:
     	put_page(page);
     out:
     	return ret;
     }

    /*
     * Charge the memory controller for page usage.
     * Return
     * 0 if the charge was successful
     * < 0 if the cgroup is over its limit
     */
    static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
    				gfp_t gfp_mask, enum charge_type ctype,
    				struct mem_cgroup *memcg)
    {
    	struct mem_cgroup *mem;
    	struct page_cgroup *pc;
    	int ret;
    
    	pc = lookup_page_cgroup(page);
    	/* can happen at boot */
    	if (unlikely(!pc))
    		return 0;
    	prefetchw(pc);
    
    	mem = memcg;
    
     	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
     	if (ret || !mem)
     		return ret;
    
     	__mem_cgroup_commit_charge(mem, pc, ctype);
     	return 0;
     }

    int mem_cgroup_newpage_charge(struct page *page,
     			      struct mm_struct *mm, gfp_t gfp_mask)
     {
     	if (mem_cgroup_disabled())
     		return 0;
    	if (PageCompound(page))
    		return 0;
    
     	/*
     	 * If already mapped, we don't have to account.
     	 * If page cache, page->mapping has an address_space.
     	 * But page->mapping may hold an out-of-use anon_vma pointer;
     	 * detect that with the PageAnon() check. A newly-mapped anon
     	 * page's page->mapping is NULL.
     	 */
    	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
    		return 0;
    	if (unlikely(!mm))
    		mm = &init_mm;
    
     	return mem_cgroup_charge_common(page, mm, gfp_mask,
     				MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
     }

    static void
    __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
    					enum charge_type ctype);
    
    
    int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
     				gfp_t gfp_mask)
     {
     	struct mem_cgroup *mem = NULL;
    	int ret;
    
    
     	if (mem_cgroup_disabled())
     		return 0;
    	if (PageCompound(page))
    		return 0;
    
     	/*
     	 * Corner case handling. This is usually called from add_to_page_cache().
     	 * But some FS (shmem) precharges this page before calling it and calls
     	 * add_to_page_cache() with GFP_NOWAIT.
     	 *
     	 * For the GFP_NOWAIT case, the page may be pre-charged before calling
     	 * add_to_page_cache(). (See shmem.c.) Check it here and avoid charging
     	 * twice. (It works but has to pay a bit larger cost.)
     	 * And when the page is a SwapCache page, it should take swap information
     	 * into account. This is under lock_page() now.
     	 */
    	if (!(gfp_mask & __GFP_WAIT)) {
    		struct page_cgroup *pc;
    
    
    
    		pc = lookup_page_cgroup(page);
    		if (!pc)
    			return 0;
    		lock_page_cgroup(pc);
    		if (PageCgroupUsed(pc)) {
     			unlock_page_cgroup(pc);
     			return 0;
     		}
     		unlock_page_cgroup(pc);
     	}

     	if (unlikely(!mm && !mem))
     		mm = &init_mm;

    	if (page_is_file_cache(page))
    		return mem_cgroup_charge_common(page, mm, gfp_mask,
    
    				MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
    
    	/* shmem */
    	if (PageSwapCache(page)) {
    		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
    		if (!ret)
    			__mem_cgroup_commit_charge_swapin(page, mem,
    					MEM_CGROUP_CHARGE_TYPE_SHMEM);
    	} else
    		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
     					MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);

     	return ret;
     }

    /*
     * While swap-in, try_charge -> commit or cancel, the page is locked.
     * And when try_charge() successfully returns, one refcnt to memcg without
    
     * struct page_cgroup is acquired. This refcnt will be consumed by
    
     * "commit()" or removed by "cancel()"
     */
    
    int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
    				 struct page *page,
    				 gfp_t mask, struct mem_cgroup **ptr)
    {
     	struct mem_cgroup *mem;
     	int ret;
    	if (mem_cgroup_disabled())
    
    		return 0;
    
    	if (!do_swap_account)
    		goto charge_cur_mm;
    	/*
    	 * A racing thread's fault, or swapoff, may have already updated
    
    	 * the pte, and even removed page from swap cache: in those cases
    	 * do_swap_page()'s pte_same() test will fail; but there's also a
    	 * KSM case which does need to charge the page.
    
    	 */
    	if (!PageSwapCache(page))
    
    		goto charge_cur_mm;
    
    	mem = try_get_mem_cgroup_from_page(page);
    
    	if (!mem)
    		goto charge_cur_mm;
    
    	*ptr = mem;
    
    	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
    
    	/* drop extra refcnt from tryget */
    	css_put(&mem->css);
    	return ret;
    
    charge_cur_mm:
    	if (unlikely(!mm))
    		mm = &init_mm;
    
     	return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
     }

    static void
    __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
     					enum charge_type ctype)
     {
     	struct page_cgroup *pc;

     	if (mem_cgroup_disabled())
     		return;
    	cgroup_exclude_rmdir(&ptr->css);
    
    	pc = lookup_page_cgroup(page);
    
    	mem_cgroup_lru_del_before_commit_swapcache(page);
    
    	__mem_cgroup_commit_charge(ptr, pc, ctype);
    
    	mem_cgroup_lru_add_after_commit_swapcache(page);
    
    	/*
    	 * Now swap is on-memory. This means this page may be
    	 * counted both as mem and swap....double count.
    
     	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
     	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
     	 * may call delete_from_swap_cache() before reaching here.
     	 */
    	if (do_swap_account && PageSwapCache(page)) {
    
     		swp_entry_t ent = {.val = page_private(page)};
     		unsigned short id;
    		struct mem_cgroup *memcg;
    
    
    		id = swap_cgroup_record(ent, 0);
    		rcu_read_lock();
    		memcg = mem_cgroup_lookup(id);
    
    		if (memcg) {
    
    			/*
    			 * This recorded memcg can be obsolete one. So, avoid
    			 * calling css_tryget
    			 */
    
    			if (!mem_cgroup_is_root(memcg))
    
    				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
    
    			mem_cgroup_swap_statistics(memcg, false);
    
    			mem_cgroup_put(memcg);
     		}
     		rcu_read_unlock();
     	}
    	/*
    	 * At swapin, we may charge account against cgroup which has no tasks.
    	 * So, rmdir()->pre_destroy() can be called while we do this charge.
    	 * In that case, we need to call pre_destroy() again. check it here.
    	 */
     	cgroup_release_and_wakeup_rmdir(&ptr->css);
     }

    void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
    {
    	__mem_cgroup_commit_charge_swapin(page, ptr,
    					MEM_CGROUP_CHARGE_TYPE_MAPPED);
    }
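
     /*
      * Illustrative sketch (an assumption, not taken from this file): the
      * swap-in fault path brackets the page-table update with the
      * try/commit/cancel calls, roughly as do_swap_page() does:
      *
      *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
      *		goto out_fail;			(hypothetical label)
      *	(install the pte under the page-table lock)
      *	mem_cgroup_commit_charge_swapin(page, ptr);
      *
      * and, if the pte_same() check fails after a successful try_charge,
      * mem_cgroup_cancel_charge_swapin(ptr) drops the pending charge instead.
      */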
    
    
    void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
    {
    
     	if (mem_cgroup_disabled())
     		return;
     	if (!mem)
     		return;
     	mem_cgroup_cancel_charge(mem);
     }

    static void
    __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
    {
    	struct memcg_batch_info *batch = NULL;
    	bool uncharge_memsw = true;
    	/* If swapout, usage of swap doesn't decrease */
    	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
    		uncharge_memsw = false;
    	/*
    	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
     	 * In those cases, all pages freed continuously can be expected to be in
     	 * the same cgroup, and we have a chance to coalesce uncharges.
     	 * But we uncharge one by one if the task is killed by OOM (TIF_MEMDIE),
     	 * because we want to do the uncharge as soon as possible.
    	 */
    	if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
    		goto direct_uncharge;
    
    	batch = &current->memcg_batch;
    	/*
     	 * Usually, we do css_get() when we remember a memcg pointer.
     	 * But in this case, we keep res->usage until the end of a series of
     	 * uncharges. Then, it's OK to ignore the memcg's refcnt.
    	 */
    	if (!batch->memcg)
    		batch->memcg = mem;
    	/*
     	 * In the typical case, batch->memcg == mem. This means we can