		return 0;
    
    	ent.val = page_private(page);
    
    	mem = lookup_swap_cgroup(ent);
    	if (!mem || mem->obsolete)
    		goto charge_cur_mm;
    	*ptr = mem;
    	return __mem_cgroup_try_charge(NULL, mask, ptr, true);
    charge_cur_mm:
    	if (unlikely(!mm))
    		mm = &init_mm;
    	return __mem_cgroup_try_charge(mm, mask, ptr, true);
    }
    
    
    #ifdef CONFIG_SWAP
    
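/*
 * Charge a SwapCache page found at swapin for shmem/tmpfs page cache.  If
 * swap accounting recorded an owner memcg for this entry, that memcg is
 * charged instead of the faulting mm's, and the stale memsw charge is
 * dropped afterwards so the page is not counted twice.
 */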
    int mem_cgroup_cache_charge_swapin(struct page *page,
    			struct mm_struct *mm, gfp_t mask, bool locked)
    {
    	int ret = 0;
    
    
	if (mem_cgroup_disabled())
		return 0;
    	if (unlikely(!mm))
    		mm = &init_mm;
    	if (!locked)
    		lock_page(page);
    	/*
    	 * If not locked, the page can be dropped from SwapCache until
    	 * we reach here.
    	 */
    	if (PageSwapCache(page)) {
    
    		struct mem_cgroup *mem = NULL;
    		swp_entry_t ent;
    
    		ent.val = page_private(page);
    		if (do_swap_account) {
    			mem = lookup_swap_cgroup(ent);
    			if (mem && mem->obsolete)
    				mem = NULL;
    			if (mem)
    				mm = NULL;
    		}
    
		ret = mem_cgroup_charge_common(page, mm, mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
    
    		if (!ret && do_swap_account) {
    			/* avoid double counting */
    			mem = swap_cgroup_record(ent, NULL);
    			if (mem) {
    				res_counter_uncharge(&mem->memsw, PAGE_SIZE);
    				mem_cgroup_put(mem);
    			}
    		}
    
    	}
    	if (!locked)
    		unlock_page(page);
    
    	/* add this page(page_cgroup) to the LRU we want. */
	mem_cgroup_lru_fixup(page);

	return ret;
}
#endif
    
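/*
 * Commit a charge obtained via mem_cgroup_try_charge_swapin() once the
 * swapped-in page has been mapped.
 */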
    void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
    {
    	struct page_cgroup *pc;
    
    
	if (mem_cgroup_disabled())
		return;
    	if (!ptr)
    		return;
    	pc = lookup_page_cgroup(page);
    	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
    
    	/*
    	 * Now swap is on-memory. This means this page may be
    	 * counted both as mem and swap....double count.
    	 * Fix it by uncharging from memsw. This SwapCache is stable
    	 * because we're still under lock_page().
    	 */
    	if (do_swap_account) {
    		swp_entry_t ent = {.val = page_private(page)};
    		struct mem_cgroup *memcg;
    		memcg = swap_cgroup_record(ent, NULL);
    		if (memcg) {
    			/* If memcg is obsolete, memcg can be != ptr */
    			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
    			mem_cgroup_put(memcg);
    		}
    
    	}
    
    	/* add this page(page_cgroup) to the LRU we want. */
    	mem_cgroup_lru_fixup(page);
    
    }
    
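/*
 * Undo a charge taken by mem_cgroup_try_charge_swapin() when a swapin is
 * aborted before the charge has been committed.
 */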
    void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
    {
    
	if (mem_cgroup_disabled())
		return;
    	if (!mem)
    		return;
    	res_counter_uncharge(&mem->res, PAGE_SIZE);
    
    	if (do_swap_account)
		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
}

/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	struct mem_cgroup_per_zone *mz;
    
	if (mem_cgroup_disabled())
		return NULL;
    
	if (PageSwapCache(page))
		return NULL;
    
	/*
	 * Check if our page_cgroup is valid
	 */
    	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return NULL;
    
    	lock_page_cgroup(pc);
    
    	mem = pc->mem_cgroup;
    
    
    	if (!PageCgroupUsed(pc))
    		goto unlock_out;
    
    	switch (ctype) {
    	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
    		if (page_mapped(page))
    			goto unlock_out;
    		break;
    	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
    		if (!PageAnon(page)) {	/* Shared memory */
    			if (page->mapping && !page_is_file_cache(page))
    				goto unlock_out;
    		} else if (page_mapped(page)) /* Anon */
    				goto unlock_out;
    		break;
	default:
		break;
	}

	res_counter_uncharge(&mem->res, PAGE_SIZE);
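	/*
	 * At swapout the memsw charge is deliberately left in place: the page
	 * still occupies swap.  mem_cgroup_uncharge_swapcache() records the
	 * owner in swap_cgroup, and mem_cgroup_uncharge_swap() drops the
	 * memsw charge when the swap entry is finally freed.
	 */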
    	if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
    		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
    
    
    	mem_cgroup_charge_statistics(mem, pc, false);
    
    	ClearPageCgroupUsed(pc);
    
    	mz = page_cgroup_zoneinfo(pc);
    
    	unlock_page_cgroup(pc);
    
    	css_put(&mem->css);
    
    	return mem;
    
    
    unlock_out:
    	unlock_page_cgroup(pc);
    
	return NULL;
}

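/*
 * Uncharge a page when its last mapping is gone.  File-cache pages are left
 * alone here; they are uncharged via mem_cgroup_uncharge_cache_page().
 */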
    void mem_cgroup_uncharge_page(struct page *page)
    {
    
    	/* early check. */
    	if (page_mapped(page))
    		return;
    	if (page->mapping && !PageAnon(page))
    		return;
    
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
    }
    
    void mem_cgroup_uncharge_cache_page(struct page *page)
    {
    	VM_BUG_ON(page_mapped(page));
    
    	VM_BUG_ON(page->mapping);
    
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
    }
    
    
    /*
 * Called from __delete_from_swap_cache(): drop the "page" account and
 * record the owning memcg in the swap_cgroup of "ent".
     */
    void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
    {
    	struct mem_cgroup *memcg;
    
    	memcg = __mem_cgroup_uncharge_common(page,
    					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
    	/* record memcg information */
    	if (do_swap_account && memcg) {
    		swap_cgroup_record(ent, memcg);
    		mem_cgroup_get(memcg);
    	}
    }
    
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
    /*
     * called from swap_entry_free(). remove record in swap_cgroup and
     * uncharge "memsw" account.
     */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;
    
    	if (!do_swap_account)
    		return;
    
    	memcg = swap_cgroup_record(ent, NULL);
    	if (memcg) {
    		res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
    		mem_cgroup_put(memcg);
	}
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
 * page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	int ret = 0;

	if (mem_cgroup_disabled())
		return 0;

    	pc = lookup_page_cgroup(page);
    	lock_page_cgroup(pc);
    	if (PageCgroupUsed(pc)) {
    
    		mem = pc->mem_cgroup;
    		css_get(&mem->css);
    	}
    
	unlock_page_cgroup(pc);

	if (mem) {
		ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
    
    		css_put(&mem->css);
    	}
    
    	*ptr = mem;
    
	return ret;
}

    /* remove redundant charge if migration failed*/
    
    void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage)
{
	struct page *target, *unused;
    	struct page_cgroup *pc;
    	enum charge_type ctype;
    
    	if (!mem)
    		return;
    
    	/* at migration success, oldpage->mapping is NULL. */
    	if (oldpage->mapping) {
    		target = oldpage;
    		unused = NULL;
    	} else {
    		target = newpage;
    		unused = oldpage;
    	}
    
    	if (PageAnon(target))
    		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
    	else if (page_is_file_cache(target))
    		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
    	else
    		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
    
    	/* unused page is not on radix-tree now. */
    
	if (unused)
		__mem_cgroup_uncharge_common(unused, ctype);
    
	pc = lookup_page_cgroup(target);
	/*
	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
    	 * So, double-counting is effectively avoided.
    	 */
    	__mem_cgroup_commit_charge(mem, pc, ctype);
    
    	/*
    	 * Both of oldpage and newpage are still under lock_page().
    	 * Then, we don't have to care about race in radix-tree.
    	 * But we have to be careful that this page is unmapped or not.
    	 *
    	 * There is a case for !page_mapped(). At the start of
    	 * migration, oldpage was mapped. But now, it's zapped.
    	 * But we know *target* page is not freed/reused under us.
	 * mem_cgroup_uncharge_page() does all necessary checks.
	 */
    	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
		mem_cgroup_uncharge_page(target);
}

    /*
 * A call to try to shrink memory usage under the specified resource
 * controller.  This is typically used for shmem page reclaim, to reduce the
 * side effects that a shmem page allocation has on the mem_cgroup using it.
     */
    int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
    {
    	struct mem_cgroup *mem;
    	int progress = 0;
    	int retry = MEM_CGROUP_RECLAIM_RETRIES;
    
    
	if (mem_cgroup_disabled())
		return 0;

	rcu_read_lock();
    	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
    
    	if (unlikely(!mem)) {
    		rcu_read_unlock();
    		return 0;
    	}
    
    	css_get(&mem->css);
    	rcu_read_unlock();
    
    	do {
    
    		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
    
    		progress += res_counter_check_under_limit(&mem->res);
    
    	} while (!progress && --retry);
    
    	css_put(&mem->css);
    	if (!retry)
    		return -ENOMEM;
    	return 0;
    }
    
    
    static DEFINE_MUTEX(set_limit_mutex);
    
    
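/*
 * Set mem->res.limit to @val, reclaiming pages from the group until the new
 * limit can be set.  The memory limit must stay below the mem+swap limit,
 * hence the memswlimit check under set_limit_mutex.
 */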
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
	int progress;
	u64 memswlimit;
	int ret = 0;

    	while (retry_count) {
    
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			break;
    		}
    
    		/*
    		 * Rather than hide all in some function, I do this in
    		 * open coded manner. You see what this really does.
    		 * We have to guarantee mem->res.limit < mem->memsw.limit.
    		 */
    		mutex_lock(&set_limit_mutex);
    		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
    		if (memswlimit < val) {
    			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}

		ret = res_counter_set_limit(&memcg->res, val);
    		mutex_unlock(&set_limit_mutex);
    
    		if (!ret)
    			break;
    
    
		progress = try_to_free_mem_cgroup_pages(memcg,
				GFP_KERNEL, false);
		if (!progress)
			retry_count--;
    	}
    	return ret;
    }
    
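/*
 * Set the mem+swap limit.  The new value may not be smaller than the plain
 * memory limit, and reclaim is done with noswap == true, since swapping
 * pages out does not lower mem+swap usage.
 */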
    int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
    				unsigned long long val)
    {
    	int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
    	u64 memlimit, oldusage, curusage;
    	int ret;
    
    	if (!do_swap_account)
    		return -EINVAL;
    
    	while (retry_count) {
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			break;
    		}
    		/*
    		 * Rather than hide all in some function, I do this in
    		 * open coded manner. You see what this really does.
    		 * We have to guarantee mem->res.limit < mem->memsw.limit.
    		 */
    		mutex_lock(&set_limit_mutex);
    		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
    		if (memlimit > val) {
    			ret = -EINVAL;
    			mutex_unlock(&set_limit_mutex);
    			break;
    		}
    		ret = res_counter_set_limit(&memcg->memsw, val);
    		mutex_unlock(&set_limit_mutex);
    
    		if (!ret)
    			break;
    
		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		if (curusage >= oldusage)
			retry_count--;
	}
	return ret;
}

    /*
 * This routine traverses the page_cgroups on the given list and drops them
 * all by moving each charge to the parent.  *And* it doesn't reclaim the
 * pages themselves; it only removes the page_cgroup accounting.
     */
    
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	struct list_head *list;
	unsigned long flags, loop;
	int ret = 0;
    
    	zone = &NODE_DATA(node)->node_zones[zid];
    	mz = mem_cgroup_zoneinfo(mem, node, zid);
    
    	list = &mz->lists[lru];
    
    	loop = MEM_CGROUP_ZSTAT(mz, lru);
    	/* give some margin against EBUSY etc...*/
    	loop += 256;
    	busy = NULL;
    	while (loop--) {
    		ret = 0;
    
    		spin_lock_irqsave(&zone->lru_lock, flags);
    
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
    		pc = list_entry(list->prev, struct page_cgroup, lru);
    		if (busy == pc) {
    			list_move(&pc->lru, list);
			busy = 0;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
    		spin_unlock_irqrestore(&zone->lru_lock, flags);
    
    		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
    
    
    		if (ret == -EBUSY || ret == -EINVAL) {
    			/* found lock contention or "pc" is obsolete. */
    			busy = pc;
    			cond_resched();
    		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
    		return -EBUSY;
    	return ret;
    
    }
    
    /*
 * Make the mem_cgroup's charge 0 if there is no task in it, either by moving
 * charges to the parent or by reclaiming the pages.  This makes the
 * mem_cgroup deletable.
     */
    
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = mem->css.cgroup;

	css_get(&mem->css);

	shrink = 0;
    	/* should free all ? */
    	if (free_all)
		goto try_to_free;
move_account:
	while (mem->res.usage > 0) {
		ret = -EBUSY;
    		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
    			goto out;
    		ret = -EINTR;
		if (signal_pending(current))
			goto out;

    		/* This is for making all *used* pages to be on LRU. */
    		lru_add_drain_all();
    
    		ret = 0;
    		for_each_node_state(node, N_POSSIBLE) {
    			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
    
				enum lru_list l;
				for_each_lru(l) {
					ret = mem_cgroup_force_empty_list(mem,
							node, zid, l);
					if (ret)
						break;
				}
			}
    			if (ret)
    				break;
    		}
    		/* it seems parent cgroup doesn't have enough mem */
    		if (ret == -ENOMEM)
			goto try_to_free;
		cond_resched();
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;

try_to_free:
	/* returns EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
    	/* we call try-to-free pages for make this cgroup empty */
    	lru_add_drain_all();
    
    	/* try to free all pages in this cgroup */
    	shrink = 1;
    	while (nr_retries && mem->res.usage > 0) {
    		int progress;
    
    
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			goto out;
    		}
    
		progress = try_to_free_mem_cgroup_pages(mem,
						  GFP_KERNEL, false);
		if (!progress) {
			nr_retries--;
    			/* maybe some writeback is necessary */
    			congestion_wait(WRITE, HZ/10);
		}

	}
	lru_add_drain();
    
    	/* try move_account...there may be some *locked* pages. */
    	if (mem->res.usage)
    		goto move_account;
    	ret = 0;
	goto out;
}

    int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
    {
    	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
    }
    
    
    
    static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
    {
    	return mem_cgroup_from_cont(cont)->use_hierarchy;
    }
    
    static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
    					u64 val)
    {
    	int retval = 0;
    	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
    	struct cgroup *parent = cont->parent;
    	struct mem_cgroup *parent_mem = NULL;
    
    	if (parent)
    		parent_mem = mem_cgroup_from_cont(parent);
    
    	cgroup_lock();
    	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
    	 * in the child subtrees. If it is unset, then the change can
    	 * occur, provided the current cgroup has no children.
    	 *
    	 * For the root cgroup, parent_mem is NULL, we allow value to be
    	 * set if there are no children.
    	 */
    	if ((!parent_mem || !parent_mem->use_hierarchy) &&
    				(val == 1 || val == 0)) {
    		if (list_empty(&cont->children))
    			mem->use_hierarchy = val;
    		else
    			retval = -EBUSY;
    	} else
    		retval = -EINVAL;
    	cgroup_unlock();
    
    	return retval;
    }
    
    
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
    	u64 val = 0;
    	int type, name;
    
    	type = MEMFILE_TYPE(cft->private);
    	name = MEMFILE_ATTR(cft->private);
    	switch (type) {
    	case _MEM:
    		val = res_counter_read_u64(&mem->res, name);
    		break;
    	case _MEMSWAP:
    		if (do_swap_account)
    			val = res_counter_read_u64(&mem->memsw, name);
    		break;
    	default:
    		BUG();
    		break;
    	}
	return val;
}

    /*
     * The user of this function is...
     * RES_LIMIT.
     */
    
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
    
    	int type, name;
    
    	unsigned long long val;
    	int ret;
    
    
    	type = MEMFILE_TYPE(cft->private);
    	name = MEMFILE_ATTR(cft->private);
    	switch (name) {
    
    	case RES_LIMIT:
    		/* This function does all necessary parse...reuse it */
    		ret = res_counter_memparse_write_strategy(buffer, &val);
    
    		if (ret)
    			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
    			ret = mem_cgroup_resize_memsw_limit(memcg, val);
    
    		break;
    	default:
    		ret = -EINVAL; /* should be BUG() ? */
    		break;
    	}
	return ret;
}

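/*
 * Trigger callback for the max_usage_in_bytes and failcnt files: resets the
 * high-watermark or the failure counter of the selected res_counter.
 */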
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;
	int type, name;

    	mem = mem_cgroup_from_cont(cont);
    
    	type = MEMFILE_TYPE(event);
    	name = MEMFILE_ATTR(event);
    	switch (name) {
    
    	case RES_MAX_USAGE:
    
    		if (type == _MEM)
    			res_counter_reset_max(&mem->res);
    		else
    			res_counter_reset_max(&mem->memsw);
    
    		break;
    	case RES_FAILCNT:
    
    		if (type == _MEM)
    			res_counter_reset_failcnt(&mem->res);
    		else
			res_counter_reset_failcnt(&mem->memsw);
		break;
	}
	return 0;
}

    static const struct mem_cgroup_stat_desc {
    	const char *msg;
    	u64 unit;
    } mem_cgroup_stat_desc[] = {
    	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
    	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
    
    	[MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
    	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
    
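/*
 * Dump the accumulated event counters and the per-LRU page counts of this
 * cgroup through the cgroup_map_cb interface.
 */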
    static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
    	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
    	struct mem_cgroup_stat *stat = &mem_cont->stat;
    	int i;
    
    	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
    		s64 val;
    
    		val = mem_cgroup_read_stat(stat, i);
    		val *= mem_cgroup_stat_desc[i].unit;
    
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
    		unsigned long active_anon, inactive_anon;
    		unsigned long active_file, inactive_file;
    
    		unsigned long unevictable;
    
    
    		inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
    						LRU_INACTIVE_ANON);
    		active_anon = mem_cgroup_get_all_zonestat(mem_cont,
    						LRU_ACTIVE_ANON);
    		inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
    						LRU_INACTIVE_FILE);
    		active_file = mem_cgroup_get_all_zonestat(mem_cont,
    						LRU_ACTIVE_FILE);
    
    		unevictable = mem_cgroup_get_all_zonestat(mem_cont,
    							LRU_UNEVICTABLE);
    
    
    		cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
    		cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
    		cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
    		cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
    
    		cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
    
    
    static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read,
	},
    	{
    		.name = "max_usage_in_bytes",
    
    		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
    
    		.trigger = mem_cgroup_reset,
    
    		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
    	},
    	{
    		.name = "failcnt",
    
    		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
    
    		.trigger = mem_cgroup_reset,
    
    		.read_u64 = mem_cgroup_read,
    
    		.read_map = mem_control_stat_show,
    
    	{
    		.name = "force_empty",
    		.trigger = mem_cgroup_force_empty_write,
    	},
    
    	{
    		.name = "use_hierarchy",
    		.write_u64 = mem_cgroup_hierarchy_write,
    		.read_u64 = mem_cgroup_hierarchy_read,
	},
};

    #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
    static struct cftype memsw_cgroup_files[] = {
    	{
    		.name = "memsw.usage_in_bytes",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
    		.read_u64 = mem_cgroup_read,
    	},
    	{
    		.name = "memsw.max_usage_in_bytes",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
    		.trigger = mem_cgroup_reset,
    		.read_u64 = mem_cgroup_read,
    	},
    	{
    		.name = "memsw.limit_in_bytes",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
    		.write_string = mem_cgroup_write,
    		.read_u64 = mem_cgroup_read,
    	},
    	{
    		.name = "memsw.failcnt",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
    		.trigger = mem_cgroup_reset,
    		.read_u64 = mem_cgroup_read,
    	},
    };
    
    static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
    {
    	if (!do_swap_account)
    		return 0;
    	return cgroup_add_files(cont, ss, memsw_cgroup_files,
    				ARRAY_SIZE(memsw_cgroup_files));
    };
    #else
    static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
    {
    	return 0;
    }
    #endif
    
    
    static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
    {
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
    
    	/*
    	 * This routine is called against possible nodes.
    	 * But it's BUG to call kmalloc() against offline node.
    	 *
    	 * TODO: this routine can waste much memory for nodes which will
    	 *       never be onlined. It's better to use memory hotplug callback
    	 *       function.
    	 */
    
    	if (!node_state(node, N_NORMAL_MEMORY))
    		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

    	mem->info.nodeinfo[node] = pn;
    	memset(pn, 0, sizeof(*pn));
    
    
    	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
    		mz = &pn->zoneinfo[zone];
    
    		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
	}
	return 0;
}

    static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
    {
    	kfree(mem->info.nodeinfo[node]);
    }
    
    
    static int mem_cgroup_size(void)
    {
    	int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
    	return sizeof(struct mem_cgroup) + cpustat_size;
    }
    
    
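/*
 * struct mem_cgroup embeds a per-cpu statistics array sized by nr_cpu_ids,
 * so fall back to vmalloc() once it no longer fits in a single page.
 */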
    static struct mem_cgroup *mem_cgroup_alloc(void)
    {
    	struct mem_cgroup *mem;
    
    	int size = mem_cgroup_size();
    
	if (size < PAGE_SIZE)
		mem = kmalloc(size, GFP_KERNEL);
	else
		mem = vmalloc(size);

	if (mem)
		memset(mem, 0, size);
	return mem;
}

    /*
     * At destroying mem_cgroup, references from swap_cgroup can remain.
     * (scanning all at force_empty is too costly...)
     *
     * Instead of clearing all references at force_empty, we remember
     * the number of reference from swap_cgroup and free mem_cgroup when
     * it goes down to 0.
     *
 * When mem_cgroup is destroyed, mem->obsolete will be set to 1, and
 * swap_cgroup entries which point to this memcg will be ignored at swapin.
     *
     * Removal of cgroup itself succeeds regardless of refs from swap.
     */
    
    
static void mem_cgroup_free(struct mem_cgroup *mem)
{
	int node;

	if (atomic_read(&mem->refcnt) > 0)
		return;
    
    
    
    	for_each_node_state(node, N_POSSIBLE)
    		free_mem_cgroup_per_zone_info(mem, node);
    
    
	if (mem_cgroup_size() < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

    static void mem_cgroup_get(struct mem_cgroup *mem)
    {
    	atomic_inc(&mem->refcnt);
    }
    
    static void mem_cgroup_put(struct mem_cgroup *mem)
    {
    	if (atomic_dec_and_test(&mem->refcnt)) {
    		if (!mem->obsolete)
    			return;
    		mem_cgroup_free(mem);
    	}
    }
    
    
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
    static void __init enable_swap_cgroup(void)
    {
    
	if (!mem_cgroup_disabled() && really_do_swap_account)
		do_swap_account = 1;
    }
    #else
    static void __init enable_swap_cgroup(void)
    {
    }
    #endif
    
    
    static struct cgroup_subsys_state *
    mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem, *parent;
	int node;

    	mem = mem_cgroup_alloc();
    	if (!mem)
    		return ERR_PTR(-ENOMEM);
    
    	for_each_node_state(node, N_POSSIBLE)
    		if (alloc_mem_cgroup_per_zone_info(mem, node))
    			goto free_out;
    
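	/* Swap accounting can only be enabled when the root cgroup is created. */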
	if (cont->parent == NULL) {
		enable_swap_cgroup();
		parent = NULL;
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		mem->use_hierarchy = parent->use_hierarchy;
    	}
    
    	if (parent && parent->use_hierarchy) {
    		res_counter_init(&mem->res, &parent->res);
    		res_counter_init(&mem->memsw, &parent->memsw);
    	} else {
    		res_counter_init(&mem->res, NULL);
    		res_counter_init(&mem->memsw, NULL);
    	}
    
    	mem->last_scanned_child = NULL;
    
    
	return &mem->css;
free_out:
	mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

    static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
    					struct cgroup *cont)
    {
    	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
    
    	mem->obsolete = 1;
    
	mem_cgroup_force_empty(mem, false);
}

    static void mem_cgroup_destroy(struct cgroup_subsys *ss,
    				struct cgroup *cont)
    {
    
    	mem_cgroup_free(mem_cgroup_from_cont(cont));
    
    }
    
    static int mem_cgroup_populate(struct cgroup_subsys *ss,
    				struct cgroup *cont)
    {
    
    	int ret;
    
    	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
    				ARRAY_SIZE(mem_cgroup_files));