memcontrol.c
  • 	if (!mem)
    		goto charge_cur_mm;
    
    	*ptr = mem;
    
    	ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
    
    	/* drop extra refcnt from tryget */
    	css_put(&mem->css);
    	return ret;
    
    charge_cur_mm:
    	if (unlikely(!mm))
    		mm = &init_mm;
    
	return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
}

static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
					enum charge_type ctype)
{
	struct page_cgroup *pc;
    
	if (mem_cgroup_disabled())
		return;
	if (!ptr)
		return;
    	cgroup_exclude_rmdir(&ptr->css);
    
    	pc = lookup_page_cgroup(page);
    
    	mem_cgroup_lru_del_before_commit_swapcache(page);
    
    	__mem_cgroup_commit_charge(ptr, pc, ctype);
    
    	mem_cgroup_lru_add_after_commit_swapcache(page);
    
	/*
	 * Now swap is on memory. This means this page may be counted both
	 * as mem and swap, i.e. double counted.
	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
	 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
	 * may call delete_from_swap_cache() before we reach here.
	 */
    	if (do_swap_account && PageSwapCache(page)) {
    
    		swp_entry_t ent = {.val = page_private(page)};
    
		unsigned short id;
		struct mem_cgroup *memcg;

		id = swap_cgroup_record(ent, 0);
    		rcu_read_lock();
    		memcg = mem_cgroup_lookup(id);
    
    		if (memcg) {
    
    			/*
    			 * This recorded memcg can be obsolete one. So, avoid
    			 * calling css_tryget
    			 */
    
    			if (!mem_cgroup_is_root(memcg))
    
    				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
    
    			mem_cgroup_swap_statistics(memcg, false);
    
    			mem_cgroup_put(memcg);
		}
		rcu_read_unlock();
	}
	/*
	 * At swapin, we may charge a cgroup which has no tasks, so
	 * rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. Check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&ptr->css);
}

    void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
    {
    	__mem_cgroup_commit_charge_swapin(page, ptr,
    					MEM_CGROUP_CHARGE_TYPE_MAPPED);
    }
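
/*
 * Illustrative sketch, not part of the original file: how a swap-in path
 * (e.g. do_swap_page()) is expected to use the try/commit/cancel protocol
 * above. example_swapin_charge() and example_map_page() are hypothetical
 * names used only for this sketch.
 */
static bool example_map_page(struct mm_struct *mm, struct page *page);

static int example_swapin_charge(struct mm_struct *mm, struct page *page,
				 gfp_t gfp_mask)
{
	struct mem_cgroup *ptr = NULL;
	int ret;

	/* Step 1: reserve the charge; this may sleep and reclaim memory. */
	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &ptr);
	if (ret)
		return ret;

	/* Step 2: if mapping the page fails, give the reservation back. */
	if (!example_map_page(mm, page)) {
		mem_cgroup_cancel_charge_swapin(ptr); /* handles ptr == NULL */
		return -ENOMEM;
	}

	/* Step 3: commit the reserved charge to the now-mapped page. */
	mem_cgroup_commit_charge_swapin(page, ptr);
	return 0;
}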
    
    
    void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
    {
    
	if (mem_cgroup_disabled())
		return;
	if (!mem)
		return;
	mem_cgroup_cancel_charge(mem);
}
    
    static void
    __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
    {
    	struct memcg_batch_info *batch = NULL;
    	bool uncharge_memsw = true;
    	/* If swapout, usage of swap doesn't decrease */
    	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
    		uncharge_memsw = false;
	/*
	 * do_batch > 0 when unmapping pages or invalidating/truncating an inode.
	 * In those cases, the pages are freed continuously and can be expected
	 * to belong to the same cgroup, so we have a chance to coalesce the
	 * uncharges. But if this task is being killed by the OOM killer
	 * (TIF_MEMDIE), we uncharge one by one, because we want the uncharge
	 * to happen as soon as possible.
	 */
    	if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
    		goto direct_uncharge;
    
    	batch = &current->memcg_batch;
	/*
	 * Normally we would css_get() when we remember a memcg pointer.
	 * But here, res->usage is kept nonzero until the end of the series of
	 * uncharges, so it is safe to skip taking a reference on the memcg.
	 */
    	if (!batch->memcg)
    		batch->memcg = mem;
	/*
	 * In the typical case, batch->memcg == mem, so a series of uncharges
	 * can be merged into a single res_counter uncharge.
	 * If not, we uncharge the res_counter one page at a time.
	 */
    	if (batch->memcg != mem)
    		goto direct_uncharge;
    	/* remember freed charge and uncharge it later */
    	batch->bytes += PAGE_SIZE;
    	if (uncharge_memsw)
    		batch->memsw_bytes += PAGE_SIZE;
    	return;
    direct_uncharge:
    	res_counter_uncharge(&mem->res, PAGE_SIZE);
    	if (uncharge_memsw)
    		res_counter_uncharge(&mem->memsw, PAGE_SIZE);
    	return;
    }
    
/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
	struct mem_cgroup_per_zone *mz;
    
    	if (mem_cgroup_disabled())
    
    		return NULL;
    
    	if (PageSwapCache(page))
    
    		return NULL;
    
	/*
	 * Check if our page_cgroup is valid
	 */
    	pc = lookup_page_cgroup(page);
    	if (unlikely(!pc || !PageCgroupUsed(pc)))
    
    		return NULL;
    
    	lock_page_cgroup(pc);
    
    	mem = pc->mem_cgroup;
    
    
    	if (!PageCgroupUsed(pc))
    		goto unlock_out;
    
    	switch (ctype) {
    	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
    
    	case MEM_CGROUP_CHARGE_TYPE_DROP:
    
    		if (page_mapped(page))
    			goto unlock_out;
    		break;
    	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
    		if (!PageAnon(page)) {	/* Shared memory */
    			if (page->mapping && !page_is_file_cache(page))
    				goto unlock_out;
    		} else if (page_mapped(page)) /* Anon */
    				goto unlock_out;
    		break;
    	default:
		break;
	}

    	if (!mem_cgroup_is_root(mem))
    		__do_uncharge(mem, ctype);
    
    	if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
    		mem_cgroup_swap_statistics(mem, true);
    
    	mem_cgroup_charge_statistics(mem, pc, false);
    
    	ClearPageCgroupUsed(pc);
    
    	/*
    	 * pc->mem_cgroup is not cleared here. It will be accessed when it's
    	 * freed from LRU. This is safe because uncharged page is expected not
    	 * to be reused (freed soon). Exception is SwapCache, it's handled by
    	 * special functions.
    	 */
    
    	mz = page_cgroup_zoneinfo(pc);
    
	unlock_page_cgroup(pc);

	if (mem_cgroup_soft_limit_check(mem))
		mem_cgroup_update_tree(mem, page);
    
    	if (mem_cgroup_threshold_check(mem))
    		mem_cgroup_threshold(mem);
    
    	/* at swapout, this memcg will be accessed to record to swap */
    	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
    		css_put(&mem->css);
    
    	return mem;
    
    
    unlock_out:
    	unlock_page_cgroup(pc);
    
	return NULL;
}

    void mem_cgroup_uncharge_page(struct page *page)
    {
    
    	/* early check. */
    	if (page_mapped(page))
    		return;
    	if (page->mapping && !PageAnon(page))
    		return;
    
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
    }
    
    void mem_cgroup_uncharge_cache_page(struct page *page)
    {
    	VM_BUG_ON(page_mapped(page));
    
    	VM_BUG_ON(page->mapping);
    
    	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
    }
    
    
/*
 * batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect they are
 * in the same memcg. Each of those callers itself limits the number of pages
 * freed at once, so uncharge_start/end() is called properly.
 * These calls may be nested (made more than once in one context).
 */
    
    void mem_cgroup_uncharge_start(void)
    {
    	current->memcg_batch.do_batch++;
    	/* We can do nest. */
    	if (current->memcg_batch.do_batch == 1) {
    		current->memcg_batch.memcg = NULL;
    		current->memcg_batch.bytes = 0;
    		current->memcg_batch.memsw_bytes = 0;
    	}
    }
    
    void mem_cgroup_uncharge_end(void)
    {
    	struct memcg_batch_info *batch = &current->memcg_batch;
    
    	if (!batch->do_batch)
    		return;
    
    	batch->do_batch--;
    	if (batch->do_batch) /* If stacked, do nothing. */
    		return;
    
    	if (!batch->memcg)
    		return;
	/*
	 * This "batch->memcg" pointer is valid without any css_get/put,
	 * because we hide charges behind us (the batched uncharges have not
	 * yet been applied to the res_counter).
	 */
    	if (batch->bytes)
    		res_counter_uncharge(&batch->memcg->res, batch->bytes);
    	if (batch->memsw_bytes)
    		res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
    	/* forget this pointer (for sanity check) */
    	batch->memcg = NULL;
    }
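
/*
 * Illustrative sketch, not part of the original file: how a caller that frees
 * many pages at once (truncate/invalidate style) is expected to bracket its
 * uncharges so that __do_uncharge() can coalesce them into a single
 * res_counter operation per counter. example_uncharge_pages() is a
 * hypothetical name; the pages are assumed to be unmapped and already removed
 * from the page cache.
 */
static void example_uncharge_pages(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();	/* current->memcg_batch.do_batch++ */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_cache_page(pages[i]);
	mem_cgroup_uncharge_end();	/* one res_counter_uncharge() per counter */
}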
    
    
/*
 * Called after __delete_from_swap_cache() to drop the "page" account.
 * The memcg information is recorded in the swap_cgroup of "ent".
 */
    
    void
    mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
    
    {
    	struct mem_cgroup *memcg;
    
    	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
    
    	if (!swapout) /* this was a swap cache but the swap is unused ! */
    		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
    
    	memcg = __mem_cgroup_uncharge_common(page, ctype);
    
    
    	/* record memcg information */
    
    	if (do_swap_account && swapout && memcg) {
    
    		swap_cgroup_record(ent, css_id(&memcg->css));
    
    		mem_cgroup_get(memcg);
    	}
    
    	if (swapout && memcg)
    
		css_put(&memcg->css);
}

    
    #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
    /*
     * called from swap_entry_free(). remove record in swap_cgroup and
     * uncharge "memsw" account.
     */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;
	unsigned short id;

    	if (!do_swap_account)
    		return;
    
    
    	id = swap_cgroup_record(ent, 0);
    	rcu_read_lock();
    	memcg = mem_cgroup_lookup(id);
    
    	if (memcg) {
    
    		/*
    		 * We uncharge this because swap is freed.
    		 * This memcg can be obsolete one. We avoid calling css_tryget
    		 */
    
    		if (!mem_cgroup_is_root(memcg))
    
    			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
    
    		mem_cgroup_swap_statistics(memcg, false);
    
    		mem_cgroup_put(memcg);
	}
	rcu_read_unlock();
}

    /**
     * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
     * @entry: swap entry to be moved
     * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 * @need_fixup: whether we should fixup res_counters and refcounts.
     *
     * It succeeds only when the swap_cgroup's record for this entry is the same
     * as the mem_cgroup's id of @from.
     *
     * Returns 0 on success, -EINVAL on failure.
     *
     * The caller must have charged to @to, IOW, called res_counter_charge() about
     * both res and memsw, and called css_get().
     */
    static int mem_cgroup_move_swap_account(swp_entry_t entry,
    
    		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
    
    {
    	unsigned short old_id, new_id;
    
    	old_id = css_id(&from->css);
    	new_id = css_id(&to->css);
    
    	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
    		mem_cgroup_swap_statistics(from, false);
    
    		mem_cgroup_swap_statistics(to, true);
    
		/*
		 * This function is only called from task migration context now.
    		 * It postpones res_counter and refcount handling till the end
    		 * of task migration(mem_cgroup_clear_mc()) for performance
    		 * improvement. But we cannot postpone mem_cgroup_get(to)
    		 * because if the process that has been moved to @to does
    		 * swap-in, the refcount of @to might be decreased to 0.
    
    		 */
    		mem_cgroup_get(to);
    
    		if (need_fixup) {
    			if (!mem_cgroup_is_root(from))
    				res_counter_uncharge(&from->memsw, PAGE_SIZE);
    			mem_cgroup_put(from);
    			/*
    			 * we charged both to->res and to->memsw, so we should
    			 * uncharge to->res.
    			 */
    			if (!mem_cgroup_is_root(to))
    				res_counter_uncharge(&to->res, PAGE_SIZE);
    			css_put(&to->css);
    		}
    
    		return 0;
    	}
    	return -EINVAL;
    }
    #else
    static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
    
		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
	return -EINVAL;
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the old
 * page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem = NULL;
    	int ret = 0;
    
	if (mem_cgroup_disabled())
		return 0;

    	pc = lookup_page_cgroup(page);
    	lock_page_cgroup(pc);
    	if (PageCgroupUsed(pc)) {
    
    		mem = pc->mem_cgroup;
    		css_get(&mem->css);
    	}
    
	unlock_page_cgroup(pc);

	if (mem) {
    		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
    						page);
    
    		css_put(&mem->css);
    	}
    
    	*ptr = mem;
    
	return ret;
}

/* remove redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *mem,
		struct page *oldpage, struct page *newpage)
{
	struct page *target, *unused;
    	struct page_cgroup *pc;
    	enum charge_type ctype;
    
    	if (!mem)
    		return;
    
    	cgroup_exclude_rmdir(&mem->css);
    
    	/* at migration success, oldpage->mapping is NULL. */
    	if (oldpage->mapping) {
    		target = oldpage;
    		unused = NULL;
    	} else {
    		target = newpage;
    		unused = oldpage;
    	}
    
    	if (PageAnon(target))
    		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
    	else if (page_is_file_cache(target))
    		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
    	else
    		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
    
    	/* unused page is not on radix-tree now. */
    
    	if (unused)
    
    		__mem_cgroup_uncharge_common(unused, ctype);
    
    	pc = lookup_page_cgroup(target);
    
	/*
	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
	 * So, double-counting is effectively avoided.
    	 */
    	__mem_cgroup_commit_charge(mem, pc, ctype);
    
    	/*
    	 * Both of oldpage and newpage are still under lock_page().
    	 * Then, we don't have to care about race in radix-tree.
    	 * But we have to be careful that this page is unmapped or not.
    	 *
    	 * There is a case for !page_mapped(). At the start of
    	 * migration, oldpage was mapped. But now, it's zapped.
    	 * But we know *target* page is not freed/reused under us.
	 * mem_cgroup_uncharge_page() does all necessary checks.
	 */
    	if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
    		mem_cgroup_uncharge_page(target);
    
	/*
	 * At migration, we may charge a cgroup which has no tasks, so
	 * rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. Check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&mem->css);
}

/*
 * A call to try to shrink memory usage on charge failure at shmem's swapin.
     * Calling hierarchical_reclaim is not enough because we should update
     * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
     * Moreover considering hierarchy, we should reclaim from the mem_over_limit,
     * not from the memcg which this page would be charged to.
 * try_charge_swapin does all of this work properly.
 */
int mem_cgroup_shmem_charge_fallback(struct page *page,
    
    			    struct mm_struct *mm,
			    gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	if (mem_cgroup_disabled())
		return 0;

	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
	if (!ret)
		mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */

	return ret;
}

    static DEFINE_MUTEX(set_limit_mutex);
    
    
    static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
    
				unsigned long long val)
{
	int retry_count;
	u64 memswlimit;
    
    	int ret = 0;
    
    	int children = mem_cgroup_count_children(memcg);
    	u64 curusage, oldusage;
    
	/*
	 * To keep hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry-count to be a function of
	 * the number of children which we should visit in this loop.
	 */
    	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
    
    	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
    
    	while (retry_count) {
    
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			break;
    		}
    
		/*
		 * Rather than hiding all this in some helper function, do it
		 * open-coded so that what really happens is visible.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
    		mutex_lock(&set_limit_mutex);
    		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
    		if (memswlimit < val) {
    			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		ret = res_counter_set_limit(&memcg->res, val);
    
    		if (!ret) {
    			if (memswlimit == val)
    				memcg->memsw_is_minimum = true;
    			else
    				memcg->memsw_is_minimum = false;
    		}
    
    		mutex_unlock(&set_limit_mutex);
    
    		if (!ret)
    			break;
    
    
    		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
    
    						MEM_CGROUP_RECLAIM_SHRINK);
    
    		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
    		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}

	return ret;
}

    static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					unsigned long long val)
{
	int retry_count;
	u64 memlimit, oldusage, curusage;
    
    	int children = mem_cgroup_count_children(memcg);
    	int ret = -EBUSY;
    
	/* see mem_cgroup_resize_limit() */
	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
    	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
    
    	while (retry_count) {
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			break;
    		}
		/*
		 * Rather than hiding all this in some helper function, do it
		 * open-coded so that what really happens is visible.
		 * We have to guarantee mem->res.limit < mem->memsw.limit.
		 */
    		mutex_lock(&set_limit_mutex);
    		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
    		if (memlimit > val) {
    			ret = -EINVAL;
    			mutex_unlock(&set_limit_mutex);
    			break;
    		}
    		ret = res_counter_set_limit(&memcg->memsw, val);
    
    		if (!ret) {
    			if (memlimit == val)
    				memcg->memsw_is_minimum = true;
    			else
    				memcg->memsw_is_minimum = false;
    		}
    
    		mutex_unlock(&set_limit_mutex);
    
    		if (!ret)
    			break;
    
    
    		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
    
    						MEM_CGROUP_RECLAIM_NOSWAP |
    						MEM_CGROUP_RECLAIM_SHRINK);
    
    		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
    
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	return ret;
}

    unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
    						gfp_t gfp_mask, int nid,
    						int zid)
    {
    	unsigned long nr_reclaimed = 0;
    	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
    	unsigned long reclaimed;
    	int loop = 0;
    	struct mem_cgroup_tree_per_zone *mctz;
    
    	unsigned long long excess;
    
    
    	if (order > 0)
    		return 0;
    
    	mctz = soft_limit_tree_node_zone(nid, zid);
	/*
	 * This loop can run for a while, especially if mem_cgroups
	 * continuously keep exceeding their soft limit and putting the
	 * system under pressure.
	 */
    	do {
    		if (next_mz)
    			mz = next_mz;
    		else
    			mz = mem_cgroup_largest_soft_limit_node(mctz);
    		if (!mz)
    			break;
    
    		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
    						gfp_mask,
    						MEM_CGROUP_RECLAIM_SOFT);
    		nr_reclaimed += reclaimed;
    		spin_lock(&mctz->lock);
    
    		/*
    		 * If we failed to reclaim anything from this memory cgroup
    		 * it is time to move on to the next cgroup
    		 */
    		next_mz = NULL;
    		if (!reclaimed) {
    			do {
    				/*
    				 * Loop until we find yet another one.
    				 *
    				 * By the time we get the soft_limit lock
				 * again, someone might have added the
    				 * group back on the RB tree. Iterate to
    				 * make sure we get a different mem.
    				 * mem_cgroup_largest_soft_limit_node returns
    				 * NULL if no other cgroup is present on
    				 * the tree
    				 */
    				next_mz =
    				__mem_cgroup_largest_soft_limit_node(mctz);
    				if (next_mz == mz) {
    					css_put(&next_mz->mem->css);
    					next_mz = NULL;
    				} else /* next_mz == NULL or other memcg */
    					break;
    			} while (1);
    		}
    		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
    
    		excess = res_counter_soft_limit_excess(&mz->mem->res);
    
    		/*
    		 * One school of thought says that we should not add
    		 * back the node to the tree if reclaim returns 0.
    		 * But our reclaim could return 0, simply because due
    		 * to priority we are exposing a smaller subset of
    		 * memory to reclaim from. Consider this as a longer
    		 * term TODO.
    		 */
    
    		/* If excess == 0, no tree ops */
    		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
    
    		spin_unlock(&mctz->lock);
    		css_put(&mz->mem->css);
    		loop++;
    		/*
    		 * Could not reclaim anything and there are no more
    		 * mem cgroups to try or we seem to be looping without
    		 * reclaiming anything.
    		 */
    		if (!nr_reclaimed &&
    			(next_mz == NULL ||
    			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
    			break;
    	} while (!nr_reclaimed);
    	if (next_mz)
    		css_put(&next_mz->mem->css);
    	return nr_reclaimed;
    }
    
    
/*
 * This routine traverses the page_cgroups on the given list and drops them all.
 * *And* it doesn't reclaim the pages themselves, it only removes the page_cgroups.
 */
    
    static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
    
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	struct list_head *list;
	unsigned long flags, loop;
	int ret = 0;
    
    	zone = &NODE_DATA(node)->node_zones[zid];
    	mz = mem_cgroup_zoneinfo(mem, node, zid);
    
    	list = &mz->lists[lru];
    
    	loop = MEM_CGROUP_ZSTAT(mz, lru);
    	/* give some margin against EBUSY etc...*/
    	loop += 256;
    	busy = NULL;
    	while (loop--) {
    		ret = 0;
    
    		spin_lock_irqsave(&zone->lru_lock, flags);
    
    		if (list_empty(list)) {
    
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
    		pc = list_entry(list->prev, struct page_cgroup, lru);
    		if (busy == pc) {
			list_move(&pc->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
		if (ret == -ENOMEM)
			break;

    		if (ret == -EBUSY || ret == -EINVAL) {
    			/* found lock contention or "pc" is obsolete. */
    			busy = pc;
    			cond_resched();
    		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
    		return -EBUSY;
    	return ret;
    
    }
    
    /*
 * Make the mem_cgroup's charge 0 if there are no tasks.
 * This enables deleting this mem_cgroup.
     */
    
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = mem->css.cgroup;

	css_get(&mem->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	do {
		ret = -EBUSY;
    		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
    			goto out;
    		ret = -EINTR;
		if (signal_pending(current))
			goto out;
    		/* This is for making all *used* pages to be on LRU. */
    		lru_add_drain_all();
    
		drain_all_stock_sync();
		ret = 0;
		for_each_node_state(node, N_HIGH_MEMORY) {
    
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list l;
				for_each_lru(l) {
					ret = mem_cgroup_force_empty_list(mem,
							node, zid, l);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
    		/* it seems parent cgroup doesn't have enough mem */
    		if (ret == -ENOMEM)
    			goto try_to_free;
    
    	/* "ret" should also be checked to ensure all lists are empty. */
    	} while (mem->res.usage > 0 || ret);
    
    	/* returns EBUSY if there is a task or if we come here twice. */
    	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
    
	/* we call try-to-free pages to make this cgroup empty */
    	lru_add_drain_all();
    
    	/* try to free all pages in this cgroup */
    	shrink = 1;
    	while (nr_retries && mem->res.usage > 0) {
    		int progress;
    
    
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			goto out;
    		}
    
    		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
						false, get_swappiness(mem));
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}
	lru_add_drain();
    
    	/* try move_account...there may be some *locked* pages. */
    
	goto move_account;
}

    int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
    {
    	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
    }
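
/*
 * Illustrative sketch, not part of the excerpt above: roughly how the handler
 * is expected to be hooked up as a cgroup control file, so that writing to
 * "memory.force_empty" reaches mem_cgroup_force_empty(). The actual cftype
 * table lives elsewhere in this file; the entry below is an assumption shown
 * only to make the call path concrete.
 */
static struct cftype example_force_empty_cft = {
	.name		= "force_empty",
	.trigger	= mem_cgroup_force_empty_write,
};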
    
    
    
    static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
    {
    	return mem_cgroup_from_cont(cont)->use_hierarchy;
    }
    
    static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
    					u64 val)
    {
    	int retval = 0;
    	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
    	struct cgroup *parent = cont->parent;
    	struct mem_cgroup *parent_mem = NULL;
    
    	if (parent)
    		parent_mem = mem_cgroup_from_cont(parent);
    
    	cgroup_lock();
	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
    	 * occur, provided the current cgroup has no children.
    	 *
    	 * For the root cgroup, parent_mem is NULL, we allow value to be
    	 * set if there are no children.
    	 */
    	if ((!parent_mem || !parent_mem->use_hierarchy) &&
    				(val == 1 || val == 0)) {
    		if (list_empty(&cont->children))
    			mem->use_hierarchy = val;
    		else
    			retval = -EBUSY;
    	} else
    		retval = -EINVAL;
    	cgroup_unlock();
    
    	return retval;
    }
    
    
    struct mem_cgroup_idx_data {
    	s64 val;
    	enum mem_cgroup_stat_index idx;
    };
    
    static int
    mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
    {
    	struct mem_cgroup_idx_data *d = data;
    	d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
    	return 0;
    }
    
    static void
    mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
    				enum mem_cgroup_stat_index idx, s64 *val)
    {
    	struct mem_cgroup_idx_data d;
    	d.idx = idx;
    	d.val = 0;
    	mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
    	*val = d.val;
    }
    
    
    static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
    {
    	u64 idx_val, val;
    
    	if (!mem_cgroup_is_root(mem)) {
    		if (!swap)
    			return res_counter_read_u64(&mem->res, RES_USAGE);
    		else
    			return res_counter_read_u64(&mem->memsw, RES_USAGE);
    	}
    
    	mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
    	val = idx_val;
    	mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
    	val += idx_val;
    
    	if (swap) {
    		mem_cgroup_get_recursive_idx_stat(mem,
    				MEM_CGROUP_STAT_SWAPOUT, &idx_val);
    		val += idx_val;
    	}
    
    	return val << PAGE_SHIFT;
    }
    
    
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	u64 val = 0;
	int type, name;
    
    	type = MEMFILE_TYPE(cft->private);
    	name = MEMFILE_ATTR(cft->private);
    	switch (type) {
    	case _MEM:
    
    		if (name == RES_USAGE)
    			val = mem_cgroup_usage(mem, false);
    		else
    
    			val = res_counter_read_u64(&mem->res, name);
    
    		break;
    	case _MEMSWAP:
    
    		if (name == RES_USAGE)
    			val = mem_cgroup_usage(mem, true);
    		else
    
    			val = res_counter_read_u64(&mem->memsw, name);
    
    		break;
    	default:
    		BUG();
    		break;
    	}
	return val;
}

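/*
 * Illustrative sketch, not part of the excerpt above: cft->private packs a
 * counter type (_MEM or _MEMSWAP) together with a res_counter member
 * (RES_USAGE, RES_LIMIT, ...) into one integer, which MEMFILE_TYPE() and
 * MEMFILE_ATTR() unpack. The real macros are defined elsewhere in this file;
 * the 16-bit split below is an assumption used only to make the decoding
 * above concrete.
 */
#define EXAMPLE_MEMFILE_PRIVATE(type, attr)	(((type) << 16) | (attr))
#define EXAMPLE_MEMFILE_TYPE(val)		(((val) >> 16) & 0xffff)
#define EXAMPLE_MEMFILE_ATTR(val)		((val) & 0xffff)
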
    /*
     * The user of this function is...
     * RES_LIMIT.
     */
    
    static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
    
    	int type, name;
    
    	unsigned long long val;
    	int ret;
    
    
    	type = MEMFILE_TYPE(cft->private);
    	name = MEMFILE_ATTR(cft->private);
    	switch (name) {
    
    	case RES_LIMIT:
    
    		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
    			ret = -EINVAL;
    			break;
    		}
    
    		/* This function does all necessary parse...reuse it */
    		ret = res_counter_memparse_write_strategy(buffer, &val);
    
    		if (ret)
    			break;
    		if (type == _MEM)
    
    			ret = mem_cgroup_resize_limit(memcg, val);
    
    		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	case RES_SOFT_LIMIT:
    		ret = res_counter_memparse_write_strategy(buffer, &val);
    		if (ret)
    			break;
    		/*
    		 * For memsw, soft limits are hard to implement in terms
    		 * of semantics, for now, we support soft limits for
    		 * control without swap
    		 */
    		if (type == _MEM)
    			ret = res_counter_set_soft_limit(&memcg->res, val);
    		else
    			ret = -EINVAL;
    		break;
    
    	default:
    		ret = -EINVAL; /* should be BUG() ? */
    		break;