		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_seq_string = memcg_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.flags = CFTYPE_INSANE,
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
    
    	{
    		.name = "swappiness",
    		.read_u64 = mem_cgroup_swappiness_read,
    		.write_u64 = mem_cgroup_swappiness_write,
    	},
    
    	{
    		.name = "move_charge_at_immigrate",
    		.read_u64 = mem_cgroup_move_charge_read,
    		.write_u64 = mem_cgroup_move_charge_write,
    	},
    
    	{
    		.name = "oom_control",
    
    		.read_map = mem_cgroup_oom_control_read,
    		.write_u64 = mem_cgroup_oom_control_write,
    
    		.register_event = mem_cgroup_oom_register_event,
    		.unregister_event = mem_cgroup_oom_unregister_event,
    		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
    	},
    
    	{
    		.name = "pressure_level",
    		.register_event = vmpressure_register_event,
    		.unregister_event = vmpressure_unregister_event,
    	},
    
    #ifdef CONFIG_NUMA
    	{
    		.name = "numa_stat",
    
		.read_seq_string = memcg_numa_stat_show,
	},
#endif
    #ifdef CONFIG_MEMCG_KMEM
    	{
    		.name = "kmem.limit_in_bytes",
    		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
    		.write_string = mem_cgroup_write,
    		.read = mem_cgroup_read,
    	},
    	{
    		.name = "kmem.usage_in_bytes",
    		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
    		.read = mem_cgroup_read,
    	},
    	{
    		.name = "kmem.failcnt",
    		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
    		.trigger = mem_cgroup_reset,
    		.read = mem_cgroup_read,
    	},
    	{
    		.name = "kmem.max_usage_in_bytes",
    		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
    		.trigger = mem_cgroup_reset,
    		.read = mem_cgroup_read,
    	},
    
    #ifdef CONFIG_SLABINFO
    	{
    		.name = "kmem.slabinfo",
    		.read_seq_string = mem_cgroup_slabinfo_read,
    	},
#endif
#endif
	{ },	/* terminate */
};

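/*
 * Note on the event files above (memory.oom_control, memory.pressure_level,
 * usage thresholds): userspace consumes them through the cgroup v1 eventfd
 * interface (the register_event/unregister_event callbacks). A rough,
 * illustrative sequence — mount point and group name are hypothetical:
 *
 *   efd = eventfd(0, 0);
 *   cfd = open("/sys/fs/cgroup/memory/grp/memory.pressure_level", O_RDONLY);
 *   write "<efd> <cfd> low" (or "medium"/"critical") to cgroup.event_control;
 *   read(efd, ...) then returns whenever vmpressure reports that level.
 */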
    #ifdef CONFIG_MEMCG_SWAP
    static struct cftype memsw_cgroup_files[] = {
    	{
    		.name = "memsw.usage_in_bytes",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
    		.read = mem_cgroup_read,
    		.register_event = mem_cgroup_usage_register_event,
    		.unregister_event = mem_cgroup_usage_unregister_event,
    	},
    	{
    		.name = "memsw.max_usage_in_bytes",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
    		.trigger = mem_cgroup_reset,
    		.read = mem_cgroup_read,
    	},
    	{
    		.name = "memsw.limit_in_bytes",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
    		.write_string = mem_cgroup_write,
    		.read = mem_cgroup_read,
    	},
    	{
    		.name = "memsw.failcnt",
    		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
    		.trigger = mem_cgroup_reset,
    		.read = mem_cgroup_read,
    	},
    	{ },	/* terminate */
    };
    #endif
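/*
 * Illustrative userspace usage of the control files defined above (cgroup v1,
 * assuming the memory controller is mounted at /sys/fs/cgroup/memory and a
 * hypothetical group "grp" exists). Writes to failcnt/max_usage_in_bytes go
 * through the mem_cgroup_reset trigger and reset the counter:
 *
 *   echo 512M > /sys/fs/cgroup/memory/grp/memory.limit_in_bytes
 *   echo 1G   > /sys/fs/cgroup/memory/grp/memory.memsw.limit_in_bytes
 *   cat         /sys/fs/cgroup/memory/grp/memory.memsw.usage_in_bytes
 *   echo 0    > /sys/fs/cgroup/memory/grp/memory.memsw.failcnt
 *   echo 0    > /sys/fs/cgroup/memory/grp/memory.memsw.max_usage_in_bytes
 */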
    
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use memory hotplug callback
	 *       function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		lruvec_init(&mz->lruvec);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
		mz->memcg = memcg;
	}
	memcg->nodeinfo[node] = pn;
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
	kfree(memcg->nodeinfo[node]);
}
    
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *memcg;
	size_t size = memcg_size();

	/* Can be very big if nr_node_ids is very big */
	if (size < PAGE_SIZE)
		memcg = kzalloc(size, GFP_KERNEL);
	else
		memcg = vzalloc(size);

	if (memcg == NULL)
		return NULL;

	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!memcg->stat)
		goto out_free;
	spin_lock_init(&memcg->pcp_counter_lock);
	return memcg;

out_free:
	if (size < PAGE_SIZE)
		kfree(memcg);
	else
		vfree(memcg);
	return NULL;
}

/*
 * At destroying mem_cgroup, references from swap_cgroup can remain.
 * (scanning all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free mem_cgroup when
 * it goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
	int node;
	size_t size = memcg_size();

	mem_cgroup_remove_from_trees(memcg);
	free_css_id(&mem_cgroup_subsys, &memcg->css);

	for_each_node(node)
		free_mem_cgroup_per_zone_info(memcg, node);

	free_percpu(memcg->stat);
    
    	/*
    	 * We need to make sure that (at least for now), the jump label
    	 * destruction code runs outside of the cgroup lock. This is because
    	 * get_online_cpus(), which is called from the static_branch update,
    	 * can't be called inside the cgroup_lock. cpusets are the ones
    	 * enforcing this dependency, so if they ever change, we might as well.
    	 *
    	 * schedule_work() will guarantee this happens. Be careful if you need
    	 * to move this code around, and make sure it is outside
    	 * the cgroup_lock.
    	 */
    
    	disarm_static_keys(memcg);
    
    	if (size < PAGE_SIZE)
    		kfree(memcg);
    	else
		vfree(memcg);
}

    /*
     * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
     */
    
struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(memcg->res.parent, res);
}
    EXPORT_SYMBOL(parent_mem_cgroup);
    
    static void __init mem_cgroup_soft_limit_tree_init(void)
    {
    	struct mem_cgroup_tree_per_node *rtpn;
    	struct mem_cgroup_tree_per_zone *rtpz;
    	int tmp, node, zone;
    
    	for_each_node(node) {
    		tmp = node;
    		if (!node_state(node, N_NORMAL_MEMORY))
    			tmp = -1;
    		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
    		BUG_ON(!rtpn);
    
    		soft_limit_tree.rb_tree_per_node[node] = rtpn;
    
    		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
    			rtpz = &rtpn->rb_tree_per_zone[zone];
    			rtpz->rb_root = RB_ROOT;
    			spin_lock_init(&rtpz->lock);
    		}
    	}
    }
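/*
 * Layout note: soft_limit_tree holds one mem_cgroup_tree_per_node entry per
 * node, each containing MAX_NR_ZONES rb-trees protected by their own
 * spinlock. Memcgs whose usage exceeds their soft limit are keyed into the
 * per-zone tree (see mz->usage_in_excess and mz->on_tree above) so that
 * reclaim can quickly find the groups furthest over their soft limit.
 */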
    
    
static struct cgroup_subsys_state * __ref
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct mem_cgroup *memcg;
	long error = -ENOMEM;
	int node;

	memcg = mem_cgroup_alloc();
	if (!memcg)
		return ERR_PTR(error);

	for_each_node(node)
		if (alloc_mem_cgroup_per_zone_info(memcg, node))
			goto free_out;

	/* root ? */
	if (parent_css == NULL) {
		root_mem_cgroup = memcg;
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);
	}

    	memcg->last_scanned_node = MAX_NUMNODES;
    	INIT_LIST_HEAD(&memcg->oom_notify);
    	memcg->move_charge_at_immigrate = 0;
    	mutex_init(&memcg->thresholds_lock);
    	spin_lock_init(&memcg->move_lock);
    
    	vmpressure_init(&memcg->vmpressure);
    
    
    	return &memcg->css;
    
    free_out:
    	__mem_cgroup_free(memcg);
    	return ERR_PTR(error);
    }
    
static int
mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
	int error = 0;

	if (!parent)
		return 0;

	mutex_lock(&memcg_create_mutex);

	memcg->use_hierarchy = parent->use_hierarchy;
	memcg->oom_kill_disable = parent->oom_kill_disable;
	memcg->swappiness = mem_cgroup_swappiness(parent);

	if (parent->use_hierarchy) {
		res_counter_init(&memcg->res, &parent->res);
		res_counter_init(&memcg->memsw, &parent->memsw);
		res_counter_init(&memcg->kmem, &parent->kmem);

		/*
		 * No need to take a reference to the parent because cgroup
		 * core guarantees its existence.
		 */
	} else {
		res_counter_init(&memcg->res, NULL);
		res_counter_init(&memcg->memsw, NULL);
		res_counter_init(&memcg->kmem, NULL);

		/*
		 * Deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense so let the cgroup subsystem know about this
		 * unfortunate state in our controller.
		 */
		if (parent != root_mem_cgroup)
			mem_cgroup_subsys.broken_hierarchy = true;
	}

	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
	mutex_unlock(&memcg_create_mutex);
	return error;
}

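/*
 * When use_hierarchy is inherited as true above, the child's res/memsw/kmem
 * counters are initialized with the parent's counters as their parent, so a
 * charge against the child is also charged against (and limited by) every
 * ancestor. With use_hierarchy false the counters stand alone, which is why
 * deeper nesting in that mode is flagged as a broken hierarchy.
 */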
    /*
 * Announce to all parents that a group from their hierarchy is gone.
     */
    static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
    {
    	struct mem_cgroup *parent = memcg;
    
    	while ((parent = parent_mem_cgroup(parent)))
    
    		mem_cgroup_iter_invalidate(parent);
    
    
    	/*
    	 * if the root memcg is not hierarchical we have to check it
	 * explicitly.
    	 */
    	if (!root_mem_cgroup->use_hierarchy)
    
		mem_cgroup_iter_invalidate(root_mem_cgroup);
}

static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
    
    	kmem_cgroup_css_offline(memcg);
    
    
    	mem_cgroup_invalidate_reclaim_iterators(memcg);
    
    	mem_cgroup_reparent_charges(memcg);
    
    	mem_cgroup_destroy_all_caches(memcg);
    
	vmpressure_cleanup(&memcg->vmpressure);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg_destroy_kmem(memcg);
	__mem_cgroup_free(memcg);
}

    #ifdef CONFIG_MMU
    
    /* Handlers for move charge at task migration. */
    
    #define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
    	int ret = 0;
    	int batch_count = PRECHARGE_COUNT_AT_ONCE;
    
    	struct mem_cgroup *memcg = mc.to;
    
    	if (mem_cgroup_is_root(memcg)) {
    
    		mc.precharge += count;
    		/* we don't need css_get for root */
    		return ret;
    	}
    	/* try to charge at once */
    	if (count > 1) {
    		struct res_counter *dummy;
    		/*
    
    		 * "memcg" cannot be under rmdir() because we've already checked
    
    		 * by cgroup_lock_live_cgroup() that it is not removed and we
    		 * are still under the same cgroup_mutex. So we can postpone
    		 * css_get().
    		 */
    
		if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
    		if (do_swap_account && res_counter_charge(&memcg->memsw,
    
    						PAGE_SIZE * count, &dummy)) {
    
    			res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
    
    			goto one_by_one;
    		}
    		mc.precharge += count;
    		return ret;
    	}
    one_by_one:
    	/* fall back to one by one charge */
    	while (count--) {
    		if (signal_pending(current)) {
    			ret = -EINTR;
    			break;
    		}
    		if (!batch_count--) {
    			batch_count = PRECHARGE_COUNT_AT_ONCE;
    			cond_resched();
    		}
    
		ret = __mem_cgroup_try_charge(NULL,
					GFP_KERNEL, 1, &memcg, false);
		if (ret)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return ret;
		mc.precharge++;
	}
	return ret;
}

/**
 * get_mctgt_type - get the target type of a moving charge
 * @vma: the vma to which the pte to be checked belongs
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: pointer where the target page or swap entry is stored (can be NULL)
 *
 * Returns
 *   0 (MC_TARGET_NONE): the pte is not a target for move charge.
 *   1 (MC_TARGET_PAGE): the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2 (MC_TARGET_SWAP): the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is stored
 *     in target->ent.
 *
 * Called with the pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE = 0,
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

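/*
 * get_mctgt_type() below classifies a pte by delegating to one of three
 * helpers: mc_handle_present_pte() for present ptes, mc_handle_swap_pte()
 * for swap ptes, and mc_handle_file_pte() for none/file ptes backed by a
 * mapped file. Each helper either returns the candidate page (with a
 * reference held) or fills in a swap entry; the caller then decides whether
 * it is currently charged to mc.from.
 */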
    static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
    	struct page *page = vm_normal_page(vma, addr, ptent);
    
    	if (!page || !page_mapped(page))
    		return NULL;
    	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon())
			return NULL;
    
    	} else if (!move_file())
    		/* we ignore mapcount for file pages */
    
    		return NULL;
    	if (!get_page_unless_zero(page))
    		return NULL;
    
    	return page;
    }
    
    
#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
    			unsigned long addr, pte_t ptent, swp_entry_t *entry)
    {
    	struct page *page = NULL;
    	swp_entry_t ent = pte_to_swp_entry(ptent);
    
    	if (!move_anon() || non_swap_entry(ent))
    		return NULL;
    
    	/*
    	 * Because lookup_swap_cache() updates some statistics counter,
    	 * we call find_get_page() with swapper_space directly.
    	 */
    
    	page = find_get_page(swap_address_space(ent), ent.val);
    
    	if (do_swap_account)
    		entry->val = ent.val;
    
    	return page;
    }
    
    #else
    static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
    			unsigned long addr, pte_t ptent, swp_entry_t *entry)
    {
    	return NULL;
    }
    #endif
    
    static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
    			unsigned long addr, pte_t ptent, swp_entry_t *entry)
    {
    	struct page *page = NULL;
    	struct address_space *mapping;
    	pgoff_t pgoff;
    
    	if (!vma->vm_file) /* anonymous vma */
    		return NULL;
    	if (!move_file())
    		return NULL;
    
    	mapping = vma->vm_file->f_mapping;
    	if (pte_none(ptent))
    		pgoff = linear_page_index(vma, addr);
    	else /* pte_file(ptent) is true */
    		pgoff = pte_to_pgoff(ptent);
    
    	/* page is moved even if it's not RSS of this task(page-faulted). */
    
    	page = find_get_page(mapping, pgoff);
    
    #ifdef CONFIG_SWAP
    	/* shmem/tmpfs may report page out on swap: account for that too. */
    	if (radix_tree_exceptional_entry(page)) {
    		swp_entry_t swap = radix_to_swp_entry(page);
    
		if (do_swap_account)
			*entry = swap;
		page = find_get_page(swap_address_space(swap), swap.val);
	}
#endif
	return page;
}

    static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
    
    		unsigned long addr, pte_t ptent, union mc_target *target)
    {
    	struct page *page = NULL;
    	struct page_cgroup *pc;
    
    	enum mc_target_type ret = MC_TARGET_NONE;
    
    	swp_entry_t ent = { .val = 0 };
    
    	if (pte_present(ptent))
    		page = mc_handle_present_pte(vma, addr, ptent);
    	else if (is_swap_pte(ptent))
    		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
    
    	else if (pte_none(ptent) || pte_file(ptent))
    		page = mc_handle_file_pte(vma, addr, ptent, &ent);
    
    
	if (!page && !ent.val)
		return ret;
    	if (page) {
    		pc = lookup_page_cgroup(page);
    		/*
    		 * Do only loose check w/o page_cgroup lock.
    		 * mem_cgroup_move_account() checks the pc is valid or not under
    		 * the lock.
    		 */
    		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
    			ret = MC_TARGET_PAGE;
    			if (target)
    				target->page = page;
    		}
    		if (!ret || !target)
    			put_page(page);
    	}
    
    	/* There is a swap entry and a page doesn't exist or isn't charged */
    	if (ent.val && !ret &&
    
    			css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
    
    		ret = MC_TARGET_SWAP;
    		if (target)
			target->ent = ent;
	}
	return ret;
}

    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    /*
     * We don't consider swapping or file mapped pages because THP does not
     * support them for now.
     * Caller should make sure that pmd_trans_huge(pmd) is true.
     */
    static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
    		unsigned long addr, pmd_t pmd, union mc_target *target)
    {
    	struct page *page = NULL;
    	struct page_cgroup *pc;
    	enum mc_target_type ret = MC_TARGET_NONE;
    
    	page = pmd_page(pmd);
    	VM_BUG_ON(!page || !PageHead(page));
    	if (!move_anon())
    		return ret;
    	pc = lookup_page_cgroup(page);
    	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
    		ret = MC_TARGET_PAGE;
    		if (target) {
    			get_page(page);
    			target->page = page;
    		}
    	}
    	return ret;
    }
    #else
    static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
    		unsigned long addr, pmd_t pmd, union mc_target *target)
    {
    	return MC_TARGET_NONE;
    }
    #endif
    
    
    static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
    					unsigned long addr, unsigned long end,
    					struct mm_walk *walk)
    {
    	struct vm_area_struct *vma = walk->private;
    	pte_t *pte;
    	spinlock_t *ptl;
    
    
    	if (pmd_trans_huge_lock(pmd, vma) == 1) {
    		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
    			mc.precharge += HPAGE_PMD_NR;
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

    	if (pmd_trans_unstable(pmd))
    		return 0;
    
    	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    	for (; addr != end; pte++, addr += PAGE_SIZE)
    
    		if (get_mctgt_type(vma, addr, *pte, NULL))
    
    			mc.precharge++;	/* increment precharge temporarily */
    	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

    static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
    {
    	unsigned long precharge;
    	struct vm_area_struct *vma;
    
    
    	down_read(&mm->mmap_sem);
    
    	for (vma = mm->mmap; vma; vma = vma->vm_next) {
    		struct mm_walk mem_cgroup_count_precharge_walk = {
    			.pmd_entry = mem_cgroup_count_precharge_pte_range,
    			.mm = mm,
    			.private = vma,
    		};
    		if (is_vm_hugetlb_page(vma))
    			continue;
    		walk_page_range(vma->vm_start, vma->vm_end,
    					&mem_cgroup_count_precharge_walk);
    	}
    
    	up_read(&mm->mmap_sem);
    
    
    	precharge = mc.precharge;
    	mc.precharge = 0;
    
    	return precharge;
    }
    
    static int mem_cgroup_precharge_mc(struct mm_struct *mm)
    {
    
    	unsigned long precharge = mem_cgroup_count_precharge(mm);
    
    	VM_BUG_ON(mc.moving_task);
    	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

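/*
 * Overview of the charge-moving protocol (active only when the destination
 * group has memory.move_charge_at_immigrate enabled): can_attach() records
 * mc.from/mc.to and precharges mc.to for every movable page counted by
 * mem_cgroup_count_precharge(); attach() (mem_cgroup_move_task) then walks
 * the mm again and moves each page or swap entry, consuming mc.precharge;
 * cancel_attach() or any failure path calls mem_cgroup_clear_mc() to return
 * the leftover charges and wake up waiters.
 */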
    /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	int i;
    	struct mem_cgroup *from = mc.from;
    	struct mem_cgroup *to = mc.to;
    
    	/* we must uncharge all the leftover precharges from mc.to */
    
    	if (mc.precharge) {
    		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
    		mc.precharge = 0;
    	}
    	/*
    	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
    	 * we must uncharge here.
    	 */
    	if (mc.moved_charge) {
    		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
    	/* we must fixup refcnts and charges */
    	if (mc.moved_swap) {
    		/* uncharge swap account from the old cgroup */
    		if (!mem_cgroup_is_root(mc.from))
    			res_counter_uncharge(&mc.from->memsw,
    						PAGE_SIZE * mc.moved_swap);
    
    
    		for (i = 0; i < mc.moved_swap; i++)
    			css_put(&mc.from->css);
    
    
    		if (!mem_cgroup_is_root(mc.to)) {
    			/*
    			 * we charged both to->res and to->memsw, so we should
    			 * uncharge to->res.
    			 */
    			res_counter_uncharge(&mc.to->res,
    						PAGE_SIZE * mc.moved_swap);
    		}
    
		/* we've already done css_get(mc.to) */
		mc.moved_swap = 0;
	}
    	memcg_oom_recover(from);
    	memcg_oom_recover(to);
    	wake_up_all(&mc.waitq);
    }
    
    static void mem_cgroup_clear_mc(void)
    {
    	struct mem_cgroup *from = mc.from;
    
    	/*
    	 * we must clear moving_task before waking up waiters at the end of
    	 * task migration.
    	 */
    	mc.moving_task = NULL;
    	__mem_cgroup_clear_mc();
    
    	spin_lock(&mc.lock);
    
    	mc.from = NULL;
    	mc.to = NULL;
    
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	int ret = 0;
    	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
    
    	unsigned long move_charge_at_immigrate;
    
    	/*
    	 * We are now commited to this value whatever it is. Changes in this
    	 * tunable will only affect upcoming migrations, not the current one.
    	 * So we need to save it, and keep it going.
    	 */
    	move_charge_at_immigrate  = memcg->move_charge_at_immigrate;
    	if (move_charge_at_immigrate) {
    
    		struct mm_struct *mm;
    		struct mem_cgroup *from = mem_cgroup_from_task(p);
    
    
    		VM_BUG_ON(from == memcg);
    
    
    		mm = get_task_mm(p);
    		if (!mm)
    			return 0;
    		/* We move charges only when we move a owner of the mm */
    
    		if (mm->owner == p) {
    			VM_BUG_ON(mc.from);
    			VM_BUG_ON(mc.to);
    			VM_BUG_ON(mc.precharge);
    
    			VM_BUG_ON(mc.moved_charge);
    
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
    			spin_lock(&mc.lock);
    
    			mc.from = from;
    
    			mc.to = memcg;
    
    			mc.immigrate_flags = move_charge_at_immigrate;
    
    			spin_unlock(&mc.lock);
    
    			/* We set mc.moving_task later */
    
    
    			ret = mem_cgroup_precharge_mc(mm);
    			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
	mem_cgroup_clear_mc();
}

    static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
    				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
    	int ret = 0;
    	struct vm_area_struct *vma = walk->private;
    	pte_t *pte;
    	spinlock_t *ptl;
    
    	enum mc_target_type target_type;
    	union mc_target target;
    	struct page *page;
    	struct page_cgroup *pc;
    
    	/*
    	 * We don't take compound_lock() here but no race with splitting thp
    	 * happens because:
    	 *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
    	 *    under splitting, which means there's no concurrent thp split,
    	 *  - if another thread runs into split_huge_page() just after we
    	 *    entered this if-block, the thread must wait for page table lock
    	 *    to be unlocked in __split_huge_page_splitting(), where the main
    	 *    part of thp split is not executed yet.
    	 */
    	if (pmd_trans_huge_lock(pmd, vma) == 1) {
    
    		if (mc.precharge < HPAGE_PMD_NR) {
    
    			spin_unlock(&vma->vm_mm->page_table_lock);
    			return 0;
    		}
    		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
    		if (target_type == MC_TARGET_PAGE) {
    			page = target.page;
    			if (!isolate_lru_page(page)) {
    				pc = lookup_page_cgroup(page);
				if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
							pc, mc.from, mc.to)) {
    					mc.precharge -= HPAGE_PMD_NR;
    					mc.moved_charge += HPAGE_PMD_NR;
    				}
    				putback_lru_page(page);
    			}
    			put_page(page);
    		}
		spin_unlock(&vma->vm_mm->page_table_lock);
		return 0;
	}

    	if (pmd_trans_unstable(pmd))
    		return 0;
    
    retry:
    	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    	for (; addr != end; addr += PAGE_SIZE) {
    		pte_t ptent = *(pte++);
    
    		swp_entry_t ent;
    
    		switch (get_mctgt_type(vma, addr, ptent, &target)) {
    
    		case MC_TARGET_PAGE:
    			page = target.page;
    			if (isolate_lru_page(page))
    				goto put;
    			pc = lookup_page_cgroup(page);
    
			if (!mem_cgroup_move_account(page, 1, pc,
							mc.from, mc.to)) {
    				mc.precharge--;
    
    				/* we uncharge from mc.from later. */
    				mc.moved_charge++;
    
    			}
    			putback_lru_page(page);
    
    put:			/* get_mctgt_type() gets the page */
    
    			put_page(page);
    			break;
    
    		case MC_TARGET_SWAP:
    			ent = target.ent;
    
    			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
    
    				mc.precharge--;
    
    				/* we fixup refcnts and charges later. */
    				mc.moved_swap++;
			}
			break;
    		default:
    			break;
    		}
    	}
    	pte_unmap_unlock(pte - 1, ptl);
    	cond_resched();
    
    	if (addr != end) {
    		/*
    		 * We have consumed all precharges we got in can_attach().
    		 * We try charge one by one, but don't do any additional
    		 * charges to mc.to if we have failed in charge once in attach()
    		 * phase.
    		 */
    
    		ret = mem_cgroup_do_precharge(1);
    
    		if (!ret)
    			goto retry;
    	}
    
    	return ret;
    }
    
    static void mem_cgroup_move_charge(struct mm_struct *mm)
    {
    	struct vm_area_struct *vma;
    
    	lru_add_drain_all();
    
    retry:
    	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
    		/*
		 * Someone who is holding the mmap_sem might be waiting in
    		 * waitq. So we cancel all extra charges, wake up all waiters,
    		 * and retry. Because we cancel precharges, we might not be able
    		 * to move enough charges, but moving charge is a best-effort
    		 * feature anyway, so it wouldn't be a big problem.
    		 */
    		__mem_cgroup_clear_mc();
    		cond_resched();
    		goto retry;
    	}
    
    	for (vma = mm->mmap; vma; vma = vma->vm_next) {
    		int ret;
    		struct mm_walk mem_cgroup_move_charge_walk = {
    			.pmd_entry = mem_cgroup_move_charge_pte_range,
    			.mm = mm,
    			.private = vma,
    		};
    		if (is_vm_hugetlb_page(vma))
    			continue;
    		ret = walk_page_range(vma->vm_start, vma->vm_end,
    						&mem_cgroup_move_charge_walk);
    		if (ret)
    			/*
    			 * means we have consumed all precharges and failed in
    			 * doing additional charge. Just abandon here.
    			 */
    			break;
    	}
    
	up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	struct task_struct *p = cgroup_taskset_first(tset);
	struct mm_struct *mm = get_task_mm(p);

	if (mm) {
		if (mc.to)
			mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	if (mc.to)
		mem_cgroup_clear_mc();
}

    #else	/* !CONFIG_MMU */
    
static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
				     struct cgroup_taskset *tset)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
				 struct cgroup_taskset *tset)
{
}
#endif

    /*
     * Cgroup retains root cgroups across [un]mount cycles making it necessary
     * to verify sane_behavior flag on each mount attempt.
     */
    
    static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
    
    {
    	/*
    	 * use_hierarchy is forced with sane_behavior.  cgroup core
    	 * guarantees that @root doesn't have any children, so turning it
    	 * on for the root memcg is enough.
    	 */
    
    	if (cgroup_sane_behavior(root_css->cgroup))
		mem_cgroup_from_css(root_css)->use_hierarchy = true;
}

    struct cgroup_subsys mem_cgroup_subsys = {
    	.name = "memory",
    	.subsys_id = mem_cgroup_subsys_id,
    
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
    
    	.css_offline = mem_cgroup_css_offline,
    	.css_free = mem_cgroup_css_free,
    
    	.can_attach = mem_cgroup_can_attach,
    	.cancel_attach = mem_cgroup_cancel_attach,
    
    	.attach = mem_cgroup_move_task,
    
    	.bind = mem_cgroup_bind,
    
    	.base_cftypes = mem_cgroup_files,