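/*
 * Remainder of alloc_mem_cgroup_per_zone_info(); the allocation of the
 * per-node structure 'pn' precedes this excerpt.  A nonzero return tells
 * mem_cgroup_create() to unwind.
 */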
	return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

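/* Free the per-node info set up by alloc_mem_cgroup_per_zone_info(). */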
static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}
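
/*
 * cgroup 'create' callback: the root cgroup uses the statically allocated
 * init_mem_cgroup, child groups get a fresh kzalloc()'d mem_cgroup, and
 * per-zone info is then allocated for every possible node.
 */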
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		init_mm.mem_cgroup = mem;
	} else
		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);

	if (mem == NULL)
		return ERR_PTR(-ENOMEM);

	memset(&mem->info, 0, sizeof(mem->info));
	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	/* success: hand the new group's css back to the cgroup core */
	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		kfree(mem);
	return ERR_PTR(-ENOMEM);
}
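
/* Empty the group (via mem_cgroup_force_empty()) before it is destroyed. */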
static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
				   struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}
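
/* Release the per-node info and the mem_cgroup itself. */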
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
			       struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	kfree(mem_cgroup_from_cont(cont));
}
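
/* Register the memory controller's control files with the new cgroup. */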
static int mem_cgroup_populate(struct cgroup_subsys *ss,
			       struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));
}
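
/*
 * Called when a task is moved between cgroups: repoint its mm->mem_cgroup
 * at the destination group.  Only the thread-group leader may move the mm;
 * the pointer is published with rcu_assign_pointer() and css reference
 * counts are adjusted accordingly.
 */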
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				 struct cgroup *cont,
				 struct cgroup *old_cont,
				 struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct is
	 * in effect owned by the leader
	 */
	if (p->tgid != p->pid)
		goto out;

	css_get(&mem->css);
	rcu_assign_pointer(mm->mem_cgroup, mem);
	css_put(&old_mem->css);

out:
	mmput(mm);
}
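
/* Registration of the memory controller with the cgroup core. */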
struct cgroup_subsys mem_cgroup_subsys = {
.name = "memory",
.subsys_id = mem_cgroup_subsys_id,
.create = mem_cgroup_create,
.pre_destroy = mem_cgroup_pre_destroy,
.destroy = mem_cgroup_destroy,
.populate = mem_cgroup_populate,

KAMEZAWA Hiroyuki
committed
.early_init = 0,