Newer
Older
for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
s64 val;
val = mem_cgroup_read_stat(stat, i);
val *= mem_cgroup_stat_desc[i].unit;
cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);

KAMEZAWA Hiroyuki
committed
/* showing # of active pages */
{
unsigned long active_anon, inactive_anon;
unsigned long active_file, inactive_file;
inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
LRU_INACTIVE_ANON);
active_anon = mem_cgroup_get_all_zonestat(mem_cont,
LRU_ACTIVE_ANON);
inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
LRU_INACTIVE_FILE);
active_file = mem_cgroup_get_all_zonestat(mem_cont,
LRU_ACTIVE_FILE);
unevictable = mem_cgroup_get_all_zonestat(mem_cont,
LRU_UNEVICTABLE);
cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);

KAMEZAWA Hiroyuki
committed
}
return 0;
}
static struct cftype mem_cgroup_files[] = {
{
.read_u64 = mem_cgroup_read,
{
.name = "max_usage_in_bytes",
.private = RES_MAX_USAGE,
.read_u64 = mem_cgroup_read,
},

Paul Menage
committed
.write_string = mem_cgroup_write,
.read_u64 = mem_cgroup_read,
},
{
.name = "failcnt",
.private = RES_FAILCNT,
.read_u64 = mem_cgroup_read,

KAMEZAWA Hiroyuki
committed
{
.name = "force_empty",
.trigger = mem_force_empty_write,

KAMEZAWA Hiroyuki
committed
},
{
.name = "stat",
.read_map = mem_control_stat_show,

KAMEZAWA Hiroyuki
committed
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
struct mem_cgroup_per_node *pn;

KAMEZAWA Hiroyuki
committed
struct mem_cgroup_per_zone *mz;

KAMEZAWA Hiroyuki
committed
/*
* This routine is called against possible nodes.
* But it's BUG to call kmalloc() against offline node.
*
* TODO: this routine can waste much memory for nodes which will
* never be onlined. It's better to use memory hotplug callback
* function.
*/
if (!node_state(node, N_NORMAL_MEMORY))
tmp = -1;
pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);

KAMEZAWA Hiroyuki
committed
if (!pn)
return 1;

KAMEZAWA Hiroyuki
committed

KAMEZAWA Hiroyuki
committed
mem->info.nodeinfo[node] = pn;
memset(pn, 0, sizeof(*pn));

KAMEZAWA Hiroyuki
committed
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];

KAMEZAWA Hiroyuki
committed
spin_lock_init(&mz->lru_lock);
for_each_lru(l)
INIT_LIST_HEAD(&mz->lists[l]);

KAMEZAWA Hiroyuki
committed
}

KAMEZAWA Hiroyuki
committed
return 0;
}

KAMEZAWA Hiroyuki
committed
/* Release the per-node info allocated by alloc_mem_cgroup_per_zone_info(). */
static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
        struct mem_cgroup_per_node *pn = mem->info.nodeinfo[node];

        kfree(pn); /* kfree(NULL) is a no-op, so no guard needed */
}
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *mem;
if (sizeof(*mem) < PAGE_SIZE)
mem = kmalloc(sizeof(*mem), GFP_KERNEL);
else
mem = vmalloc(sizeof(*mem));
if (mem)
memset(mem, 0, sizeof(*mem));
return mem;
}
/*
 * Free a struct mem_cgroup.  Must mirror the size-based strategy used
 * by mem_cgroup_alloc(): vfree() for page-sized-or-larger structs,
 * kfree() otherwise.
 */
static void mem_cgroup_free(struct mem_cgroup *mem)
{
        if (sizeof(*mem) >= PAGE_SIZE)
                vfree(mem);
        else
                kfree(mem);
}
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct mem_cgroup *mem;

KAMEZAWA Hiroyuki
committed
int node;
if (unlikely((cont->parent) == NULL)) {
page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
} else {
mem = mem_cgroup_alloc();
if (!mem)
return ERR_PTR(-ENOMEM);

KAMEZAWA Hiroyuki
committed

KAMEZAWA Hiroyuki
committed
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
goto free_out;

KAMEZAWA Hiroyuki
committed
free_out:
for_each_node_state(node, N_POSSIBLE)

KAMEZAWA Hiroyuki
committed
free_mem_cgroup_per_zone_info(mem, node);

KAMEZAWA Hiroyuki
committed
if (cont->parent != NULL)
mem_cgroup_free(mem);
return ERR_PTR(-ENOMEM);
/*
 * cgroup 'pre_destroy' callback: force the group empty before the
 * cgroup core proceeds with teardown.
 */
static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
                                   struct cgroup *cont)
{
        mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{

KAMEZAWA Hiroyuki
committed
int node;
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
for_each_node_state(node, N_POSSIBLE)

KAMEZAWA Hiroyuki
committed
free_mem_cgroup_per_zone_info(mem, node);

KAMEZAWA Hiroyuki
committed
mem_cgroup_free(mem_cgroup_from_cont(cont));
}
/*
 * cgroup 'populate' callback: register the memory controller's control
 * files on this cgroup.  Returns whatever cgroup_add_files() returns.
 */
static int mem_cgroup_populate(struct cgroup_subsys *ss,
                               struct cgroup *cont)
{
        int nfiles = ARRAY_SIZE(mem_cgroup_files);

        return cgroup_add_files(cont, ss, mem_cgroup_files, nfiles);
}
/*
 * cgroup 'attach' callback, invoked when task @p moves from @old_cont
 * to @cont.  Takes a reference on the task's mm and only proceeds for
 * thread-group leaders (the mm is in effect owned by the leader).
 *
 * NOTE(review): in the code as visible here, 'mem' and 'old_mem' are
 * computed but never used, and the "goto out" falls straight into the
 * "out:" label — the actual charge-moving logic between the leader
 * check and the label appears to have been lost in the scrape.  Do not
 * "simplify" this function without checking the file history.
 */
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *cont,
struct cgroup *old_cont,
struct task_struct *p)
{
struct mm_struct *mm;
struct mem_cgroup *mem, *old_mem;
/* get_task_mm() takes a reference; released via mmput() below. */
mm = get_task_mm(p);
if (mm == NULL)
return;
mem = mem_cgroup_from_cont(cont);
old_mem = mem_cgroup_from_cont(old_cont);
/*
 * Only thread group leaders are allowed to migrate, the mm_struct is
 * in effect owned by the leader
 */
if (!thread_group_leader(p))
goto out;
out:
mmput(mm);
}
struct cgroup_subsys mem_cgroup_subsys = {
.name = "memory",
.subsys_id = mem_cgroup_subsys_id,
.create = mem_cgroup_create,
.pre_destroy = mem_cgroup_pre_destroy,
.destroy = mem_cgroup_destroy,
.populate = mem_cgroup_populate,

KAMEZAWA Hiroyuki
committed
.early_init = 0,