  • }
    
    #else
    static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
    		unsigned long addr, unsigned long end, struct mm_walk *walk)
    {
    	return 0;
    }
    #endif
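
/*
 * A /proc/<pid>/numa_maps line, as built by show_numa_map() below, looks
 * like (sample values are illustrative only):
 *
 *	00400000 default file=/bin/cat mapped=2 N0=2
 *
 * The statistics fields (anon=, dirty=, mapped=, mapmax=, swapcache=,
 * active=, writeback=, N<node>=) are emitted only when meaningful.
 */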
    
    /*
     * Display pages allocated per node and memory policy via /proc.
     */
static int show_numa_map(struct seq_file *m, void *v)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];
    
	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	/*
	 * Set up the walk_page_range() callbacks: gather_pte_stats()
	 * handles regular mappings and gather_hugetbl_stats() hugetlb
	 * mappings; both accumulate their counts into *md.
	 */
	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);
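	/* buffer now holds the textual memory policy, e.g. "default". */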
    
    	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
    
    	if (file) {
    		seq_printf(m, " file=");
    		seq_path(m, &file->f_path, "\n\t= ");
    	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
    		seq_printf(m, " heap");
    	} else if (vma->vm_start <= mm->start_stack &&
    			vma->vm_end >= mm->start_stack) {
    		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);
    
    	if (!md->pages)
    		goto out;
    
    	if (md->anon)
    		seq_printf(m, " anon=%lu", md->anon);
    
    	if (md->dirty)
    		seq_printf(m, " dirty=%lu", md->dirty);
    
    	if (md->pages != md->anon && md->pages != md->dirty)
    		seq_printf(m, " mapped=%lu", md->pages);
    
    	if (md->mapcount_max > 1)
    		seq_printf(m, " mapmax=%lu", md->mapcount_max);
    
    	if (md->swapcache)
    		seq_printf(m, " swapcache=%lu", md->swapcache);
    
    	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
    		seq_printf(m, " active=%lu", md->active);
    
    	if (md->writeback)
    		seq_printf(m, " writeback=%lu", md->writeback);
    
    	for_each_node_state(n, N_HIGH_MEMORY)
    		if (md->node[n])
    			seq_printf(m, " N%d=%lu", n, md->node[n]);
    out:
    	seq_putc(m, '\n');
    
	/*
	 * If this entry fit in the buffer, remember where to resume the
	 * traversal on the next read (0 once we reach the last VMA).
	 */
	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
    	if (priv) {
    		priv->proc_maps.pid = proc_pid(inode);
    		ret = seq_open(file, &proc_pid_numa_maps_op);
    		if (!ret) {
    			struct seq_file *m = file->private_data;
    			m->private = priv;
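			/*
			 * priv is now owned by the seq_file; it is freed by
			 * seq_release_private() when the file is released.
			 */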
    		} else {
    			kfree(priv);
    		}
    	}
	return ret;
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

    #endif /* CONFIG_NUMA */