Commit a0db00fc authored by Kirill A. Shutemov, committed by Linus Torvalds
parent 3a7951b4
@@ -73,7 +73,7 @@ static int really_do_swap_account __initdata = 0;
 #endif
 #else
-#define do_swap_account (0)
+#define do_swap_account 0
 #endif
@@ -112,9 +112,9 @@ enum mem_cgroup_events_target {
 MEM_CGROUP_TARGET_NUMAINFO,
 MEM_CGROUP_NTARGETS,
 };
-#define THRESHOLDS_EVENTS_TARGET (128)
-#define SOFTLIMIT_EVENTS_TARGET (1024)
-#define NUMAINFO_EVENTS_TARGET (1024)
+#define THRESHOLDS_EVENTS_TARGET 128
+#define SOFTLIMIT_EVENTS_TARGET 1024
+#define NUMAINFO_EVENTS_TARGET 1024
 
 struct mem_cgroup_stat_cpu {
 long count[MEM_CGROUP_STAT_NSTATS];
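The hunks above only simplify object-like macros whose bodies are single integer literals. A minimal C11 sketch, not part of the commit (the *_OLD/*_NEW names are illustrative, not kernel identifiers), of why the parentheses were doing no work:

/* Illustrative only: an integer literal is already a primary expression,
 * so (128) and 128 expand identically wherever the macro is used.
 */
#define THRESHOLDS_EVENTS_TARGET_OLD (128)
#define THRESHOLDS_EVENTS_TARGET_NEW 128

_Static_assert(THRESHOLDS_EVENTS_TARGET_OLD == THRESHOLDS_EVENTS_TARGET_NEW,
               "same value either way");
_Static_assert(THRESHOLDS_EVENTS_TARGET_OLD * 2 == THRESHOLDS_EVENTS_TARGET_NEW * 2,
               "same result inside a larger expression");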
@@ -359,8 +359,8 @@ static bool move_file(void)
  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
  * limit reclaim to prevent infinite loops, if they ever occur.
  */
-#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
-#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
+#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
 
 enum charge_type {
 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -376,8 +376,8 @@ enum charge_type {
 #define _MEM (0)
 #define _MEMSWAP (1)
 #define _OOM_TYPE (2)
-#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
-#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
+#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
+#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
 #define MEMFILE_ATTR(val) ((val) & 0xffff)
 /* Used for OOM nofiier */
 #define OOM_CONTROL (0)
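The MEMFILE_* hunk removes only the inner parentheses around the shift: since << and >> bind tighter than | and & in C, the grouping is unchanged, and the outermost parentheses that protect the macro inside larger expressions are kept. A standalone sketch, not part of the commit (the _OLD/_NEW names and the test harness are illustrative):

/* Illustrative only: check that the old and new MEMFILE_* spellings agree.
 * Relies on << and >> binding tighter than | and & in C.
 */
#include <assert.h>
#include <stdio.h>

#define MEMFILE_PRIVATE_OLD(x, val) (((x) << 16) | (val))
#define MEMFILE_PRIVATE_NEW(x, val) ((x) << 16 | (val))
#define MEMFILE_TYPE_OLD(val) (((val) >> 16) & 0xffff)
#define MEMFILE_TYPE_NEW(val) ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)

int main(void)
{
	int type = 2, attr = 5;	/* e.g. _OOM_TYPE plus some attribute index */
	int v_old = MEMFILE_PRIVATE_OLD(type, attr);
	int v_new = MEMFILE_PRIVATE_NEW(type, attr);

	assert(v_old == v_new);
	assert(MEMFILE_TYPE_OLD(v_old) == MEMFILE_TYPE_NEW(v_new));
	assert(MEMFILE_ATTR(v_new) == attr);
	printf("packed=%#x type=%d attr=%d\n",
	       (unsigned)v_new, MEMFILE_TYPE_NEW(v_new), MEMFILE_ATTR(v_new));
	return 0;
}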
@@ -1987,7 +1987,7 @@ struct memcg_stock_pcp {
 unsigned int nr_pages;
 struct work_struct work;
 unsigned long flags;
-#define FLUSHING_CACHED_CHARGE (0)
+#define FLUSHING_CACHED_CHARGE 0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
 static DEFINE_MUTEX(percpu_charge_mutex);
@@ -2542,7 +2542,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
+#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
 /*
  * Because tail pages are not marked as "used", set it. We're under
  * zone->lru_lock, 'splitting on pmd' and compound_lock.
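The same precedence argument covers PCGF_NOCOPY_AT_SPLIT: 1 << PCG_LOCK | 1 << PCG_MIGRATION still groups as (1 << PCG_LOCK) | (1 << PCG_MIGRATION) because << binds tighter than |. A sketch with placeholder bit numbers, not part of the commit (the real PCG_* values come from the enum in include/linux/page_cgroup.h):

/* Illustrative only: PCG_LOCK and PCG_MIGRATION are stand-in bit numbers. */
#include <assert.h>

enum { PCG_LOCK = 0, PCG_MIGRATION = 4 };	/* placeholder values */

#define PCGF_NOCOPY_AT_SPLIT_OLD ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
#define PCGF_NOCOPY_AT_SPLIT_NEW (1 << PCG_LOCK | 1 << PCG_MIGRATION)

int main(void)
{
	/* << binds tighter than |, so both spellings build the same mask. */
	assert(PCGF_NOCOPY_AT_SPLIT_OLD == PCGF_NOCOPY_AT_SPLIT_NEW);
	return 0;
}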