Commit 9af744d7 authored by Michal Hocko, committed by Linus Torvalds

lib/show_mem.c: teach show_mem to work with the given nodemask

show_mem() allows to filter out node specific data which is irrelevant
to the allocation request via SHOW_MEM_FILTER_NODES.  The filtering is
done in skip_free_areas_node which skips all nodes which are not in the
mems_allowed of the current process.  This works most of the time as
expected because the nodemask shouldn't be outside of the allocating
task but there are some exceptions.  E.g.  memory hotplug might want to
request allocations from outside of the allowed nodes (see
new_node_page).

Get rid of this hardcoded behavior and push the allocation mask down the
show_mem path and use it instead of cpuset_current_mems_allowed.  NULL
nodemask is interpreted as cpuset_current_mems_allowed.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20170117091543.25850-5-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6d23f8a5
...@@ -916,7 +916,7 @@ cmds(struct pt_regs *excp) ...@@ -916,7 +916,7 @@ cmds(struct pt_regs *excp)
memzcan(); memzcan();
break; break;
case 'i': case 'i':
show_mem(0); show_mem(0, NULL);
break; break;
default: default:
termch = cmd; termch = cmd;
......
...@@ -82,7 +82,7 @@ static void prom_sync_me(void) ...@@ -82,7 +82,7 @@ static void prom_sync_me(void)
"nop\n\t" : : "r" (&trapbase)); "nop\n\t" : : "r" (&trapbase));
prom_printf("PROM SYNC COMMAND...\n"); prom_printf("PROM SYNC COMMAND...\n");
show_free_areas(0); show_free_areas(0, NULL);
if (!is_idle_task(current)) { if (!is_idle_task(current)) {
local_irq_enable(); local_irq_enable();
sys_sync(); sys_sync();
......
...@@ -914,7 +914,7 @@ static void ioc3_alloc_rings(struct net_device *dev) ...@@ -914,7 +914,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!skb) { if (!skb) {
show_free_areas(0); show_free_areas(0, NULL);
continue; continue;
} }
......
...@@ -317,7 +317,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = { ...@@ -317,7 +317,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
static void sysrq_handle_showmem(int key) static void sysrq_handle_showmem(int key)
{ {
show_mem(0); show_mem(0, NULL);
} }
static struct sysrq_key_op sysrq_showmem_op = { static struct sysrq_key_op sysrq_showmem_op = {
.handler = sysrq_handle_showmem, .handler = sysrq_handle_showmem,
......
...@@ -572,7 +572,7 @@ static void fn_scroll_back(struct vc_data *vc) ...@@ -572,7 +572,7 @@ static void fn_scroll_back(struct vc_data *vc)
static void fn_show_mem(struct vc_data *vc) static void fn_show_mem(struct vc_data *vc)
{ {
show_mem(0); show_mem(0, NULL);
} }
static void fn_show_state(struct vc_data *vc) static void fn_show_state(struct vc_data *vc)
......
...@@ -1152,8 +1152,7 @@ extern void pagefault_out_of_memory(void); ...@@ -1152,8 +1152,7 @@ extern void pagefault_out_of_memory(void);
*/ */
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */ #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
extern void show_free_areas(unsigned int flags); extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
extern bool skip_free_areas_node(unsigned int flags, int nid);
int shmem_zero_setup(struct vm_area_struct *); int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM #ifdef CONFIG_SHMEM
...@@ -1934,7 +1933,7 @@ extern void setup_per_zone_wmarks(void); ...@@ -1934,7 +1933,7 @@ extern void setup_per_zone_wmarks(void);
extern int __meminit init_per_zone_wmark_min(void); extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void); extern void mem_init(void);
extern void __init mmap_init(void); extern void __init mmap_init(void);
extern void show_mem(unsigned int flags); extern void show_mem(unsigned int flags, nodemask_t *nodemask);
extern long si_mem_available(void); extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val); extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid); extern void si_meminfo_node(struct sysinfo *val, int nid);
......
...@@ -9,13 +9,13 @@ ...@@ -9,13 +9,13 @@
#include <linux/quicklist.h> #include <linux/quicklist.h>
#include <linux/cma.h> #include <linux/cma.h>
void show_mem(unsigned int filter) void show_mem(unsigned int filter, nodemask_t *nodemask)
{ {
pg_data_t *pgdat; pg_data_t *pgdat;
unsigned long total = 0, reserved = 0, highmem = 0; unsigned long total = 0, reserved = 0, highmem = 0;
printk("Mem-Info:\n"); printk("Mem-Info:\n");
show_free_areas(filter); show_free_areas(filter, nodemask);
for_each_online_pgdat(pgdat) { for_each_online_pgdat(pgdat) {
unsigned long flags; unsigned long flags;
......
...@@ -1191,7 +1191,7 @@ error_free: ...@@ -1191,7 +1191,7 @@ error_free:
enomem: enomem:
pr_err("Allocation of length %lu from process %d (%s) failed\n", pr_err("Allocation of length %lu from process %d (%s) failed\n",
len, current->pid, current->comm); len, current->pid, current->comm);
show_free_areas(0); show_free_areas(0, NULL);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1412,13 +1412,13 @@ error_getting_vma: ...@@ -1412,13 +1412,13 @@ error_getting_vma:
kmem_cache_free(vm_region_jar, region); kmem_cache_free(vm_region_jar, region);
pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n", pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
len, current->pid); len, current->pid);
show_free_areas(0); show_free_areas(0, NULL);
return -ENOMEM; return -ENOMEM;
error_getting_region: error_getting_region:
pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n", pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
len, current->pid); len, current->pid);
show_free_areas(0); show_free_areas(0, NULL);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -417,7 +417,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p) ...@@ -417,7 +417,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
if (oc->memcg) if (oc->memcg)
mem_cgroup_print_oom_info(oc->memcg, p); mem_cgroup_print_oom_info(oc->memcg, p);
else else
show_mem(SHOW_MEM_FILTER_NODES); show_mem(SHOW_MEM_FILTER_NODES, nm);
if (sysctl_oom_dump_tasks) if (sysctl_oom_dump_tasks)
dump_tasks(oc->memcg, oc->nodemask); dump_tasks(oc->memcg, oc->nodemask);
} }
......
...@@ -3005,7 +3005,7 @@ static inline bool should_suppress_show_mem(void) ...@@ -3005,7 +3005,7 @@ static inline bool should_suppress_show_mem(void)
return ret; return ret;
} }
static void warn_alloc_show_mem(gfp_t gfp_mask) static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
{ {
unsigned int filter = SHOW_MEM_FILTER_NODES; unsigned int filter = SHOW_MEM_FILTER_NODES;
static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1); static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
...@@ -3025,7 +3025,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask) ...@@ -3025,7 +3025,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask)
if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
filter &= ~SHOW_MEM_FILTER_NODES; filter &= ~SHOW_MEM_FILTER_NODES;
show_mem(filter); show_mem(filter, nodemask);
} }
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
...@@ -3052,7 +3052,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) ...@@ -3052,7 +3052,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
cpuset_print_current_mems_allowed(); cpuset_print_current_mems_allowed();
dump_stack(); dump_stack();
warn_alloc_show_mem(gfp_mask); warn_alloc_show_mem(gfp_mask, nm);
} }
static inline struct page * static inline struct page *
...@@ -4274,20 +4274,20 @@ void si_meminfo_node(struct sysinfo *val, int nid) ...@@ -4274,20 +4274,20 @@ void si_meminfo_node(struct sysinfo *val, int nid)
* Determine whether the node should be displayed or not, depending on whether * Determine whether the node should be displayed or not, depending on whether
* SHOW_MEM_FILTER_NODES was passed to show_free_areas(). * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
*/ */
bool skip_free_areas_node(unsigned int flags, int nid) static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{ {
bool ret = false;
unsigned int cpuset_mems_cookie;
if (!(flags & SHOW_MEM_FILTER_NODES)) if (!(flags & SHOW_MEM_FILTER_NODES))
goto out; return false;
do { /*
cpuset_mems_cookie = read_mems_allowed_begin(); * no node mask - aka implicit memory numa policy. Do not bother with
ret = !node_isset(nid, cpuset_current_mems_allowed); * the synchronization - read_mems_allowed_begin - because we do not
} while (read_mems_allowed_retry(cpuset_mems_cookie)); * have to be precise here.
out: */
return ret; if (!nodemask)
nodemask = &cpuset_current_mems_allowed;
return !node_isset(nid, *nodemask);
} }
#define K(x) ((x) << (PAGE_SHIFT-10)) #define K(x) ((x) << (PAGE_SHIFT-10))
...@@ -4328,7 +4328,7 @@ static void show_migration_types(unsigned char type) ...@@ -4328,7 +4328,7 @@ static void show_migration_types(unsigned char type)
* SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
* cpuset. * cpuset.
*/ */
void show_free_areas(unsigned int filter) void show_free_areas(unsigned int filter, nodemask_t *nodemask)
{ {
unsigned long free_pcp = 0; unsigned long free_pcp = 0;
int cpu; int cpu;
...@@ -4336,7 +4336,7 @@ void show_free_areas(unsigned int filter) ...@@ -4336,7 +4336,7 @@ void show_free_areas(unsigned int filter)
pg_data_t *pgdat; pg_data_t *pgdat;
for_each_populated_zone(zone) { for_each_populated_zone(zone) {
if (skip_free_areas_node(filter, zone_to_nid(zone))) if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue; continue;
for_each_online_cpu(cpu) for_each_online_cpu(cpu)
...@@ -4370,7 +4370,7 @@ void show_free_areas(unsigned int filter) ...@@ -4370,7 +4370,7 @@ void show_free_areas(unsigned int filter)
global_page_state(NR_FREE_CMA_PAGES)); global_page_state(NR_FREE_CMA_PAGES));
for_each_online_pgdat(pgdat) { for_each_online_pgdat(pgdat) {
if (skip_free_areas_node(filter, pgdat->node_id)) if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
continue; continue;
printk("Node %d" printk("Node %d"
...@@ -4422,7 +4422,7 @@ void show_free_areas(unsigned int filter) ...@@ -4422,7 +4422,7 @@ void show_free_areas(unsigned int filter)
for_each_populated_zone(zone) { for_each_populated_zone(zone) {
int i; int i;
if (skip_free_areas_node(filter, zone_to_nid(zone))) if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue; continue;
free_pcp = 0; free_pcp = 0;
...@@ -4487,7 +4487,7 @@ void show_free_areas(unsigned int filter) ...@@ -4487,7 +4487,7 @@ void show_free_areas(unsigned int filter)
unsigned long nr[MAX_ORDER], flags, total = 0; unsigned long nr[MAX_ORDER], flags, total = 0;
unsigned char types[MAX_ORDER]; unsigned char types[MAX_ORDER];
if (skip_free_areas_node(filter, zone_to_nid(zone))) if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue; continue;
show_node(zone); show_node(zone);
printk(KERN_CONT "%s: ", zone->name); printk(KERN_CONT "%s: ", zone->name);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment