/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective masks.
	 */
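
	/*
	 * Editorial example (not part of the original source): on the
	 * default hierarchy, if a parent's effective_cpus is 0-3 and this
	 * cpuset's configured cpus_allowed is 2-5, the effective_cpus here
	 * become 2-3.  If the intersection were empty, this cpuset would
	 * inherit the parent's effective mask instead.
	 */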

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * This is the old set of Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
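
/*
 * Illustrative usage sketch (editorial addition, not original code); see
 * validate_change() below for a real caller.  The walk must be done under
 * rcu_read_lock(), and "parent" is some struct cpuset *; inspect() is a
 * hypothetical helper:
 *
 *	struct cgroup_subsys_state *pos;
 *	struct cpuset *child;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos, parent)
 *		inspect(child);
 *	rcu_read_unlock();
 */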

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
 * iteration and the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
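
/*
 * Illustrative usage sketch (editorial addition, not original code); see
 * update_domain_attr_tree() below for a real caller.  A whole subtree can
 * be skipped from inside the loop with css_rightmost_descendant();
 * subtree_not_interesting() and process() are hypothetical helpers:
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cs, pos, root) {
 *		if (subtree_not_interesting(cs)) {
 *			pos = css_rightmost_descendant(pos);
 *			continue;
 *		}
 *		process(cs);
 *	}
 *	rcu_read_unlock();
 */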

/*
 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 * and callback_mutex.  The latter may nest inside the former.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_mutex and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_mutex to query cpusets.
 * Once it is ready to make the changes, it takes callback_mutex, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */
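
/*
 * Illustrative sketch of the writer pattern described above (editorial
 * addition, not original code); update_cpumask() below is a real example:
 *
 *	mutex_lock(&cpuset_mutex);		(long-held writer lock)
 *	... validate the change, allocate memory ...
 *	mutex_lock(&callback_mutex);		(short critical section)
 *	... publish the new cpus_allowed/mems_allowed ...
 *	mutex_unlock(&callback_mutex);
 *	mutex_unlock(&cpuset_mutex);
 */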

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
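
/*
 * Editorial note (not in the original source): with this redirection, a
 * userspace "mount -t cpuset none /dev/cpuset" ends up behaving like
 * "mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent
 * none /dev/cpuset", using the options built in cpuset_mount() below.
 */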
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  The top
 * cpuset always has some cpus online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
		cs = parent_cs(cs);
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cpuset_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
		goto free_cs;
	if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
		goto free_cpus;

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;

free_cpus:
	free_cpumask_var(trial->cpus_allowed);
free_cs:
	kfree(trial);
	return NULL;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->effective_cpus);
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */
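
/*
 * Editorial example (not part of the original comment): shrinking a
 * parent's cpus_allowed below what one of its children is still
 * configured with breaks the "each child must be a subset of us" rule,
 * so validate_change() returns -EBUSY and the proposed change is
 * rejected.
 */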

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		if (cp == root_cs)
			continue;

		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *   cp  - a cpuset pointer, used (together with pos_css) to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the scheduler's
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e. the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number, and puts them in the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
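
/*
 * Editorial example (not part of the original comment): with three load
 * balanced cpusets A (cpus 0-1), B (cpus 1-2) and C (cpus 4-5), A and B
 * overlap and end up in the same partition while C stays alone, so this
 * function would return ndoms == 2 with domains {0-2} and {4-5}.
 */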
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* top-down scan of cpusets */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.effective_cpus);

		goto done;
	}

	csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !is_sched_load_balance(cp))
			continue;

		if (is_sched_load_balance(cp))
			csa[csn++] = cp;

		/* skip @cp's subtree */
		pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_held(&cpuset_mutex);
	get_online_cpus();

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with an offlined cpu to partition_sched_domains().
	 * The hotplug work item will rebuild the sched domains anyway.
	 */
	if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
		goto out;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
out:
	put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_cpumask(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
	css_task_iter_end(&it);
}

/*
 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
 * @cs: the cpuset to consider
 * @new_cpus: temp variable for calculating new effective_cpus
 *
 * When the configured cpumask is changed, the effective cpumasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;
	bool need_rebuild_sched_domains = false;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs.
		 */
		if (cpumask_empty(new_cpus))
			cpumask_copy(new_cpus, parent->effective_cpus);

		/* Skip the whole subtree if the cpumask remains the same. */
		if (cpumask_equal(new_cpus, cp->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		mutex_lock(&callback_mutex);
		cpumask_copy(cp->effective_cpus, new_cpus);
		mutex_unlock(&callback_mutex);

		WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));

		update_tasks_cpumask(cp);

		/*
		 * If the effective cpumask of any non-empty cpuset is changed,
		 * we need to rebuild sched domains.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    is_sched_load_balance(cp))
			need_rebuild_sched_domains = true;

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();

	if (need_rebuild_sched_domains)
		rebuild_sched_domains_locked();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	int retval;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed,
				    top_cpuset.cpus_allowed))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	/* use trialcs->cpus_allowed as a temp variable */
	update_cpumasks_hier(cs, trialcs->cpus_allowed);
	return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set the task's mems_allowed to the target nodes of the
 *    migration, so that the migration code can allocate pages on these nodes.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	rcu_read_lock();
	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
	rcu_read_unlock();
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be allowed to use
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
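
/*
 * Editorial example (not part of the original comment): moving a task
 * from mems {0,1} to {1,2} first widens mems_allowed to {0,1,2} via
 * nodes_or() and only then shrinks it to {1,2}, so node 1 remains
 * usable at every intermediate point.
 */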
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	bool need_loop;

	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/*
	 * Determine if a loop is necessary if another thread is doing
	 * read_mems_allowed_begin().  If at least one node remains unchanged and
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);

	if (need_loop) {
		local_irq_disable();
		write_seqcount_begin(&tsk->mems_allowed_seq);
	}

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;

	if (need_loop) {
		write_seqcount_end(&tsk->mems_allowed_seq);
		local_irq_enable();
	}