/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

/*
 * Tracks how many cpusets are currently defined in the system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * These are the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;
};

/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
{
	return container_of(cgroup_css(cgrp, cpuset_subsys_id),
			    struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_css(task, cpuset_subsys_id),
			    struct cpuset, css);
}

static inline struct cpuset *parent_cs(const struct cpuset *cs)
{
	struct cgroup *pcgrp = cs->css.cgroup->parent;

	if (pcgrp)
		return cgroup_cs(pcgrp);
	return NULL;
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_cgrp: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)		\
	cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)	\
		if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
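
/*
 * Typical usage, mirroring how validate_change() below iterates children
 * (an illustrative sketch only; do_something() is a hypothetical callback):
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(c, pos_cgrp, parent)
 *		do_something(c);
 *	rcu_read_unlock();
 */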

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_cgrp: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_cgrp by calling
 * cgroup_rightmost_descendant() to skip subtree.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)	\
	cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
		if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))

/*
 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 * and callback_mutex.  The latter may nest inside the former.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_mutex and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_mutex to query cpusets.
 * Once it is ready to make the changes, it takes callback_mutex, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 *  The task_struct fields mems_allowed and mempolicy may be changed by
 *  another task, so we use alloc_lock in the task_struct to protect
 *  them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */
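
/*
 * Illustrative locking order, as used by update_cpumask() below (a sketch
 * of the rules described above, not an additional requirement):
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate the change, allocate memory ...
 *	mutex_lock(&callback_mutex);
 *	... publish the new cpus_allowed/mems_allowed ...
 *	mutex_unlock(&callback_mutex);
 *	mutex_unlock(&cpuset_mutex);
 */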

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}
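
/*
 * In effect (a sketch of the remapping above, not an extra code path):
 * "mount -t cpuset none /dev/cpuset" behaves like
 * "mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent none /dev/cpuset".
 */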

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};

/*
 *  Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  The top
 * cpuset always has some cpus online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_cpus(const struct cpuset *cs,
				  struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
		cs = parent_cs(cs);
	cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
}

/*
 *  Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cpuset_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		tsk->flags |= PF_SPREAD_PAGE;
	else
		tsk->flags &= ~PF_SPREAD_PAGE;
	if (is_spread_slab(cs))
		tsk->flags |= PF_SPREAD_SLAB;
	else
		tsk->flags &= ~PF_SPREAD_SLAB;
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
		kfree(trial);
		return NULL;
	}
	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

	return trial;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cgroup *cgrp;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, cgrp, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* We must be a subset of our parent cpuset */
	ret = -EACCES;
	if (!is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, cgrp, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
	    (cpumask_empty(trial->cpus_allowed) &&
	     nodes_empty(trial->mems_allowed)))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup *pos_cgrp;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of the system's CPUs.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the schedulers
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number and gives them in the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
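/*
 * A small worked example of the partitioning above (illustrative only):
 * given load-balanced cpusets A = {0,1}, B = {1,2} and C = {4,5}, A and B
 * overlap and end up with the same 'pn', forming one sched domain {0,1,2},
 * while C forms its own domain {4,5}; the function then returns ndoms == 2.
 */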
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup *pos_cgrp;

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

		goto done;
	}

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !is_sched_load_balance(cp))
			continue;

		if (is_sched_load_balance(cp))
			csa[csn++] = cp;

		/* skip @cp's subtree */
		pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				printk(KERN_WARNING
				 "rebuild_sched_domains confused:"
				  " nslot %d, ndoms %d, csn %d, i %d,"
				  " apn %d\n",
				  nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->cpus_allowed);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_held(&cpuset_mutex);
	get_online_cpus();

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyways, hotplug work item will rebuild sched domains.
	 */
	if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
		goto out;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
out:
	put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
}

/*
 * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
 * @cs: the cpuset of interest
 *
 * A cpuset's effective cpumask is the cpumask of the nearest ancestor
 * with non-empty cpus. We use effective cpumask whenever:
 * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
 *   if the cpuset they reside in has no cpus)
 * - we want to retrieve task_cs(tsk)'s cpus_allowed.
 *
 * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
 * exception. See comments there.
 */
static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
{
	while (cpumask_empty(cs->cpus_allowed))
		cs = parent_cs(cs);
	return cs;
}

/*
 * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
 * @cs: the cpuset of interest
 *
 * A cpuset's effective nodemask is the nodemask of the nearest ancestor
 * with non-empty mems. We use effective nodemask whenever:
 * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
 *   if the cpuset they reside in has no mems)
 * - we want to retrieve task_cs(tsk)'s mems_allowed.
 *
 * Called with cpuset_mutex held.
 */
static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
{
	while (nodes_empty(cs->mems_allowed))
		cs = parent_cs(cs);
	return cs;
}

/**
 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 * cpus_allowed mask needs to be changed.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cpuset_mutex at this point.
 */
static void cpuset_change_cpumask(struct task_struct *tsk,
				  struct cgroup_scanner *scan)
{
	struct cpuset *cpus_cs;

	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cgrp));
	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	scan.cgrp = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_cpumask;
	scan.heap = heap;
	cgroup_scan_tasks(&scan);
}

/*
 * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
 * @root_cs: the root cpuset of the hierarchy
 * @update_root: update root cpuset or not?
 * @heap: the heap used by cgroup_scan_tasks()
 *
 * This will update cpumasks of tasks in @root_cs and all its empty descendant
 * cpusets, which take on the cpumask of @root_cs.
 *
 * Called with cpuset_mutex held
 */
static void update_tasks_cpumask_hier(struct cpuset *root_cs,
				      bool update_root, struct ptr_heap *heap)
{
	struct cpuset *cp;
	struct cgroup *pos_cgrp;

	if (update_root)
		update_tasks_cpumask(root_cs, heap);

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp has some CPUs */
		if (!cpumask_empty(cp->cpus_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
			continue;
		}
		if (!css_tryget(&cp->css))
			continue;
		rcu_read_unlock();

		update_tasks_cpumask(cp, heap);

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	struct ptr_heap heap;
	int retval;
	int is_load_balanced;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval)
		return retval;

	is_load_balanced = is_sched_load_balance(trialcs);

	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	update_tasks_cpumask_hier(cs, true, &heap);

	heap_free(&heap);

	if (is_load_balanced)
		rebuild_sched_domains_locked();
	return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set the task's mems_allowed to the target nodes of migration,
 *    so that the migration code can allocate pages on these nodes.
 *
 *    Call holding cpuset_mutex, so current's cpuset won't change
 *    during this call, as cpuset_mutex holds off any cpuset_attach()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;
	struct cpuset *mems_cs;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	bool need_loop;

	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/*
	 * Determine if a loop is necessary if another thread is doing
	 * get_mems_allowed().  If at least one node remains unchanged and
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);

	if (need_loop)
		write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;

	if (need_loop)
		write_seqcount_end(&tsk->mems_allowed_seq);

	task_unlock(tsk);
}
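
/*
 * Reader-side sketch of the seqcount protocol above (hedged; the in-tree
 * get_mems_allowed()/put_mems_allowed() helpers wrap this pattern):
 *
 *	do {
 *		seq = read_seqcount_begin(&current->mems_allowed_seq);
 *		nodes = current->mems_allowed;
 *	} while (read_seqcount_retry(&current->mems_allowed_seq, seq));
 */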

/*
 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
 * memory_migrate flag is set. Called with cpuset_mutex held.
 */
static void cpuset_change_nodemask(struct task_struct *p,
				   struct cgroup_scanner *scan)
{
	struct cpuset *cs = cgroup_cs(scan->cgrp);
	struct mm_struct *mm;
	int migrate;
	nodemask_t *newmems = scan->data;

	cpuset_change_task_nodemask(p, newmems);

	mm = get_task_mm(p);
	if (!mm)
		return;

	migrate = is_memory_migrate(cs);

	mpol_rebind_mm(mm, &cs->mems_allowed);
	if (migrate)
		cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
	mmput(mm);
}

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
{
	static nodemask_t newmems;	/* protected by cpuset_mutex */
	struct cgroup_scanner scan;
	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);

	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */

	guarantee_online_mems(mems_cs, &newmems);