/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

/*
 * Tracks how many cpusets are currently defined in the system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
struct cpuset;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * This is the old set of Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;
};

/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
			    struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuset_subsys_id),
			    struct cpuset, css);
}

static inline struct cpuset *parent_cs(const struct cpuset *cs)
{
	struct cgroup *pcgrp = cs->css.cgroup->parent;

	if (pcgrp)
		return cgroup_cs(pcgrp);
	return NULL;
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_cgrp: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)		\
	cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)	\
		if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_cgrp: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_cgrp by calling
 * cgroup_rightmost_descendant() to skip subtree.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)	\
	cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
		if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
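
/*
 * Illustrative usage of the walk above (a sketch mirroring
 * update_domain_attr_tree() further down, not an additional API): callers
 * hold rcu_read_lock() and may prune a subtree by advancing the cursor to
 * its rightmost descendant:
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
 *		if (cpumask_empty(cp->cpus_allowed)) {
 *			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
 *			continue;
 *		}
 *		...
 *	}
 *	rcu_read_unlock();
 */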

/*
 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 * and callback_mutex.  The latter may nest inside the former.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_mutex and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_mutex to query cpusets.
 * Once it is ready to make the changes, it takes callback_mutex, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);
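
/*
 * Sketch of the nesting described above (illustrative, not an exhaustive
 * list of callers): a writer holds cpuset_mutex for the whole operation and
 * takes callback_mutex only around the final update, e.g. update_cpumask()
 * below does
 *
 *	mutex_lock(&callback_mutex);
 *	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 *	mutex_unlock(&callback_mutex);
 *
 * while its caller already holds cpuset_mutex.
 */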

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};
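
/*
 * Illustrative effect of the shim above (a sketch, not kernel API
 * documentation): a legacy
 *
 *	mount -t cpuset none /dev/cpuset
 *
 * behaves roughly like
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent none /dev/cpuset
 */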

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  The top
 * cpuset always has some cpus online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_cpus(const struct cpuset *cs,
				  struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
		cs = parent_cs(cs);
	cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cpuset_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		tsk->flags |= PF_SPREAD_PAGE;
	else
		tsk->flags &= ~PF_SPREAD_PAGE;
	if (is_spread_slab(cs))
		tsk->flags |= PF_SPREAD_SLAB;
	else
		tsk->flags &= ~PF_SPREAD_SLAB;
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
		kfree(trial);
		return NULL;
	}
	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

	return trial;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cgroup *cont;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, cont, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* We must be a subset of our parent cpuset */
	ret = -EACCES;
	if (!is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, cont, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
	    (cpumask_empty(trial->cpus_allowed) ||
	     nodes_empty(trial->mems_allowed)))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup *pos_cgrp;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    q  - a linked-list queue of cpuset pointers, used to implement a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the schedulers
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number, and gives them the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
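 *
 *	Illustrative example (not taken from the code): three cpusets with
 *	sched_load_balance set and cpus_allowed of {0-1}, {1-2} and {4-5}
 *	collapse to two partitions, because the first two overlap and are
 *	merged; the resulting sched domains are {0-2} and {4-5}.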
 */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup *pos_cgrp;

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

		goto done;
	}

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !is_sched_load_balance(cp))
			continue;

		if (is_sched_load_balance(cp))
			csa[csn++] = cp;

		/* skip @cp's subtree */
		pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				printk(KERN_WARNING
				 "rebuild_sched_domains confused:"
				  " nslot %d, ndoms %d, csn %d, i %d,"
				  " apn %d\n",
				  nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->cpus_allowed);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_held(&cpuset_mutex);
	get_online_cpus();

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyway, the hotplug work item will rebuild sched domains.
	 */
	if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
		goto out;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
out:
	put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
}

/*
 * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
 * @cs: the cpuset in interest
 *
 * A cpuset's effective cpumask is the cpumask of the nearest ancestor
 * with non-empty cpus. We use effective cpumask whenever:
 * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
 *   if the cpuset they reside in has no cpus)
 * - we want to retrieve task_cs(tsk)'s cpus_allowed.
 *
 * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
 * exception. See comments there.
 */
static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
{
	while (cpumask_empty(cs->cpus_allowed))
		cs = parent_cs(cs);
	return cs;
}

/*
 * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
 * @cs: the cpuset in interest
 *
 * A cpuset's effective nodemask is the nodemask of the nearest ancestor
 * with non-empty mems. We use effective nodemask whenever:
 * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
 *   if the cpuset they reside in has no mems)
 * - we want to retrieve task_cs(tsk)'s mems_allowed.
 *
 * Called with cpuset_mutex held.
 */
static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
{
	while (nodes_empty(cs->mems_allowed))
		cs = parent_cs(cs);
	return cs;
}

/**
 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 * cpus_allowed mask needs to be changed.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cpuset_mutex at this point.
 */
static void cpuset_change_cpumask(struct task_struct *tsk,
				  struct cgroup_scanner *scan)
{
	struct cpuset *cpus_cs;

	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_cpumask;
	scan.heap = heap;
	cgroup_scan_tasks(&scan);
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	struct ptr_heap heap;
	int retval;
	int is_load_balanced;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval)
		return retval;

	is_load_balanced = is_sched_load_balance(trialcs);

	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	/*
	 * Scan tasks in the cpuset, and update the cpumasks of any
	 * that need an update.
	 */
	update_tasks_cpumask(cs, &heap);

	heap_free(&heap);

	if (is_load_balanced)
		rebuild_sched_domains_locked();
	return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set the task's mems_allowed to the target nodes of migration,
 *    so that the migration code can allocate pages on these nodes.
 *
 *    Call holding cpuset_mutex, so current's cpuset won't change
 *    during this call, as cpuset_mutex holds off any cpuset_attach()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;
	struct cpuset *mems_cs;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set to
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	bool need_loop;

	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/*
	 * Determine if a loop is necessary if another thread is doing
	 * get_mems_allowed().  If at least one node remains unchanged and
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);

	if (need_loop)
		write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;

	if (need_loop)
		write_seqcount_end(&tsk->mems_allowed_seq);

	task_unlock(tsk);
}

/*
 * Update the task's mems_allowed and rebind its mempolicy and vmas'
 * mempolicies to the cpuset's new mems_allowed, and migrate pages to new
 * nodes if the memory_migrate flag is set. Called with cpuset_mutex held.
 */
static void cpuset_change_nodemask(struct task_struct *p,
				   struct cgroup_scanner *scan)
{
	struct cpuset *cs = cgroup_cs(scan->cg);
	struct mm_struct *mm;
	int migrate;
	nodemask_t *newmems = scan->data;

	cpuset_change_task_nodemask(p, newmems);

	mm = get_task_mm(p);
	if (!mm)
		return;

	migrate = is_memory_migrate(cs);

	mpol_rebind_mm(mm, &cs->mems_allowed);
	if (migrate)
		cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
	mmput(mm);
}

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
{
	static nodemask_t newmems;	/* protected by cpuset_mutex */
	struct cgroup_scanner scan;
	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);

	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */

	guarantee_online_mems(mems_cs, &newmems);

	scan.cg = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_nodemask;
	scan.heap = heap;
	scan.data = &newmems;

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cpuset_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	cgroup_scan_tasks(&scan);

	/*
	 * All the tasks' nodemasks have been updated, update
	 * cs->old_mems_allowed.
	 */
	cs->old_mems_allowed = newmems;

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind task's mempolicy and any vma
 * mempolicies and if the cpuset is marked 'memory_migrate',
 * migrate the task's pages to the new memory.
 *
 * Call with cpuset_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vmas and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */