/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

/*
 * Tracks how many cpusets are currently defined in the system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	/*
	 * This is the old set of Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
{
	return css_cs(cgroup_css(cgrp, cpuset_subsys_id));
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_subsys_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(css_parent(&cs->css));
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_cgrp: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)		\
	cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)	\
		if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
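/*
 * Example usage (an illustrative sketch with hypothetical names, not
 * code from this file):
 *
 *	struct cpuset *child;
 *	struct cgroup *pos;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos, parent)
 *		do_something(child);
 *	rcu_read_unlock();
 */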

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_cgrp: used for iteration
 * @root_cs: target cpuset to walk the descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_cgrp by calling
 * cgroup_rightmost_descendant() to skip subtree.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)	\
	cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
		if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))

/*
 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 * and callback_mutex.  The latter may nest inside the former.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 *
 * A task must hold both mutexes to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_mutex and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_mutex to query cpusets.
 * Once it is ready to make the changes, it takes callback_mutex, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mempolicy may be changed by
 * another task; we use alloc_lock in the task_struct to protect them.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c.
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_MUTEX(callback_mutex);

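/*
 * Illustrative lock nesting for a cpuset writer, sketched from the
 * rules above (not an actual function in this file):
 *
 *	mutex_lock(&cpuset_mutex);	- validate, allocate
 *	mutex_lock(&callback_mutex);	- publish new masks
 *	...
 *	mutex_unlock(&callback_mutex);
 *	mutex_unlock(&cpuset_mutex);
 */
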
/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead.
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};

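/*
 * For reference, Documentation/cgroups/cpusets.txt documents the
 * equivalence that cpuset_mount() preserves:
 *
 *	mount -t cpuset X /sys/fs/cgroup/cpuset
 *
 * behaves like
 *
 *	mount -t cgroup -o cpuset,noprefix X /sys/fs/cgroup/cpuset
 *	echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent
 */
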
/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  The top
 * cpuset always has some cpus online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
		cs = parent_cs(cs);
	cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Called with callback_mutex/cpuset_mutex held
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		tsk->flags |= PF_SPREAD_PAGE;
	else
		tsk->flags &= ~PF_SPREAD_PAGE;
	if (is_spread_slab(cs))
		tsk->flags |= PF_SPREAD_SLAB;
	else
		tsk->flags &= ~PF_SPREAD_SLAB;
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
		kfree(trial);
		return NULL;
	}
	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);

	return trial;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup *cgrp;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, cgrp, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* We must be a subset of our parent cpuset */
	ret = -EACCES;
	if (!is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, cgrp, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
	    (cpumask_empty(trial->cpus_allowed) &&
	     nodes_empty(trial->mems_allowed)))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup *pos_cgrp;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
 *
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *   cp  - cpuset pointer, used (together with pos_cgrp) to perform a
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the scheduler's
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number, and merges them into the same partition.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
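
/*
 * Worked example (illustrative, with made-up cpusets): if cpusets A
 * (cpus 0-3) and B (cpus 4-7) are both marked sched_load_balance and
 * their cpus_allowed don't overlap, the loops below leave A and B with
 * different 'pn' values, so ndoms == 2 and doms[] holds {0-3} and
 * {4-7}.  If A and B shared a cpu, they would be merged into a single
 * sched domain spanning {0-7}.
 */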
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* top-down scan of cpusets */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup *pos_cgrp;

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_copy(doms[0], top_cpuset.cpus_allowed);

		goto done;
	}

	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !is_sched_load_balance(cp))
			continue;

		if (is_sched_load_balance(cp))
			csa[csn++] = cp;

		/* skip @cp's subtree */
		pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				printk(KERN_WARNING
				 "rebuild_sched_domains confused:"
				  " nslot %d, ndoms %d, csn %d, i %d,"
				  " apn %d\n",
				  nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->cpus_allowed);
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_held(&cpuset_mutex);
	get_online_cpus();

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyways, hotplug work item will rebuild sched domains.
	 */
	if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
		goto out;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
out:
	put_online_cpus();
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	mutex_lock(&cpuset_mutex);
	rebuild_sched_domains_locked();
	mutex_unlock(&cpuset_mutex);
}

/*
 * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
 * @cs: the cpuset in interest
 *
 * A cpuset's effective cpumask is the cpumask of the nearest ancestor
 * with non-empty cpus. We use effective cpumask whenever:
 * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
 *   if the cpuset they reside in has no cpus)
 * - we want to retrieve task_cs(tsk)'s cpus_allowed.
 *
 * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
 * exception. See comments there.
 */
static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
{
	while (cpumask_empty(cs->cpus_allowed))
		cs = parent_cs(cs);
	return cs;
}

/*
 * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
 * @cs: the cpuset in interest
 *
 * A cpuset's effective nodemask is the nodemask of the nearest ancestor
 * with non-empty mems. We use effective nodemask whenever:
 * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
 *   if the cpuset they reside in has no mems)
 * - we want to retrieve task_cs(tsk)'s mems_allowed.
 *
 * Called with cpuset_mutex held.
 */
static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
{
	while (nodes_empty(cs->mems_allowed))
		cs = parent_cs(cs);
	return cs;
}

/**
 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 * @tsk: task to test
 * @scan: struct cgroup_scanner containing the cgroup of the task
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 * cpus_allowed mask needs to be changed.
 *
 * We don't need to re-check for the cgroup/cpuset membership, since we're
 * holding cpuset_mutex at this point.
 */
static void cpuset_change_cpumask(struct task_struct *tsk,
				  struct cgroup_scanner *scan)
{
	struct cpuset *cpus_cs;

	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cgrp));
	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 *
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
	struct cgroup_scanner scan;

	scan.cgrp = cs->css.cgroup;
	scan.test_task = NULL;
	scan.process_task = cpuset_change_cpumask;
	scan.heap = heap;
	cgroup_scan_tasks(&scan);
}

/*
 * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
 * @root_cs: the root cpuset of the hierarchy
 * @update_root: update root cpuset or not?
 * @heap: the heap used by cgroup_scan_tasks()
 *
 * This will update cpumasks of tasks in @root_cs and all other empty cpusets
 * which take on cpumask of @root_cs.
 *
 * Called with cpuset_mutex held
 */
static void update_tasks_cpumask_hier(struct cpuset *root_cs,
				      bool update_root, struct ptr_heap *heap)
{
	struct cpuset *cp;
	struct cgroup *pos_cgrp;

	if (update_root)
		update_tasks_cpumask(root_cs, heap);

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
		/* skip the whole subtree if @cp has some CPUs */
		if (!cpumask_empty(cp->cpus_allowed)) {
			pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
			continue;
		}
		if (!css_tryget(&cp->css))
			continue;
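		/*
		 * Drop the RCU read lock while scanning the tasks: the css
		 * reference taken above keeps @cp alive across the scan,
		 * and update_tasks_cpumask() may sleep.
		 */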
		rcu_read_unlock();

		update_tasks_cpumask(cp, heap);

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	struct ptr_heap heap;
	int retval;
	int is_load_balanced;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
	if (retval)
		return retval;

	is_load_balanced = is_sched_load_balance(trialcs);

	mutex_lock(&callback_mutex);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
	mutex_unlock(&callback_mutex);

	update_tasks_cpumask_hier(cs, true, &heap);

	heap_free(&heap);

	if (is_load_balanced)
		rebuild_sched_domains_locked();
	return 0;
}

/*
 * cpuset_migrate_mm
 *
 *    Migrate memory region from one set of nodes to another.
 *
 *    Temporarily set tasks' mems_allowed to target nodes of migration,
 *    so that the migration code can allocate pages on these nodes.
 *
 *    Call holding cpuset_mutex, so current's cpuset won't change
 *    during this call, as cpuset_mutex holds off any cpuset_attach()
 *    calls.  Therefore we don't need to take task_lock around the
 *    call to guarantee_online_mems(), as we know no one is changing
 *    our task's cpuset.
 *
 *    While the mm_struct we are migrating is typically from some
 *    other task, the task_struct mems_allowed that we are hacking
 *    is for our current task, which must allocate new pages for that
 *    migrating memory region.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct task_struct *tsk = current;
	struct cpuset *mems_cs;

	tsk->mems_allowed = *to;

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: the new memory nodes for the task
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	bool need_loop;

	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return;
	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return;

	task_lock(tsk);
	/*
	 * Determine if a loop is necessary if another thread is doing
	 * get_mems_allowed().  If at least one node remains unchanged and
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);

	if (need_loop)
		write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
	tsk->mems_allowed = *newmems;

	if (need_loop)
		write_seqcount_end(&tsk->mems_allowed_seq);

	task_unlock(tsk);
}

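/*
 * Illustrative reader-side pairing for the seqcount above, based on the
 * get_mems_allowed()/put_mems_allowed() helpers referenced in the
 * comment in cpuset_change_task_nodemask() (a sketch, not code from
 * this file):
 *
 *	unsigned int cookie;
 *	do {
 *		cookie = get_mems_allowed();
 *		... read current->mems_allowed ...
 *	} while (!put_mems_allowed(cookie));
 */
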
/*
 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
 * memory_migrate flag is set. Called with cpuset_mutex held.
 */
static void cpuset_change_nodemask(struct task_struct *p,
				   struct cgroup_scanner *scan)
{
	struct cpuset *cs = cgroup_cs(scan->cgrp);
	struct mm_struct *mm;
	int migrate;
	nodemask_t *newmems = scan->data;

	cpuset_change_task_nodemask(p, newmems);

	mm = get_task_mm(p);
	if (!mm)
		return;

	migrate = is_memory_migrate(cs);

	mpol_rebind_mm(mm, &cs->mems_allowed);
	if (migrate)
		cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
	mmput(mm);
}

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 *
 * Called with cpuset_mutex held
 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 * if @heap != NULL.
 */
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
{