/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(blkg->q, &blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

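/*
 * Sketch of the fast path this slowpath backs (illustrative; the real
 * blkg_lookup() lives in blk-cgroup.h): the per-blkcg hint is tried
 * first under RCU, and only a miss falls through to the radix tree
 * walk above, roughly:
 *
 *	blkg = rcu_dereference(blkcg->blkg_hint);
 *	if (blkg && blkg->q == q)
 *		return blkg;
 *	return blkg_lookup_slowpath(blkcg, q, false);
 */
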
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
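
/*
 * Usage sketch (illustrative, not part of this file): an IO submission
 * path caller would typically try the plain lookup first and only fall
 * back to creation, roughly:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		blkg = q->root_blkg;	(e.g. fall back to the root blkg)
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */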

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove the same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
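
/*
 * Illustrative sketch (not part of this file): __blk_queue_next_rl() is
 * normally reached through the blk_queue_for_each_rl() iterator defined
 * in blk-cgroup.h, roughly as follows, with queue_lock held:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		... drain or update each request_list ...
 *	}
 */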

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->seq_show method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
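
/*
 * Usage sketch (illustrative, not part of this file): a policy wraps
 * this helper in a seq_show callback; "example_pol" and "struct
 * example_pd" are hypothetical:
 *
 *	static int example_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &example_pol,
 *				  offsetof(struct example_pd, stat), true);
 *		return 0;
 *	}
 */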

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
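
/*
 * Illustrative cftype wiring (not part of this file; "example_pol" and
 * the file name are hypothetical).  cftype->private carries the policy
 * so the callbacks above can locate the matching policy data:
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "example.io_service_bytes",
 *			.private = (unsigned long)&example_pol,
 *			.seq_show = blkg_print_stat_bytes,
 *		},
 *		{ }	(terminator)
 *	};
 */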

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)pos_blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	struct module *owner;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (unlikely(IS_ERR(blkg))) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
fail:
	owner = disk->fops->owner;
	put_disk(disk);
	module_put(owner);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	struct module *owner;

	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	owner = ctx->disk->fops->owner;
	put_disk(ctx->disk);
	module_put(owner);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
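
/*
 * Usage sketch (illustrative, not part of this file): a policy's cftype
 * ->write callback pairs the two helpers; "example_pol" and the limit
 * update are hypothetical:
 *
 *	static ssize_t example_set_limit(struct kernfs_open_file *of,
 *					 char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_pol, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &v) == 1) {
 *			... update ctx.blkg's policy data with v ...
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */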
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios)
			seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
				   dname, rbytes, wbytes, rios, wios);
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is
 * responsible for shooting down all blkgs associated with @css.  blkgs
 * should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock
 * dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg))
		return PTR_ERR(blkg);

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	if (q->mq_ops)