/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include <linux/device.h>
#include <linux/memcontrol.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_SHIFT - 10))
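/*
 * Worked example of the conversion above (assuming common page sizes):
 * with 4KB pages (PAGE_SHIFT == 12) this is 4096KB >> 2 == 1024 pages,
 * and with 64KB pages (PAGE_SHIFT == 16) it is 4096KB >> 6 == 64 pages;
 * either way the chunk covers 4MB worth of data.
 */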

struct wb_completion {
	atomic_t		cnt;
};

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
	unsigned int auto_free:1;	/* free on completion */
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct wb_completion *done;	/* set if the caller waits */
};

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define DEFINE_WB_COMPLETION_ONSTACK(cmpl)				\
	struct wb_completion cmpl = {					\
		.cnt		= ATOMIC_INIT(1),			\
	}
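/*
 * Illustrative usage sketch (the caller shape is hypothetical; see
 * bdi_split_work_to_wbs() below for a real user of this pattern):
 *
 *	DEFINE_WB_COMPLETION_ONSTACK(done);
 *	struct wb_writeback_work *work = ...;
 *
 *	work->done = &done;
 *	wb_queue_work(wb, work);
 *	...
 *	wb_wait_for_completion(bdi, &done);
 */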


/*
 * If an inode is constantly having its pages dirtied, but then the
 * updates stop dirtytime_expire_interval seconds in the past, it's
 * possible for the worst case time between when an inode has its
 * timestamps updated and when they finally get written out to be two
 * dirtytime_expire_intervals.  We set the default to 12 hours (in
 * seconds), which means most of the time inodes will have their
 * timestamps written to disk after 12 hours, but in the worst case a
 * few inodes might not have their timestamps updated for 24 hours.
 */
unsigned int dirtytime_expire_interval = 12 * 60 * 60;

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_io_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);

static bool wb_io_lists_populated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb)) {
		return false;
	} else {
		set_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(!wb->avg_write_bandwidth);
		atomic_long_add(wb->avg_write_bandwidth,
				&wb->bdi->tot_write_bandwidth);
		return true;
	}
}

static void wb_io_lists_depopulated(struct bdi_writeback *wb)
{
	if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
	    list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
		clear_bit(WB_has_dirty_io, &wb->state);
		WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
					&wb->bdi->tot_write_bandwidth) < 0);
	}
}

/**
 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
 * @inode: inode to be moved
 * @wb: target bdi_writeback
 * @head: one of @wb->b_{dirty|io|more_io}
 *
 * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
 * Returns %true if @inode is the first occupant of the !dirty_time IO
 * lists; otherwise, %false.
 */
static bool inode_io_list_move_locked(struct inode *inode,
				      struct bdi_writeback *wb,
				      struct list_head *head)
{
	assert_spin_locked(&wb->list_lock);

	list_move(&inode->i_io_list, head);

	/* dirty_time doesn't count as dirty_io until expiration */
	if (head != &wb->b_dirty_time)
		return wb_io_lists_populated(wb);

	wb_io_lists_depopulated(wb);
	return false;
}

/**
 * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
 * @inode: inode to be removed
 * @wb: bdi_writeback @inode is being removed from
 *
 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
 * clear %WB_has_dirty_io if all are empty afterwards.
 */
static void inode_io_list_del_locked(struct inode *inode,
				     struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);

	list_del_init(&inode->i_io_list);
	wb_io_lists_depopulated(wb);
}

static void wb_wakeup(struct bdi_writeback *wb)
{
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	spin_unlock_bh(&wb->work_lock);
}

static void finish_writeback_work(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	struct wb_completion *done = work->done;

	if (work->auto_free)
		kfree(work);
	if (done && atomic_dec_and_test(&done->cnt))
		wake_up_all(&wb->bdi->wb_waitq);
}

static void wb_queue_work(struct bdi_writeback *wb,
			  struct wb_writeback_work *work)
{
	trace_writeback_queue(wb, work);

	if (work->done)
		atomic_inc(&work->done->cnt);

	spin_lock_bh(&wb->work_lock);

	if (test_bit(WB_registered, &wb->state)) {
		list_add_tail(&work->list, &wb->work_list);
		mod_delayed_work(bdi_wq, &wb->dwork, 0);
	} else
		finish_writeback_work(wb, work);

	spin_unlock_bh(&wb->work_lock);
}

/**
 * wb_wait_for_completion - wait for completion of bdi_writeback_works
 * @bdi: bdi work items were issued to
 * @done: target wb_completion
 *
 * Wait for one or more work items issued to @bdi with their ->done field
 * set to @done, which should have been defined with
 * DEFINE_WB_COMPLETION_ONSTACK().  This function returns after all such
 * work items are completed.  Work items which are waited upon aren't freed
 * automatically on completion.
 */
static void wb_wait_for_completion(struct backing_dev_info *bdi,
				   struct wb_completion *done)
{
	atomic_dec(&done->cnt);		/* put down the initial count */
	wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
}

#ifdef CONFIG_CGROUP_WRITEBACK

/* parameters for foreign inode detection, see wbc_detach_inode() */
#define WB_FRN_TIME_SHIFT	13	/* 1s = 2^13, up to 8 secs w/ 16bit */
#define WB_FRN_TIME_AVG_SHIFT	3	/* avg = avg * 7/8 + new * 1/8 */
#define WB_FRN_TIME_CUT_DIV	2	/* ignore rounds < avg / 2 */
#define WB_FRN_TIME_PERIOD	(2 * (1 << WB_FRN_TIME_SHIFT))	/* 2s */

#define WB_FRN_HIST_SLOTS	16	/* inode->i_wb_frn_history is 16bit */
#define WB_FRN_HIST_UNIT	(WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
					/* each slot's duration is 2s / 16 */
#define WB_FRN_HIST_THR_SLOTS	(WB_FRN_HIST_SLOTS / 2)
					/* if foreign slots >= 8, switch */
#define WB_FRN_HIST_MAX_SLOTS	(WB_FRN_HIST_THR_SLOTS / 2 + 1)
					/* one round can affect up to 5 slots */
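/*
 * With the defaults above one time unit is 1/8192 sec, a full history
 * covers WB_FRN_TIME_PERIOD == 2 sec of IO time, and each of the 16
 * history slots therefore stands for WB_FRN_HIST_UNIT == 125 msec of IO
 * time.  (Values derived from the constants above, restated for
 * illustration.)
 */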

static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;

void __inode_attach_wb(struct inode *inode, struct page *page)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct bdi_writeback *wb = NULL;

	if (inode_cgwb_enabled(inode)) {
		struct cgroup_subsys_state *memcg_css;

		if (page) {
			memcg_css = mem_cgroup_css_from_page(page);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
		} else {
			/* must pin memcg_css, see wb_get_create() */
			memcg_css = task_get_css(current, memory_cgrp_id);
			wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
			css_put(memcg_css);
		}
	}

	if (!wb)
		wb = &bdi->wb;

	/*
	 * There may be multiple instances of this function racing to
	 * update the same inode.  Use cmpxchg() to tell the winner.
	 */
	if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
		wb_put(wb);
}

/**
 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
 * @inode: inode of interest with i_lock held
 *
 * Returns @inode's wb with its list_lock held.  @inode->i_lock must be
 * held on entry and is released on return.  The returned wb is guaranteed
 * to stay @inode's associated wb until its list_lock is released.
 */
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	while (true) {
		struct bdi_writeback *wb = inode_to_wb(inode);

		/*
		 * inode_to_wb() association is protected by both
		 * @inode->i_lock and @wb->list_lock but list_lock nests
		 * outside i_lock.  Drop i_lock and verify that the
		 * association hasn't changed after acquiring list_lock.
		 */
		wb_get(wb);
		spin_unlock(&inode->i_lock);
		spin_lock(&wb->list_lock);

		/* i_wb may have changed in between, can't use inode_to_wb() */
		if (likely(wb == inode->i_wb)) {
			wb_put(wb);	/* @inode already has ref */
			return wb;
		}

		spin_unlock(&wb->list_lock);
		wb_put(wb);
		cpu_relax();
		spin_lock(&inode->i_lock);
	}
}

/**
 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
 * @inode: inode of interest
 *
 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
 * on entry.
 */
static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
	__acquires(&wb->list_lock)
{
	spin_lock(&inode->i_lock);
	return locked_inode_to_wb_and_lock_list(inode);
}

struct inode_switch_wbs_context {
	struct inode		*inode;
	struct bdi_writeback	*new_wb;

	struct rcu_head		rcu_head;
	struct work_struct	work;
};

static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
	down_write(&bdi->wb_switch_rwsem);
}

static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
{
	up_write(&bdi->wb_switch_rwsem);
}

static void inode_switch_wbs_work_fn(struct work_struct *work)
{
	struct inode_switch_wbs_context *isw =
		container_of(work, struct inode_switch_wbs_context, work);
	struct inode *inode = isw->inode;
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct address_space *mapping = inode->i_mapping;
	struct bdi_writeback *old_wb = inode->i_wb;
	struct bdi_writeback *new_wb = isw->new_wb;
	struct radix_tree_iter iter;
	bool switched = false;
	void **slot;

	/*
	 * If @inode switches cgwb membership while sync_inodes_sb() is
	 * being issued, sync_inodes_sb() might miss it.  Synchronize.
	 */
	down_read(&bdi->wb_switch_rwsem);

	/*
	 * By the time control reaches here, RCU grace period has passed
	 * since I_WB_SWITCH assertion and all wb stat update transactions
	 * between unlocked_inode_to_wb_begin/end() are guaranteed to be
	 * synchronizing against mapping->tree_lock.
	 *
	 * Grabbing old_wb->list_lock, inode->i_lock and mapping->tree_lock
	 * gives us exclusion against all wb related operations on @inode
	 * including IO list manipulations and stat updates.
	 */
	if (old_wb < new_wb) {
		spin_lock(&old_wb->list_lock);
		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&new_wb->list_lock);
		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
	}
	spin_lock(&inode->i_lock);
	spin_lock_irq(&mapping->tree_lock);

	/*
	 * Once I_FREEING is visible under i_lock, the eviction path owns
	 * the inode and we shouldn't modify ->i_io_list.
	 */
	if (unlikely(inode->i_state & I_FREEING))
		goto skip_switch;

	/*
	 * Count and transfer stats.  Note that PAGECACHE_TAG_DIRTY points
	 * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
	 * pages actually under writeback.
	 */
	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
				   PAGECACHE_TAG_DIRTY) {
		struct page *page = radix_tree_deref_slot_protected(slot,
							&mapping->tree_lock);
		if (likely(page) && PageDirty(page)) {
			dec_wb_stat(old_wb, WB_RECLAIMABLE);
			inc_wb_stat(new_wb, WB_RECLAIMABLE);
		}
	}

	radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 0,
				   PAGECACHE_TAG_WRITEBACK) {
		struct page *page = radix_tree_deref_slot_protected(slot,
							&mapping->tree_lock);
		if (likely(page)) {
			WARN_ON_ONCE(!PageWriteback(page));
			dec_wb_stat(old_wb, WB_WRITEBACK);
			inc_wb_stat(new_wb, WB_WRITEBACK);
		}
	}

	wb_get(new_wb);

	/*
	 * Transfer to @new_wb's IO list if necessary.  The specific list
	 * @inode was on is ignored and the inode is put on ->b_dirty which
	 * is always correct including from ->b_dirty_time.  The transfer
	 * preserves @inode->dirtied_when ordering.
	 */
	if (!list_empty(&inode->i_io_list)) {
		struct inode *pos;

		inode_io_list_del_locked(inode, old_wb);
		inode->i_wb = new_wb;
		list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
			if (time_after_eq(inode->dirtied_when,
					  pos->dirtied_when))
				break;
		inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
	} else {
		inode->i_wb = new_wb;
	}

	/* ->i_wb_frn updates may race wbc_detach_inode() but doesn't matter */
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
	switched = true;
skip_switch:
	/*
	 * Paired with load_acquire in unlocked_inode_to_wb_begin() and
	 * ensures that the new wb is visible if they see !I_WB_SWITCH.
	 */
	smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);

	spin_unlock_irq(&mapping->tree_lock);
	spin_unlock(&inode->i_lock);
	spin_unlock(&new_wb->list_lock);
	spin_unlock(&old_wb->list_lock);

	up_read(&bdi->wb_switch_rwsem);

	if (switched) {
		wb_wakeup(new_wb);
		wb_put(old_wb);
	}
	wb_put(new_wb);

	iput(inode);
	kfree(isw);

	atomic_dec(&isw_nr_in_flight);
}

static void inode_switch_wbs_rcu_fn(struct rcu_head *rcu_head)
{
	struct inode_switch_wbs_context *isw = container_of(rcu_head,
				struct inode_switch_wbs_context, rcu_head);

	/* needs to grab bh-unsafe locks, bounce to work item */
	INIT_WORK(&isw->work, inode_switch_wbs_work_fn);
	queue_work(isw_wq, &isw->work);
}

/**
 * inode_switch_wbs - change the wb association of an inode
 * @inode: target inode
 * @new_wb_id: ID of the new wb
 *
 * Switch @inode's wb association to the wb identified by @new_wb_id.  The
 * switching is performed asynchronously and may fail silently.
 */
static void inode_switch_wbs(struct inode *inode, int new_wb_id)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);
	struct cgroup_subsys_state *memcg_css;
	struct inode_switch_wbs_context *isw;

	/* noop if seems to be already in progress */
	if (inode->i_state & I_WB_SWITCH)
		return;

	/*
	 * Avoid starting new switches while sync_inodes_sb() is in
	 * progress.  Otherwise, if the down_write protected issue path
	 * blocks heavily, we might end up starting a large number of
	 * switches which will block on the rwsem.
	 */
	if (!down_read_trylock(&bdi->wb_switch_rwsem))
		return;

	isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
	if (!isw)
		goto out_unlock;

	/* find and pin the new wb */
	rcu_read_lock();
	memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
	if (memcg_css)
		isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
	rcu_read_unlock();
	if (!isw->new_wb)
		goto out_free;

	/* while holding I_WB_SWITCH, no one else can update the association */
	spin_lock(&inode->i_lock);
	if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
	    inode->i_state & (I_WB_SWITCH | I_FREEING) ||
	    inode_to_wb(inode) == isw->new_wb) {
		spin_unlock(&inode->i_lock);
		goto out_free;
	}
	inode->i_state |= I_WB_SWITCH;
	__iget(inode);
	spin_unlock(&inode->i_lock);

	isw->inode = inode;

	/*
	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
	 * the RCU protected stat update paths to grab the mapping's
	 * tree_lock so that stat transfer can synchronize against them.
	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
	 */
	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);

	atomic_inc(&isw_nr_in_flight);

	goto out_unlock;

out_free:
	if (isw->new_wb)
		wb_put(isw->new_wb);
	kfree(isw);
out_unlock:
	up_read(&bdi->wb_switch_rwsem);
}

/**
 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
 * @wbc: writeback_control of interest
 * @inode: target inode
 *
 * @inode is locked and about to be written back under the control of @wbc.
 * Record @inode's writeback context into @wbc and unlock the i_lock.  On
 * writeback completion, wbc_detach_inode() should be called.  This is used
 * to track the cgroup writeback context.
 */
void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
				 struct inode *inode)
{
	if (!inode_cgwb_enabled(inode)) {
		spin_unlock(&inode->i_lock);
		return;
	}

	wbc->wb = inode_to_wb(inode);
	wbc->inode = inode;

	wbc->wb_id = wbc->wb->memcg_css->id;
	wbc->wb_lcand_id = inode->i_wb_frn_winner;
	wbc->wb_tcand_id = 0;
	wbc->wb_bytes = 0;
	wbc->wb_lcand_bytes = 0;
	wbc->wb_tcand_bytes = 0;

	wb_get(wbc->wb);
	spin_unlock(&inode->i_lock);

	/*
	 * A dying wb indicates that the memcg-blkcg mapping has changed
	 * and a new wb is already serving the memcg.  Switch immediately.
	 */
	if (unlikely(wb_dying(wbc->wb)))
		inode_switch_wbs(inode, wbc->wb_id);
}
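/*
 * Rough sketch of the attach/account/detach lifecycle described above
 * (the caller shape is hypothetical; the real writeback paths in this
 * file follow the same sequence):
 *
 *	spin_lock(&inode->i_lock);
 *	wbc_attach_and_unlock_inode(&wbc, inode);
 *	... write out pages, calling wbc_account_io() for each ...
 *	wbc_detach_inode(&wbc);
 */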

/**
 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
 * @wbc: writeback_control of the just finished writeback
 *
 * To be called after a writeback attempt of an inode finishes and undoes
 * wbc_attach_and_unlock_inode().  Can be called under any context.
 *
 * As concurrent write sharing of an inode is expected to be very rare and
 * memcg only tracks page ownership on a first-use basis, severely confining
 * the usefulness of such sharing, cgroup writeback tracks ownership
 * per-inode.  While the support for concurrent write sharing of an inode
 * is deemed unnecessary, an inode being written to by different cgroups at
 * different points in time is a lot more common, and, more importantly,
 * charging only by first-use can too readily lead to grossly incorrect
 * behaviors (single foreign page can lead to gigabytes of writeback to be
 * incorrectly attributed).
 *
 * To resolve this issue, cgroup writeback detects the majority dirtier of
 * an inode and transfers the ownership to it.  To avoid unnecessary
 * oscillation, the detection mechanism keeps track of history and gives
 * out the switch verdict only if the foreign usage pattern is stable over
 * a certain amount of time and/or writeback attempts.
 *
 * On each writeback attempt, @wbc tries to detect the majority writer
 * using Boyer-Moore majority vote algorithm.  In addition to the byte
 * count from the majority voting, it also counts the bytes written for the
 * current wb and the last round's winner wb (max of last round's current
 * wb, the winner from two rounds ago, and the last round's majority
 * candidate).  Keeping track of the historical winner helps the algorithm
 * to semi-reliably detect the most active writer even when it's not the
 * absolute majority.
 *
 * Once the winner of the round is determined, whether the winner is
 * foreign or not and how much IO time the round consumed is recorded in
 * inode->i_wb_frn_history.  If the amount of recorded foreign IO time is
 * over a certain threshold, the switch verdict is given.
 */
void wbc_detach_inode(struct writeback_control *wbc)
{
	struct bdi_writeback *wb = wbc->wb;
	struct inode *inode = wbc->inode;
	unsigned long avg_time, max_bytes, max_time;
	u16 history;
	int max_id;

	if (!wb)
		return;

	history = inode->i_wb_frn_history;
	avg_time = inode->i_wb_frn_avg_time;

	/* pick the winner of this round */
	if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
	    wbc->wb_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_id;
		max_bytes = wbc->wb_bytes;
	} else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
		max_id = wbc->wb_lcand_id;
		max_bytes = wbc->wb_lcand_bytes;
	} else {
		max_id = wbc->wb_tcand_id;
		max_bytes = wbc->wb_tcand_bytes;
	}

	/*
	 * Calculate the amount of IO time the winner consumed and fold it
	 * into the running average kept per inode.  If the consumed IO
	 * time is lower than avg / WB_FRN_TIME_CUT_DIV, ignore it for
	 * deciding whether to switch or not.  This is to prevent one-off
	 * small dirtiers from skewing the verdict.
	 */
	max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
				wb->avg_write_bandwidth);
	if (avg_time)
		avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
			    (avg_time >> WB_FRN_TIME_AVG_SHIFT);
	else
		avg_time = max_time;	/* immediate catch up on first run */

	if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
		int slots;

		/*
		 * The switch verdict is reached if foreign wb's consume
		 * more than a certain proportion of IO time in a
		 * WB_FRN_TIME_PERIOD.  This is loosely tracked by 16 slot
		 * history mask where each bit represents one sixteenth of
		 * the period.  Determine the number of slots to shift into
		 * history from @max_time.
		 */
		slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
			    (unsigned long)WB_FRN_HIST_MAX_SLOTS);
		history <<= slots;
		if (wbc->wb_id != max_id)
			history |= (1U << slots) - 1;

		/*
		 * Switch if the current wb isn't the consistent winner.
		 * If there are multiple closely competing dirtiers, the
		 * inode may switch across them repeatedly over time, which
		 * is okay.  The main goal is avoiding keeping an inode on
		 * the wrong wb for an extended period of time.
		 */
		if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
			inode_switch_wbs(inode, max_id);
	}

	/*
	 * Multiple instances of this function may race to update the
	 * following fields but we don't mind occasional inaccuracies.
	 */
	inode->i_wb_frn_winner = max_id;
	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
	inode->i_wb_frn_history = history;

	wb_put(wbc->wb);
	wbc->wb = NULL;
}
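/*
 * A hypothetical round through the arithmetic above: if the round's winner
 * consumed roughly 500 msec of IO time at the wb's average bandwidth, then
 * max_time is about 4 * WB_FRN_HIST_UNIT, slots == 4, and the history word
 * is shifted left by four bits; when the winner is a foreign wb those four
 * new bits are also set.  Once enough foreign rounds accumulate for
 * hweight32(history) to exceed WB_FRN_HIST_THR_SLOTS, the inode is handed
 * to inode_switch_wbs().
 */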

/**
 * wbc_account_io - account IO issued during writeback
 * @wbc: writeback_control of the writeback in progress
 * @page: page being written out
 * @bytes: number of bytes being written out
 *
 * @bytes from @page are about to be written out during the writeback
 * controlled by @wbc.  Keep the book for foreign inode detection.  See
 * wbc_detach_inode().
 */
void wbc_account_io(struct writeback_control *wbc, struct page *page,
		    size_t bytes)
{
	int id;

	/*
	 * pageout() path doesn't attach @wbc to the inode being written
	 * out.  This is intentional as we don't want the function to block
	 * behind a slow cgroup.  Ultimately, we want pageout() to kick off
	 * regular writeback instead of writing things out itself.
	 */
	if (!wbc->wb)
		return;

	id = mem_cgroup_css_from_page(page)->id;

	if (id == wbc->wb_id) {
		wbc->wb_bytes += bytes;
		return;
	}

	if (id == wbc->wb_lcand_id)
		wbc->wb_lcand_bytes += bytes;

	/* Boyer-Moore majority vote algorithm */
	if (!wbc->wb_tcand_bytes)
		wbc->wb_tcand_id = id;
	if (id == wbc->wb_tcand_id)
		wbc->wb_tcand_bytes += bytes;
	else
		wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
}
EXPORT_SYMBOL_GPL(wbc_account_io);

/**
 * inode_congested - test whether an inode is congested
 * @inode: inode to test for congestion (may be NULL)
 * @cong_bits: mask of WB_[a]sync_congested bits to test
 *
 * Tests whether @inode is congested.  @cong_bits is the mask of congestion
 * bits to test and the return value is the mask of set bits.
 *
 * If cgroup writeback is enabled for @inode, the congestion state is
 * determined by whether the cgwb (cgroup bdi_writeback) for the blkcg
 * associated with @inode is congested; otherwise, the root wb's congestion
 * state is used.
 *
 * @inode is allowed to be NULL as this function is often called on
 * mapping->host which is NULL for the swapper space.
 */
int inode_congested(struct inode *inode, int cong_bits)
{
	/*
	 * Once set, ->i_wb never becomes NULL while the inode is alive.
	 * Start transaction iff ->i_wb is visible.
	 */
	if (inode && inode_to_wb_is_valid(inode)) {
		struct bdi_writeback *wb;
		struct wb_lock_cookie lock_cookie = {};
		bool congested;

		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
		congested = wb_congested(wb, cong_bits);
		unlocked_inode_to_wb_end(inode, &lock_cookie);
		return congested;
	}

	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
EXPORT_SYMBOL_GPL(inode_congested);

/**
 * wb_split_bdi_pages - split nr_pages to write according to bandwidth
 * @wb: target bdi_writeback to split @nr_pages to
 * @nr_pages: number of pages to write for the whole bdi
 *
 * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
 * relation to the total write bandwidth of all wb's w/ dirty inodes on
 * @wb->bdi.
 */
static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);

	if (nr_pages == LONG_MAX)
		return LONG_MAX;

	/*
	 * This may be called on clean wb's and proportional distribution
	 * may not make sense, just use the original @nr_pages in those
	 * cases.  In general, we wanna err on the side of writing more.
	 */
	if (!tot_bw || this_bw >= tot_bw)
		return nr_pages;
	else
		return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
}
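/*
 * Numeric sketch of the proportional split above (made-up numbers): a wb
 * averaging 100MB/s on a bdi whose wbs total 400MB/s gets
 * DIV_ROUND_UP(1024 * 100, 400) == 256 of a 1024 page request, while a wb
 * owning all of the bandwidth is handed the full nr_pages.
 */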

/**
 * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
 * @bdi: target backing_dev_info
 * @base_work: wb_writeback_work to issue
 * @skip_if_busy: skip wb's which already have writeback in progress
 *
 * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
 * have dirty inodes.  If @base_work->nr_pages isn't %LONG_MAX, it's
 * distributed to the busy wbs according to each wb's proportion in the
 * total active write bandwidth of @bdi.
 */
static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
				  struct wb_writeback_work *base_work,
				  bool skip_if_busy)
{
	struct bdi_writeback *last_wb = NULL;
	struct bdi_writeback *wb = list_entry(&bdi->wb_list,
					      struct bdi_writeback, bdi_node);

	might_sleep();
restart:
	rcu_read_lock();
	list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
		DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
		struct wb_writeback_work fallback_work;
		struct wb_writeback_work *work;
		long nr_pages;

		if (last_wb) {
			wb_put(last_wb);
			last_wb = NULL;
		}

		/* SYNC_ALL writes out I_DIRTY_TIME too */
		if (!wb_has_dirty_io(wb) &&
		    (base_work->sync_mode == WB_SYNC_NONE ||
		     list_empty(&wb->b_dirty_time)))
			continue;
		if (skip_if_busy && writeback_in_progress(wb))
			continue;

		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);

		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (work) {
			*work = *base_work;
			work->nr_pages = nr_pages;
			work->auto_free = 1;
			wb_queue_work(wb, work);
			continue;
		}

		/* alloc failed, execute synchronously using on-stack fallback */
		work = &fallback_work;
		*work = *base_work;
		work->nr_pages = nr_pages;
		work->auto_free = 0;
		work->done = &fallback_work_done;

		wb_queue_work(wb, work);

		/*
		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
		 * continuing iteration from @wb after dropping and
		 * regrabbing rcu read lock.
		 */
		wb_get(wb);
		last_wb = wb;

		rcu_read_unlock();
		wb_wait_for_completion(bdi, &fallback_work_done);
		goto restart;
	}
	rcu_read_unlock();

	if (last_wb)
		wb_put(last_wb);
}

/**
 * cgroup_writeback_umount - flush inode wb switches for umount
 *
 * This function is called when a super_block is about to be destroyed and
 * flushes in-flight inode wb switches.  An inode wb switch goes through
 * RCU and then workqueue, so the two need to be flushed in order to ensure
 * that all previously scheduled switches are finished.  As wb switches are
 * rare occurrences and synchronize_rcu() can take a while, perform
 * flushing iff wb switches are in flight.
 */
void cgroup_writeback_umount(void)
{
	if (atomic_read(&isw_nr_in_flight)) {
		/*
		 * Use rcu_barrier() to wait for all pending callbacks to
		 * ensure that all in-flight wb switches are in the workqueue.
		 */
		rcu_barrier();
		flush_workqueue(isw_wq);
	}
}

static int __init cgroup_writeback_init(void)
{
	isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
	if (!isw_wq)
		return -ENOMEM;
	return 0;
}
fs_initcall(cgroup_writeback_init);

#else	/* CONFIG_CGROUP_WRITEBACK */

static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }

static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_unlock(&inode->i_lock);
	spin_lock(&wb->list_lock);
	return wb;
}

static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
	__acquires(&wb->list_lock)
{
	struct bdi_writeback *wb = inode_to_wb(inode);

	spin_lock(&wb->list_lock);
	return wb;
}

static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
{
	return nr_pages;
}

static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
				  struct wb_writeback_work *base_work,
				  bool skip_if_busy)
{
	might_sleep();

	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
		base_work->auto_free = 0;
		wb_queue_work(&bdi->wb, base_work);
	}
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
			bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	if (!wb_has_dirty_io(wb))
		return;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wake up the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work),
		       GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (!work) {
		trace_writeback_nowork(wb);
		wb_wakeup(wb);
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;
	work->auto_free	= 1;

	wb_queue_work(wb, work);
}

/**
 * wb_start_background_writeback - start background writeback
 * @wb: bdi_writeback to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given wb
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void wb_start_background_writeback(struct bdi_writeback *wb)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(wb);
	wb_wakeup(wb);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_io_list_del(struct inode *inode)
{
	struct bdi_writeback *wb;

	wb = inode_to_wb_and_lock_list(inode);
	inode_io_list_del_locked(inode, wb);
	spin_unlock(&wb->list_lock);
}

/*
 * mark an inode as under writeback on the sb
 */
void sb_mark_inode_writeback(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned long flags;

	if (list_empty(&inode->i_wb_list)) {
		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
		if (list_empty(&inode->i_wb_list)) {
			list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
			trace_sb_mark_inode_writeback(inode);
		}
		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
	}
}

/*
 * clear an inode as under writeback on the sb
 */
void sb_clear_inode_writeback(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned long flags;

	if (!list_empty(&inode->i_wb_list)) {
		spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
		if (!list_empty(&inode->i_wb_list)) {
			list_del_init(&inode->i_wb_list);
			trace_sb_clear_inode_writeback(inode);
		}
		spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
	}
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	inode_io_list_move_locked(inode, wb, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	inode_io_list_move_locked(inode, wb, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

#define EXPIRE_DIRTY_ATIME 0x0001

/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       int flags,
			       struct wb_writeback_work *work)
{
	unsigned long *older_than_this = NULL;
	unsigned long expire_time;
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	if ((flags & EXPIRE_DIRTY_ATIME) == 0)
		older_than_this = work->older_than_this;
	else if (!work->for_sync) {
		expire_time = jiffies - (dirtytime_expire_interval * HZ);
		older_than_this = &expire_time;
	}
	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_io_list, &tmp);
		moved++;
		if (flags & EXPIRE_DIRTY_ATIME)
			set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
		if (sb_is_blkdev_sb(inode->i_sb))
			continue;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_io_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                           |
 *                                           +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;

	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
	moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
				     EXPIRE_DIRTY_ATIME, work);
	if (moved)
		wb_io_lists_populated(wb);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, bit_wait,
			      TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}