/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
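
/*
 * Illustrative sketch (not part of the original file): walking the per-sb
 * inode list while honouring the ordering documented above means taking
 * i_lock only while s_inode_list_lock is already held, e.g.:
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...		(examine or update inode->i_state)
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 *
 * evict_inodes() and invalidate_inodes() below follow this pattern.
 */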

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
	return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations no_open_fops = {.open = no_open};
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &no_open_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	if (sb->s_xattr)
		inode->i_opflags |= IOP_XATTR;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_write_hint = WRITE_LIFE_NOT_SET;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_link = NULL;
	inode->i_dir_seq = 0;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

#ifdef CONFIG_CGROUP_WRITEBACK
	inode->i_wb_frn_winner = 0;
	inode->i_wb_frn_avg_time = 0;
	inode->i_wb_frn_history = 0;
#endif

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	init_rwsem(&inode->i_rwsem);
	lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping->wb_err = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->writeback_index = 0;
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	inode_detach_wb(inode);
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	locks_free_lock_context(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && !is_uncached_acl(inode->i_acl))
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);
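
/*
 * Illustrative sketch (not from this file; the "myfs_" helper is
 * hypothetical): a filesystem's unlink path is expected to go through the
 * helpers above rather than touching i_nlink directly, e.g.:
 *
 *	static int myfs_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *
 *		inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
 *		drop_nlink(inode);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 *
 * A real filesystem would also remove the on-disk directory entry here.
 */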

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
	spin_lock_init(&mapping->tree_lock);
	init_rwsem(&mapping->i_mmap_rwsem);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT_CACHED;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_io_list);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
}
EXPORT_SYMBOL(inode_init_once);
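
/*
 * Illustrative sketch (not from this file; the "myfs_" names are
 * hypothetical): filesystems that embed the VFS inode in their own in-memory
 * inode usually call inode_init_once() from their slab constructor, so the
 * idempotent fields are set up once per slab object:
 *
 *	static void myfs_init_once(void *foo)
 *	{
 *		struct myfs_inode_info *mi = foo;
 *
 *		inode_init_once(&mi->vfs_inode);
 *	}
 *
 *	myfs_inode_cachep = kmem_cache_create("myfs_inode_cache",
 *				sizeof(struct myfs_inode_info), 0,
 *				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
 *				myfs_init_once);
 */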

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
	else
		inode->i_state |= I_REFERENCED;
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
				I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}


static void inode_lru_list_del(struct inode *inode)
{

	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode->i_sb->s_inode_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode->i_sb->s_inode_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can still be in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free the mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrexceptional);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	BUG_ON(!list_empty(&inode->i_wb_list));
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_io_list))
		inode_io_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for the flusher thread to be done with the inode so that the
	 * filesystem does not start destroying it while writeback is still
	 * running. Since the inode has I_FREEING set, the flusher thread won't
	 * start new work on the inode.  We just have to wait for running
	 * writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
		cond_resched();
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after the MS_ACTIVE flag has been removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

again:
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);

		/*
		 * We can have a ton of inodes to evict at unmount time given
		 * enough memory; check to see if we need to go to sleep for a
		 * bit so we don't livelock.
		 */
		if (need_resched()) {
			spin_unlock(&sb->s_inode_list_lock);
			cond_resched();
			dispose_list(&dispose);
			goto again;
		}
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);
}
EXPORT_SYMBOL_GPL(evict_inodes);

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes, a non-zero value is returned, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&sb->s_inode_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because we
 * are doing lazy LRU updates to minimise lock contention, so the LRU does not
 * have strict ordering. Hence we don't want to reclaim inodes with this flag
 * set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_lru_isolate(lru, &inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_lru_isolate_move(lru, &inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
				     inode_lru_isolate, &freeable);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At NR_CPUS=4096 and LAST_INO_BATCH=1024,
 * this is ~0.1% of the 2^32 range, and is a worst case. Even a 50% wastage
 * would only increase the overflow rate by 2x, which does not seem too
 * significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in the target struct field. Use a 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	res++;
	/* get_next_ino should not provide a 0 inode number */
	if (unlikely(!res))
		res++;
	*p = res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
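
/*
 * Illustrative sketch (not from this file): in-memory filesystems with no
 * stable on-disk inode numbers typically assign one from here when the inode
 * is created:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */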

/**
 *	new_inode_pseudo 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock.
 *	The inode won't be chained in the superblock's s_inodes list.
 *	This means:
 *	- the fs can't be unmounted
 *	- quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode 	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 *
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&sb->s_inode_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
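
/*
 * Illustrative sketch (not from this file): a filesystem whose page-cache
 * pages must not come from highmem could override the default mask right
 * after allocation, as the comment above suggests:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 */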

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			// mutex_destroy(&inode->i_mutex);
			init_rwsem(&inode->i_rwsem);
			lockdep_set_class(&inode->i_rwsem,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
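
/*
 * Illustrative sketch (not from this file): the usual caller is an
 * iget_locked()-style lookup helper that fills in a freshly allocated inode
 * and then publishes it:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		...		(read the inode from disk, set i_op, i_fop, ...)
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */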

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_lock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);
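
/*
 * Illustrative sketch (not from this file): callers pair this with
 * unlock_two_nondirectories() below around work that needs both inodes held,
 * e.g. exchanging or comparing file contents:
 *
 *	lock_two_nondirectories(inode1, inode2);
 *	...		(operate on both inodes)
 *	unlock_two_nondirectories(inode1, inode2);
 */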

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		inode_unlock(inode1);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		inode_unlock(inode2);