// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

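/* Read a key from a node, sizing it from the record for variable-length keys */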
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

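/* Copy a specified range of bytes into the raw data of a node */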
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy(kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy(kmap(*++pagep), buf, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

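/* Write a 16-bit value to a node in big-endian on-disk order */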
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

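/* Zero a specified range of bytes in the raw data of a node */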
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memset(kmap(*pagep) + off, 0, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memset(kmap(*++pagep), 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

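/* Copy a range of bytes from one node to another */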
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
		kunmap(*src_page);
		set_page_dirty(*dst_page);
		kunmap(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			src_ptr = kmap(*src_page) + src;
			dst_ptr = kmap(*dst_page) + dst;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

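/* Move a range of bytes within a node, coping with overlapping ranges */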
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				memmove(kmap(*dst_page), kmap(*src_page), src);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, len);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				memmove(kmap(*++dst_page),
					kmap(*++src_page), l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (PAGE_SIZE - src <
						PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

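/* Dump a node's descriptor and record offsets for debugging */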
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
					node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

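/* Remove a node from its sibling chain, updating the tree's leaf list if needed */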
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

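/* Hash a node number into an index in the tree's node hash table */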
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

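/* Find an already-resident node in the hash table */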
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
			node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

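/* Allocate a node, add it to the hash table and read in its backing pages */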
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			put_page(page);
			goto fail;
		}
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

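/* Remove a node from the hash table */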
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			!test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
			node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

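/* Release the pages backing a node and free it */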
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

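/* Create a new node and zero its contents */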
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

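/* Take an extra reference on a node */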
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
		volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}