/*
 * Freescale Memory Controller kernel module
 *
 * Supports Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
 * ARM-based Layerscape SoCs including LS2xxx. Originally split
 * out from the mpc85xx_edac EDAC driver.
 *
 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/edac.h>
#include <linux/smp.h>
#include <linux/gfp.h>

#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include "edac_module.h"
#include "fsl_ddr_edac.h"

#define EDAC_MOD_STR	"fsl_ddr_edac"

static int edac_mc_idx;

static u32 orig_ddr_err_disable;
static u32 orig_ddr_err_sbe;
static bool little_endian;
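
/*
 * DDR controller registers are big-endian on the Power SoCs and typically
 * little-endian on the ARM-based Layerscape parts; the flag is set in
 * fsl_mc_err_probe() from the "little-endian" device-tree property and
 * steers the accessors below.
 */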

static inline u32 ddr_in32(void __iomem *addr)
{
	return little_endian ? ioread32(addr) : ioread32be(addr);
}

static inline void ddr_out32(void __iomem *addr, u32 value)
{
	if (little_endian)
		iowrite32(value, addr);
	else
		iowrite32be(value, addr);
}

/************************ MC SYSFS parts ***********************************/

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t fsl_mc_inject_data_hi_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI));
}

static ssize_t fsl_mc_inject_data_lo_show(struct device *dev,
					  struct device_attribute *mattr,
					  char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO));
}

static ssize_t fsl_mc_inject_ctrl_show(struct device *dev,
				       struct device_attribute *mattr,
				       char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	return sprintf(data, "0x%08x",
		       ddr_in32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT));
}

static ssize_t fsl_mc_inject_data_hi_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_HI, val);
		return count;
	}
	return 0;
}

static ssize_t fsl_mc_inject_data_lo_store(struct device *dev,
					   struct device_attribute *mattr,
					   const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata->mc_vbase + FSL_MC_DATA_ERR_INJECT_LO, val);
		return count;
	}
	return 0;
}

static ssize_t fsl_mc_inject_ctrl_store(struct device *dev,
					struct device_attribute *mattr,
					const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	unsigned long val;
	int rc;

	if (isdigit(*data)) {
		rc = kstrtoul(data, 0, &val);
		if (rc)
			return rc;

		ddr_out32(pdata->mc_vbase + FSL_MC_ECC_ERR_INJECT, val);
		return count;
	}
	return 0;
}
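
/*
 * The injection registers are driven from user space through the sysfs
 * attributes registered below, e.g. (illustrative values; the mcN index
 * depends on registration order, mc0 assumed here):
 *
 *   echo 0x00000001 > /sys/devices/system/edac/mc/mc0/inject_data_lo
 *   echo 0x1 > /sys/devices/system/edac/mc/mc0/inject_ctrl
 *
 * Note the stores only accept strings beginning with a digit (see the
 * isdigit() checks above); kstrtoul(..., 0, ...) accepts hex with "0x".
 */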

DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
	    fsl_mc_inject_data_hi_show, fsl_mc_inject_data_hi_store);
DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
	    fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
	    fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);

static struct attribute *fsl_ddr_dev_attrs[] = {
	&dev_attr_inject_data_hi.attr,
	&dev_attr_inject_data_lo.attr,
	&dev_attr_inject_ctrl.attr,
	NULL
};

ATTRIBUTE_GROUPS(fsl_ddr_dev);

/**************************** MC Err device ***************************/

/*
 * Taken from table 8-55 in the MPC8641 User's Manual and/or 9-61 in the
 * MPC8572 User's Manual.  Each line represents a syndrome bit column as a
 * 64-bit value, but split into an upper and lower 32-bit chunk.  The labels
 * below correspond to Freescale's manuals.
 */
static unsigned int ecc_table[16] = {
	/* MSB           LSB */
	/* [0:31]    [32:63] */
	0xf00fe11e, 0xc33c0ff7,	/* Syndrome bit 7 */
	0x00ff00ff, 0x00fff0ff,
	0x0f0f0f0f, 0x0f0fff00,
	0x11113333, 0x7777000f,
	0x22224444, 0x8888222f,
	0x44448888, 0xffff4441,
	0x8888ffff, 0x11118882,
	0xffff1111, 0x22221114,	/* Syndrome bit 0 */
};
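
/*
 * Each pair ecc_table[2 * i] / ecc_table[2 * i + 1] holds the upper/lower
 * 32-bit parity mask that calculate_ecc() below folds into bit i of the
 * computed ECC byte.
 */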

/*
 * Calculate the correct ECC value for a 64-bit value specified by high:low
 */
static u8 calculate_ecc(u32 high, u32 low)
{
	u32 mask_low;
	u32 mask_high;
	int bit_cnt;
	u8 ecc = 0;
	int i;
	int j;

	for (i = 0; i < 8; i++) {
		mask_high = ecc_table[i * 2];
		mask_low = ecc_table[i * 2 + 1];
		bit_cnt = 0;

		for (j = 0; j < 32; j++) {
			if ((mask_high >> j) & 1)
				bit_cnt ^= (high >> j) & 1;
			if ((mask_low >> j) & 1)
				bit_cnt ^= (low >> j) & 1;
		}

		ecc |= bit_cnt << i;
	}

	return ecc;
}

/*
 * Create the syndrome code which is generated if the data line specified by
 * 'bit' failed.  E.g. generate the 8-bit codes seen in Table 8-55 in the MPC8641
 * User's Manual and 9-61 in the MPC8572 User's Manual.
 */
static u8 syndrome_from_bit(unsigned int bit)
{
	int i;
	u8 syndrome = 0;

	/*
	 * Cycle through the upper or lower 32-bit portion of each value in
	 * ecc_table depending on if 'bit' is in the upper or lower half of
	 * 64-bit data.
	 */
	for (i = bit < 32; i < 16; i += 2)
		syndrome |= ((ecc_table[i] >> (bit % 32)) & 1) << (i / 2);

	return syndrome;
}
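
/*
 * Because the ECC is a pure XOR parity over the masks above, recomputing
 * the ECC of the captured data and XORing it with the captured ECC yields
 * the syndrome of the error alone; for a single flipped data line that
 * syndrome equals syndrome_from_bit() of the failing bit, which is exactly
 * what sbe_ecc_decode() below searches for.
 */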

/*
 * Decode data and ecc syndrome to determine what went wrong
 * Note: This can only decode single-bit errors
 */
static void sbe_ecc_decode(u32 cap_high, u32 cap_low, u32 cap_ecc,
		       int *bad_data_bit, int *bad_ecc_bit)
{
	int i;
	u8 syndrome;

	*bad_data_bit = -1;
	*bad_ecc_bit = -1;

	/*
	 * Calculate the ECC of the captured data and XOR it with the captured
	 * ECC to find an ECC syndrome value we can search for
	 */
	syndrome = calculate_ecc(cap_high, cap_low) ^ cap_ecc;

	/* Check if a data line is stuck... */
	for (i = 0; i < 64; i++) {
		if (syndrome == syndrome_from_bit(i)) {
			*bad_data_bit = i;
			return;
		}
	}

	/* If data is correct, check ECC bits for errors... */
	for (i = 0; i < 8; i++) {
		if ((syndrome >> i) & 0x1) {
			*bad_ecc_bit = i;
			return;
		}
	}
}

#define make64(high, low) (((u64)(high) << 32) | (low))

static void fsl_mc_check(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	u32 bus_width;
	u32 err_detect;
	u32 syndrome;
	u64 err_addr;
	u32 pfn;
	int row_index;
	u32 cap_high;
	u32 cap_low;
	int bad_data_bit;
	int bad_ecc_bit;

	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
	if (!err_detect)
		return;

	fsl_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
		      err_detect);

	/* no more processing if not ECC bit errors */
	if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
		return;
	}

	syndrome = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ECC);

	/* Mask off appropriate bits of syndrome based on bus width */
	bus_width = (ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG) &
		     DSC_DBW_MASK) ? 32 : 64;
	if (bus_width == 64)
		syndrome &= 0xff;
	else
		syndrome &= 0xffff;

	err_addr = make64(
		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_EXT_ADDRESS),
		ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_ADDRESS));
	pfn = err_addr >> PAGE_SHIFT;

	for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
		csrow = mci->csrows[row_index];
		if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
			break;
	}

	cap_high = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_HI);
	cap_low = ddr_in32(pdata->mc_vbase + FSL_MC_CAPTURE_DATA_LO);

	/*
	 * Analyze single-bit errors on 64-bit wide buses
	 * TODO: Add support for 32-bit wide buses
	 */
	if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
		u32 exp_high = cap_high;
		u32 exp_low = cap_low;
		u32 exp_ecc = syndrome;

		sbe_ecc_decode(cap_high, cap_low, syndrome,
				&bad_data_bit, &bad_ecc_bit);

		/*
		 * Only flip the bit that was actually identified: shifting
		 * by a negative count (no bad data/ECC bit found) would be
		 * undefined behaviour.
		 */
		if (bad_data_bit != -1) {
			fsl_mc_printk(mci, KERN_ERR,
				"Faulty Data bit: %d\n", bad_data_bit);
			if (bad_data_bit < 32)
				exp_low ^= 1 << bad_data_bit;
			else
				exp_high ^= 1 << (bad_data_bit - 32);
		}
		if (bad_ecc_bit != -1) {
			fsl_mc_printk(mci, KERN_ERR,
				"Faulty ECC bit: %d\n", bad_ecc_bit);
			exp_ecc ^= 1 << bad_ecc_bit;
		}

		fsl_mc_printk(mci, KERN_ERR,
			"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			exp_high, exp_low, exp_ecc);
	}

	fsl_mc_printk(mci, KERN_ERR,
			"Captured Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
			cap_high, cap_low, syndrome);
	fsl_mc_printk(mci, KERN_ERR, "Err addr: %#8.8llx\n", err_addr);
	fsl_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);

	/* we are out of range */
	if (row_index == mci->nr_csrows)
		fsl_mc_printk(mci, KERN_ERR, "PFN out of range!\n");

	if (err_detect & DDR_EDE_SBE)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	if (err_detect & DDR_EDE_MBE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, err_addr & ~PAGE_MASK, syndrome,
				     row_index, 0, -1,
				     mci->ctl_name, "");

	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, err_detect);
}

static irqreturn_t fsl_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	u32 err_detect;

	err_detect = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DETECT);
	if (!err_detect)
		return IRQ_NONE;

	fsl_mc_check(mci);

	return IRQ_HANDLED;
}

static void fsl_ddr_init_csrows(struct mem_ctl_info *mci)
{
	struct fsl_mc_pdata *pdata = mci->pvt_info;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	u32 sdram_ctl;
	u32 sdtype;
	enum mem_type mtype;
	u32 cs_bnds;
	int index;

	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);

	sdtype = sdram_ctl & DSC_SDTYPE_MASK;
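	/*
	 * The SDRAM_TYPE field of DDR_SDRAM_CFG selects the memory
	 * technology; RD_EN is set when registered DIMMs are in use.
	 */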
	if (sdram_ctl & DSC_RD_EN) {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_RDDR;
			break;
		case 0x03000000:
			mtype = MEM_RDDR2;
			break;
		case 0x07000000:
			mtype = MEM_RDDR3;
			break;
		case 0x05000000:
			mtype = MEM_RDDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	} else {
		switch (sdtype) {
		case 0x02000000:
			mtype = MEM_DDR;
			break;
		case 0x03000000:
			mtype = MEM_DDR2;
			break;
		case 0x07000000:
			mtype = MEM_DDR3;
			break;
		case 0x05000000:
			mtype = MEM_DDR4;
			break;
		default:
			mtype = MEM_UNKNOWN;
			break;
		}
	}

	for (index = 0; index < mci->nr_csrows; index++) {
		u32 start;
		u32 end;

		csrow = mci->csrows[index];
		dimm = csrow->channels[0]->dimm;

		cs_bnds = ddr_in32(pdata->mc_vbase + FSL_MC_CS_BNDS_0 +
				   (index * FSL_MC_CS_BNDS_OFS));
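
		/*
		 * The CS_BNDS start/end fields are in 16 MiB units, so the
		 * shift by (24 - PAGE_SHIFT) below converts them to page
		 * frame numbers; equal bounds are treated as an unpopulated
		 * chip select.
		 */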

		start = (cs_bnds & 0xffff0000) >> 16;
		end   = (cs_bnds & 0x0000ffff);

		if (start == end)
			continue;	/* not populated */

		start <<= (24 - PAGE_SHIFT);
		end   <<= (24 - PAGE_SHIFT);
		end    |= (1 << (24 - PAGE_SHIFT)) - 1;

		csrow->first_page = start;
		csrow->last_page = end;

		dimm->nr_pages = end + 1 - start;
		dimm->grain = 8;
		dimm->mtype = mtype;
		dimm->dtype = DEV_UNKNOWN;
		if (sdram_ctl & DSC_X32_EN)
			dimm->dtype = DEV_X32;
		dimm->edac_mode = EDAC_SECDED;
	}
}

int fsl_mc_err_probe(struct platform_device *op)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct fsl_mc_pdata *pdata;
	struct resource r;
	u32 sdram_ctl;
	int res;

	if (!devres_open_group(&op->dev, fsl_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;
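
	/*
	 * The controller handles up to four chip selects; model each as a
	 * virtual csrow with a single channel.
	 */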

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(*pdata));
	if (!mci) {
		devres_release_group(&op->dev, fsl_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	pdata->name = "fsl_mc_err";
	mci->pdev = &op->dev;
	pdata->edac_idx = edac_mc_idx++;
	dev_set_drvdata(mci->pdev, mci);
	mci->ctl_name = pdata->name;
	mci->dev_name = pdata->name;

	/*
	 * Get the endianness of DDR controller registers.
	 * Default is big endian.
	 */
	little_endian = of_property_read_bool(op->dev.of_node, "little-endian");

	res = of_address_to_resource(op->dev.of_node, 0, &r);
	if (res) {
		pr_err("%s: Unable to get resource for MC err regs\n",
		       __func__);
		goto err;
	}

	if (!devm_request_mem_region(&op->dev, r.start, resource_size(&r),
				     pdata->name)) {
		pr_err("%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&op->dev, r.start, resource_size(&r));
	if (!pdata->mc_vbase) {
		pr_err("%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	sdram_ctl = ddr_in32(pdata->mc_vbase + FSL_MC_DDR_SDRAM_CFG);
	if (!(sdram_ctl & DSC_ECC_EN)) {
		/* no ECC */
		pr_warn("%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR |
			 MEM_FLAG_DDR2 | MEM_FLAG_RDDR2 |
			 MEM_FLAG_DDR3 | MEM_FLAG_RDDR3 |
			 MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
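
	/*
	 * In polling mode the EDAC core calls ->edac_check periodically;
	 * in interrupt mode errors are reported from fsl_mc_isr() instead.
	 */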

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = fsl_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	fsl_ddr_init_csrows(mci);

	/* store the original error disable bits */
	orig_ddr_err_disable = ddr_in32(pdata->mc_vbase + FSL_MC_ERR_DISABLE);
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE, 0);

	/* clear all error bits */
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DETECT, ~0);

	res = edac_mc_add_mc_with_groups(mci, fsl_ddr_dev_groups);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN,
			  DDR_EIE_MBEE | DDR_EIE_SBEE);

		/* store the original error management threshold */
		orig_ddr_err_sbe = ddr_in32(pdata->mc_vbase +
					    FSL_MC_ERR_SBE) & 0xff0000;

		/* set threshold to 1 error per interrupt */
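		/* (the SBE threshold field is bits 23:16 of ERR_SBE, hence 0x10000) */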
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, 0x10000);

		/* register interrupts */
		pdata->irq = platform_get_irq(op, 0);
		res = devm_request_irq(&op->dev, pdata->irq,
				       fsl_mc_isr,
				       IRQF_SHARED,
				       "[EDAC] MC err", mci);
		if (res < 0) {
			pr_err("%s: Unable to request irq %d for FSL DDR DRAM ERR\n",
			       __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		pr_info(EDAC_MOD_STR " acquired irq %d for MC\n",
		       pdata->irq);
	}

	devres_remove_group(&op->dev, fsl_mc_err_probe);
	edac_dbg(3, "success\n");
	pr_info(EDAC_MOD_STR " MC err registered\n");

	return 0;

err2:
	edac_mc_del_mc(&op->dev);
err:
	devres_release_group(&op->dev, fsl_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

int fsl_mc_err_remove(struct platform_device *op)
{
	struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
	struct fsl_mc_pdata *pdata = mci->pvt_info;

	edac_dbg(0, "\n");

	if (edac_op_state == EDAC_OPSTATE_INT) {
		ddr_out32(pdata->mc_vbase + FSL_MC_ERR_INT_EN, 0);
	}

	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_DISABLE,
		  orig_ddr_err_disable);
	ddr_out32(pdata->mc_vbase + FSL_MC_ERR_SBE, orig_ddr_err_sbe);

	edac_mc_del_mc(&op->dev);
	edac_mc_free(mci);
	return 0;
}