/*
 * AMD64 class Memory Controller kernel module
 *
 * Copyright (c) 2009 SoftwareBitMaker.
 * Copyright (c) 2009-15 Advanced Micro Devices, Inc.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/edac.h>
#include <asm/cpu_device_id.h>
#include <asm/msr.h>
#include "edac_core.h"
#include "mce_amd.h"

#define amd64_debug(fmt, arg...) \
	edac_printk(KERN_DEBUG, "amd64", fmt, ##arg)

#define amd64_info(fmt, arg...) \
	edac_printk(KERN_INFO, "amd64", fmt, ##arg)

#define amd64_notice(fmt, arg...) \
	edac_printk(KERN_NOTICE, "amd64", fmt, ##arg)

#define amd64_warn(fmt, arg...) \
	edac_printk(KERN_WARNING, "amd64", fmt, ##arg)

#define amd64_err(fmt, arg...) \
	edac_printk(KERN_ERR, "amd64", fmt, ##arg)

#define amd64_mc_warn(mci, fmt, arg...) \
	edac_mc_chipset_printk(mci, KERN_WARNING, "amd64", fmt, ##arg)

#define amd64_mc_err(mci, fmt, arg...) \
	edac_mc_chipset_printk(mci, KERN_ERR, "amd64", fmt, ##arg)

/*
 * Throughout the comments in this code, the following terms are used:
 *
 *	SysAddr, DramAddr, and InputAddr
 *
 *  These terms come directly from the amd64 documentation
 * (AMD publication #26094).  They are defined as follows:
 *
 *     SysAddr:
 *         This is a physical address generated by a CPU core or a device
 *         doing DMA.  If generated by a CPU core, a SysAddr is the result of
 *         a virtual to physical address translation by the CPU core's address
 *         translation mechanism (MMU).
 *
 *     DramAddr:
 *         A DramAddr is derived from a SysAddr by subtracting an offset that
 *         depends on which node the SysAddr maps to and whether the SysAddr
 *         is within a range affected by memory hoisting.  The DRAM Base
 *         (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
 *         determine which node a SysAddr maps to.
 *
 *         If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
 *         is within the range of addresses specified by this register, then
 *         a value x from the DHAR is subtracted from the SysAddr to produce a
 *         DramAddr.  Here, x represents the base address for the node that
 *         the SysAddr maps to plus an offset due to memory hoisting.  See
 *         section 3.4.8 and the comments in amd64_get_dram_hole_info() and
 *         sys_addr_to_dram_addr() below for more information.
 *
 *         If the SysAddr is not affected by the DHAR then a value y is
 *         subtracted from the SysAddr to produce a DramAddr.  Here, y is the
 *         base address for the node that the SysAddr maps to.  See section
 *         3.4.4 and the comments in sys_addr_to_dram_addr() below for more
 *         information.
 *
 *     InputAddr:
 *         A DramAddr is translated to an InputAddr before being passed to the
 *         memory controller for the node that the DramAddr is associated
 *         with.  The memory controller then maps the InputAddr to a csrow.
 *         If node interleaving is not in use, then the InputAddr has the same
 *         value as the DramAddr.  Otherwise, the InputAddr is produced by
 *         discarding the bits used for node interleaving from the DramAddr.
 *         See section 3.4.4 for more information.
 *
 *         The memory controller for a given node uses its DRAM CS Base and
 *         DRAM CS Mask registers to map an InputAddr to a csrow.  See
 *         sections 3.5.4 and 3.5.5 for more information.
 */
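/*
 * Worked example (illustrative register values, not taken from real
 * hardware): suppose node 1's DRAM Base is 0x1_0000_0000 and a SysAddr of
 * 0x1_0000_1000 maps to node 1, outside any DHAR hole.  Then:
 *
 *	DramAddr = 0x100001000 - 0x100000000 = 0x1000
 *
 * With node interleaving disabled, the InputAddr passed to node 1's memory
 * controller is the same 0x1000.
 */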

#define EDAC_AMD64_VERSION		"3.4.0"
#define EDAC_MOD_STR			"amd64_edac"

/* Extended Model from CPUID, for CPU Revision numbers */
#define K8_REV_D			1
#define K8_REV_E			2
#define K8_REV_F			4

/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS			8
#define DRAM_RANGES			8

#define ON true
#define OFF false

/*
 * PCI-defined configuration space registers
 */
#define PCI_DEVICE_ID_AMD_15H_NB_F1	0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2	0x1602
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F1 0x1571
#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F2 0x1572
#define PCI_DEVICE_ID_AMD_16H_NB_F1	0x1531
#define PCI_DEVICE_ID_AMD_16H_NB_F2	0x1532
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582

/*
 * Function 1 - Address Map
 */
#define DRAM_BASE_LO			0x40
#define DRAM_LIMIT_LO			0x44

/*
 * F15 M30h D18F1x2[1C:00]
 */
#define DRAM_CONT_BASE			0x200
#define DRAM_CONT_LIMIT			0x204

/*
 * F15 M30h D18F1x2[4C:40]
 */
#define DRAM_CONT_HIGH_OFF		0x240

#define dram_rw(pvt, i)			((u8)(pvt->ranges[i].base.lo & 0x3))
#define dram_intlv_sel(pvt, i)		((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
#define dram_dst_node(pvt, i)		((u8)(pvt->ranges[i].lim.lo & 0x7))

#define DHAR				0xf0
#define dhar_mem_hoist_valid(pvt)	((pvt)->dhar & BIT(1))
#define dhar_base(pvt)			((pvt)->dhar & 0xff000000)
#define k8_dhar_offset(pvt)		(((pvt)->dhar & 0x0000ff00) << 16)

					/* NOTE: Extra mask bit vs K8 */
#define f10_dhar_offset(pvt)		(((pvt)->dhar & 0x0000ff80) << 16)

#define DCT_CFG_SEL			0x10C

#define DRAM_LOCAL_NODE_BASE		0x120
#define DRAM_LOCAL_NODE_LIM		0x124

#define DRAM_BASE_HI			0x140
#define DRAM_LIMIT_HI			0x144


/*
 * Function 2 - DRAM controller
 */
#define DCSB0				0x40
#define DCSB1				0x140
#define DCSB_CS_ENABLE			BIT(0)

#define DCSM0				0x60
#define DCSM1				0x160

#define csrow_enabled(i, dct, pvt)	((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)

#define DRAM_CONTROL			0x78

#define DBAM0				0x80
#define DBAM1				0x180

/* Extract the DIMM 'type' of the i'th DIMM from the DBAM reg value passed */
#define DBAM_DIMM(i, reg)		((((reg) >> (4*(i)))) & 0xF)

#define DBAM_MAX_VALUE			11
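/*
 * Example (illustrative value): for reg == 0x4321, DBAM_DIMM(0, reg) == 0x1,
 * DBAM_DIMM(1, reg) == 0x2, DBAM_DIMM(2, reg) == 0x3 and DBAM_DIMM(3, reg)
 * == 0x4.  Each nibble encodes one DIMM's cs_mode as consumed by
 * ->dbam_to_cs(), with valid values running up to DBAM_MAX_VALUE.
 */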

#define DCLR0				0x90
#define DCLR1				0x190
#define REVE_WIDTH_128			BIT(16)
#define WIDTH_128			BIT(11)

#define DCHR0				0x94
#define DCHR1				0x194
#define DDR3_MODE			BIT(8)

#define DCT_SEL_LO			0x110
#define dct_high_range_enabled(pvt)	((pvt)->dct_sel_lo & BIT(0))
#define dct_interleave_enabled(pvt)	((pvt)->dct_sel_lo & BIT(2))

#define dct_ganging_enabled(pvt)	((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))

#define dct_data_intlv_enabled(pvt)	((pvt)->dct_sel_lo & BIT(5))
#define dct_memory_cleared(pvt)		((pvt)->dct_sel_lo & BIT(10))

#define SWAP_INTLV_REG			0x10c

#define DCT_SEL_HI			0x114

#define F15H_M60H_SCRCTRL		0x1C8

/*
 * Function 3 - Misc Control
 */
#define NBCTL				0x40

#define NBCFG				0x44
#define NBCFG_CHIPKILL			BIT(23)
#define NBCFG_ECC_ENABLE		BIT(22)

/* F3x48: NBSL */
#define F10_NBSL_EXT_ERR_ECC		0x8
#define NBSL_PP_OBS			0x2

#define SCRCTRL				0x58

#define F10_ONLINE_SPARE		0xB0
#define online_spare_swap_done(pvt, c)	(((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
#define online_spare_bad_dramcs(pvt, c)	(((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)

#define F10_NB_ARRAY_ADDR		0xB8
#define F10_NB_ARRAY_DRAM		BIT(31)

/* Bits [2:1] are used to select a 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDR(section)	(((section) & 0x3) << 1)
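/* E.g. (illustrative): SET_NB_ARRAY_ADDR(2) == 0x4, i.e. the third 16-byte section. */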

#define F10_NB_ARRAY_DATA		0xBC
#define F10_NB_ARR_ECC_WR_REQ		BIT(17)
#define SET_NB_DRAM_INJECTION_WRITE(inj)  \
					(BIT(((inj.word) & 0xF) + 20) | \
					F10_NB_ARR_ECC_WR_REQ | inj.bit_map)
#define SET_NB_DRAM_INJECTION_READ(inj)  \
					(BIT(((inj.word) & 0xF) + 20) | \
					BIT(16) |  inj.bit_map)
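/*
 * Example (illustrative): for inj.word == 2 and inj.bit_map == 0x5,
 * SET_NB_DRAM_INJECTION_WRITE(inj) evaluates to BIT(22) (word select) |
 * F10_NB_ARR_ECC_WR_REQ (ECC write request) | 0x5 (the bits to flip).
 */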

#define NBCAP				0xE8
#define NBCAP_CHIPKILL			BIT(4)
#define NBCAP_SECDED			BIT(3)
#define NBCAP_DCT_DUAL			BIT(0)

#define EXT_NB_MCA_CFG			0x180

/* MSRs */
#define MSR_MCGCTL_NBE			BIT(4)

enum amd_families {
	K8_CPUS = 0,
	F10_CPUS,
	F15_CPUS,
	F15_M30H_CPUS,
	F15_M60H_CPUS,
	F16_CPUS,
	F16_M30H_CPUS,
	NUM_FAMILIES,
};

/* Error injection control structure */
struct error_injection {
	u32	 section;
	u32	 word;
	u32	 bit_map;
};

/* low and high part of PCI config space regs */
struct reg_pair {
	u32 lo, hi;
};

/*
 * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
 */
struct dram_range {
	struct reg_pair base;
	struct reg_pair lim;
};

/* A DCT chip selects collection */
struct chip_select {
	u32 csbases[NUM_CHIPSELECTS];
	u8 b_cnt;

	u32 csmasks[NUM_CHIPSELECTS];
	u8 m_cnt;
};

struct amd64_pvt {
	struct low_ops *ops;

	/* pci_device handles which we utilize */
	struct pci_dev *F1, *F2, *F3;

	u16 mc_node_id;		/* MC index of this MC node */
	u8 fam;			/* CPU family */
	u8 model;		/* ... model */
	u8 stepping;		/* ... stepping */

	int ext_model;		/* extended model value of this node */
	int channel_count;

	/* Raw registers */
	u32 dclr0;		/* DRAM Configuration Low DCT0 reg */
	u32 dclr1;		/* DRAM Configuration Low DCT1 reg */
	u32 dchr0;		/* DRAM Configuration High DCT0 reg */
	u32 dchr1;		/* DRAM Configuration High DCT1 reg */
	u32 nbcap;		/* North Bridge Capabilities */
	u32 nbcfg;		/* F10 North Bridge Configuration */
	u32 ext_nbcfg;		/* Extended F10 North Bridge Configuration */
	u32 dhar;		/* DRAM Hoist reg */
	u32 dbam0;		/* DRAM Base Address Mapping reg for DCT0 */
	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */

	/* one for each DCT */
	struct chip_select csels[2];

	/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
	struct dram_range ranges[DRAM_RANGES];

	u64 top_mem;		/* top of memory below 4GB */
	u64 top_mem2;		/* top of memory above 4GB */

	u32 dct_sel_lo;		/* DRAM Controller Select Low */
	u32 dct_sel_hi;		/* DRAM Controller Select High */
	u32 online_spare;	/* On-Line spare Reg */

	/* x4 or x8 syndromes in use */
	u8 ecc_sym_sz;

	/* place to store error injection parameters prior to issue */
	struct error_injection injection;

	/* cache the dram_type */
	enum mem_type dram_type;
};

enum err_codes {
	DECODE_OK	=  0,
	ERR_NODE	= -1,
	ERR_CSROW	= -2,
	ERR_CHANNEL	= -3,
};

struct err_info {
	int err_code;
	struct mem_ctl_info *src_mci;
	int csrow;
	int channel;
	u16 syndrome;
	u32 page;
	u32 offset;
};

static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
{
	u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;

	if (boot_cpu_data.x86 == 0xf)
		return addr;

	return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
}

static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
{
	u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;

	if (boot_cpu_data.x86 == 0xf)
		return lim;

	return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
}
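/*
 * Illustrative helper, not part of the driver: locate the DRAM range that
 * covers @sys_addr using the accessors above.  The function name and the
 * -EINVAL convention are made up for this sketch; it merely mirrors the
 * range lookup described in the SysAddr/DramAddr comments at the top of
 * this file.
 */
static inline int find_dram_range_sketch(struct amd64_pvt *pvt, u64 sys_addr)
{
	u8 i;

	for (i = 0; i < DRAM_RANGES; i++) {
		/* Skip ranges with both reads and writes disabled. */
		if (!dram_rw(pvt, i))
			continue;

		if (sys_addr >= get_dram_base(pvt, i) &&
		    sys_addr <= get_dram_limit(pvt, i))
			return i;
	}

	return -EINVAL;
}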

/* The 16-bit syndrome is split across MCA status bits [54:47] and [31:24]. */
static inline u16 extract_syndrome(u64 status)
{
	return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
}

static inline u8 dct_sel_interleave_addr(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0x15 && pvt->model >= 0x30)
		return (((pvt->dct_sel_hi >> 9) & 0x1) << 2) |
			((pvt->dct_sel_lo >> 6) & 0x3);

	return	((pvt)->dct_sel_lo >> 6) & 0x3;
}
/*
 * per-node ECC settings descriptor
 */
struct ecc_settings {
	u32 old_nbctl;
	bool nbctl_valid;

	struct flags {
		unsigned long nb_mce_enable:1;
		unsigned long nb_ecc_prev:1;
	} flags;
};

#ifdef CONFIG_EDAC_DEBUG
extern const struct attribute_group amd64_edac_dbg_group;
#endif

#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
extern const struct attribute_group amd64_edac_inj_group;
#endif

/*
 * Each of the PCI Device IDs types have their own set of hardware accessor
 * functions and per device encoding/decoding logic.
 */
struct low_ops {
	int (*early_channel_count)	(struct amd64_pvt *pvt);
	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci, u64 sys_addr,
					 struct err_info *);
	int (*dbam_to_cs)		(struct amd64_pvt *pvt, u8 dct,
					 unsigned cs_mode, int cs_mask_nr);
};

struct amd64_family_type {
	const char *ctl_name;
	u16 f1_id, f2_id;
	struct low_ops ops;
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func);
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func);

#define amd64_read_pci_cfg(pdev, offset, val)	\
	__amd64_read_pci_cfg_dword(pdev, offset, val, __func__)

#define amd64_write_pci_cfg(pdev, offset, val)	\
	__amd64_write_pci_cfg_dword(pdev, offset, val, __func__)

int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size);

#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

/* Injection helpers */
static inline void disable_caches(void *dummy)
{
	/* Set CR0.CD to disable cache fills, then flush the caches. */
	write_cr0(read_cr0() | X86_CR0_CD);
	wbinvd();
}

static inline void enable_caches(void *dummy)
{
	/* Clear CR0.CD to turn caching back on. */
	write_cr0(read_cr0() & ~X86_CR0_CD);
}
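/*
 * These two are intended to be run on each CPU around an injection,
 * e.g. (illustrative):
 *
 *	on_each_cpu(disable_caches, NULL, 1);
 *	... perform the injection ...
 *	on_each_cpu(enable_caches, NULL, 1);
 */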

static inline u8 dram_intlv_en(struct amd64_pvt *pvt, unsigned int i)
{
	if (pvt->fam == 0x15 && pvt->model >= 0x30) {
		u32 tmp;
		amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &tmp);
		return (u8) tmp & 0xF;
	}
	return (u8) (pvt->ranges[i].base.lo >> 8) & 0x7;
}

static inline u8 dhar_valid(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0x15 && pvt->model >= 0x30) {
		u32 tmp;
		amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
		return (tmp >> 1) & BIT(0);
	}
	return (pvt)->dhar & BIT(0);
}

static inline u32 dct_sel_baseaddr(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0x15 && pvt->model >= 0x30) {
		u32 tmp;
		amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
		return (tmp >> 11) & 0x1FFF;
	}
	return (pvt)->dct_sel_lo & 0xFFFFF800;
}