1 // SPDX-License-Identifier: GPL-2.0-only
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
5 static struct edac_pci_ctl_info *pci_ctl;
7 static int report_gart_errors;
8 module_param(report_gart_errors, int, 0644);
11 * Set by command line parameter. If BIOS has enabled the ECC, this override is
12 * cleared to prevent re-enabling the hardware by this driver.
14 static int ecc_enable_override;
15 module_param(ecc_enable_override, int, 0644);
17 static struct msr __percpu *msrs;
20 static struct ecc_settings **ecc_stngs;
22 /* Number of Unified Memory Controllers */
26 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
27  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching
28  * or next lower' bandwidth value.
30  * FIXME: Produce a better mapping/linearisation.
32 static const struct scrubrate {
33 u32 scrubval; /* bit pattern for scrub rate */
34 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
36 { 0x01, 1600000000UL},
58 { 0x00, 0UL}, /* scrubbing off */
61 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
62 u32 *val, const char *func)
66 err = pci_read_config_dword(pdev, offset, val);
68 amd64_warn("%s: error reading F%dx%03x.\n",
69 func, PCI_FUNC(pdev->devfn), offset);
74 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
75 u32 val, const char *func)
79 err = pci_write_config_dword(pdev, offset, val);
81 amd64_warn("%s: error writing to F%dx%03x.\n",
82 func, PCI_FUNC(pdev->devfn), offset);
88 * Select DCT to which PCI cfg accesses are routed
90 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
94 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
95 reg &= (pvt->model == 0x30) ? ~3 : ~1;
97 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
102 * Depending on the family, F2 DCT reads need special handling:
104 * K8: has a single DCT only and no address offsets >= 0x100
106 * F10h: each DCT has its own set of regs
110 * F16h: has only 1 DCT
112 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
114 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
115 int offset, u32 *val)
119 if (dct || offset >= 0x100)
126 * Note: If ganging is enabled, barring the regs
127 * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
128 * return 0. (cf. Section 2.8.1 F10h BKDG)
130 if (dct_ganging_enabled(pvt))
139 * F15h: F2x1xx addresses do not map explicitly to DCT1.
140 * We should select which DCT we access using F1x10C[DctCfgSel]
142 dct = (dct && pvt->model == 0x30) ? 3 : dct;
143 f15h_select_dct(pvt, dct);
154 return amd64_read_pci_cfg(pvt->F2, offset, val);
158 * Memory scrubber control interface. For K8, memory scrubbing is handled by
159 * hardware and can involve L2 cache, dcache as well as the main memory. With
160 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
161 * functionality.
163 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
164 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
165 * bytes/sec for the setting.
167 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
168 * other archs, we might not have access to the caches directly.
171 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
174 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
175 * are shifted down by 0x5, so scrubval 0x5 is written to the register
176 * as 0x0, scrubval 0x6 as 0x1, etc.
178 if (scrubval >= 0x5 && scrubval <= 0x14) {
180 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
181 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
183 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
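	/*
	 * Worked example (illustration only, values from the comment above):
	 * the encoded range runs from 0x5 - 0x5 = 0x0 up to 0x14 - 0x5 = 0xF,
	 * which is exactly why a 4-bit register field mask (0xF) suffices.
	 */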
187 * Scan the scrub rate mapping table for a close or matching bandwidth value to
188 * issue. If the requested rate exceeds them all, the highest available rate is used.
190 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
196 * map the configured rate (new_bw) to a value specific to the AMD64
197 * memory controller and apply to register. Search for the first
198 * bandwidth entry that does not exceed the requested setting
199 * and program that. If at last entry, turn off DRAM scrubbing.
201 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
202 * by falling back to the last element in scrubrates[].
204 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
206 * skip scrub rates which aren't recommended
207 * (see F10 BKDG, F3x58)
209 if (scrubrates[i].scrubval < min_rate)
212 if (scrubrates[i].bandwidth <= new_bw)
216 scrubval = scrubrates[i].scrubval;
218 if (pvt->fam == 0x17 || pvt->fam == 0x18) {
219 __f17h_set_scrubval(pvt, scrubval);
220 } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
221 f15h_select_dct(pvt, 0);
222 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
223 f15h_select_dct(pvt, 1);
224 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
226 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
230 return scrubrates[i].bandwidth;
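/*
 * Worked example (illustration only, using the two table entries shown
 * above): a request of new_bw = 1600000000 breaks out on the first
 * iteration and programs scrubval 0x01; a request smaller than every
 * non-zero rate never satisfies "bandwidth <= new_bw" within the loop
 * and falls through to the terminating { 0x00, 0UL } entry, i.e. DRAM
 * scrubbing is switched off.
 */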
235 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
237 struct amd64_pvt *pvt = mci->pvt_info;
238 u32 min_scrubrate = 0x5;
243 if (pvt->fam == 0x15) {
245 if (pvt->model < 0x10)
246 f15h_select_dct(pvt, 0);
248 if (pvt->model == 0x60)
251 return __set_scrub_rate(pvt, bw, min_scrubrate);
254 static int get_scrub_rate(struct mem_ctl_info *mci)
256 struct amd64_pvt *pvt = mci->pvt_info;
257 int i, retval = -EINVAL;
263 if (pvt->model < 0x10)
264 f15h_select_dct(pvt, 0);
266 if (pvt->model == 0x60)
267 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
272 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
273 if (scrubval & BIT(0)) {
274 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
283 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
287 scrubval = scrubval & 0x001F;
289 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
290 if (scrubrates[i].scrubval == scrubval) {
291 retval = scrubrates[i].bandwidth;
299 * returns true if the SysAddr given by sys_addr matches the
300 * DRAM base/limit associated with node_id
302 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
306 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
307 * all ones if the most significant implemented address bit is 1.
308 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
309 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
310 * Application Programming.
312 addr = sys_addr & 0x000000ffffffffffull;
314 return ((addr >= get_dram_base(pvt, nid)) &&
315 (addr <= get_dram_limit(pvt, nid)));
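/*
 * Worked example (illustration only): for sys_addr = 0xffffff8000000000,
 * i.e. bit 39 set and sign-extended into bits 63-40, the masking above
 * yields addr = 0x0000008000000000, which is then compared against the
 * node's 40-bit DRAM base/limit pair.
 */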
319 * Attempt to map a SysAddr to a node. On success, return a pointer to the
320 * mem_ctl_info structure for the node that the SysAddr maps to.
322 * On failure, return NULL.
324 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
327 struct amd64_pvt *pvt;
332 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
333 * 3.4.4.2) registers to map the SysAddr to a node ID.
338 * The value of this field should be the same for all DRAM Base
339 * registers. Therefore we arbitrarily choose to read it from the
340 * register for node 0.
342 intlv_en = dram_intlv_en(pvt, 0);
345 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
346 if (base_limit_match(pvt, sys_addr, node_id))
352 if (unlikely((intlv_en != 0x01) &&
353 (intlv_en != 0x03) &&
354 (intlv_en != 0x07))) {
355 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
359 bits = (((u32) sys_addr) >> 12) & intlv_en;
361 for (node_id = 0; ; ) {
362 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
363 break; /* intlv_sel field matches */
365 if (++node_id >= DRAM_RANGES)
369 /* sanity test for sys_addr */
370 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
371 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
372 "range for node %d with node interleaving enabled.\n",
373 __func__, sys_addr, node_id);
378 return edac_mc_find((int)node_id);
381 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
382 (unsigned long)sys_addr);
388 * compute the CS base address of the @csrow on the DRAM controller @dct.
389 * For details see F2x[5C:40] in the processor's BKDG
391 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
392 u64 *base, u64 *mask)
394 u64 csbase, csmask, base_bits, mask_bits;
397 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
398 csbase = pvt->csels[dct].csbases[csrow];
399 csmask = pvt->csels[dct].csmasks[csrow];
400 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
401 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
405 * F16h and F15h, models 30h and later need two addr_shift values:
406 * 8 for high and 6 for low (cf. F16h BKDG).
408 } else if (pvt->fam == 0x16 ||
409 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
410 csbase = pvt->csels[dct].csbases[csrow];
411 csmask = pvt->csels[dct].csmasks[csrow >> 1];
413 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
414 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
417 /* poke holes for the csmask */
418 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
419 (GENMASK_ULL(30, 19) << 8));
421 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
422 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
426 csbase = pvt->csels[dct].csbases[csrow];
427 csmask = pvt->csels[dct].csmasks[csrow >> 1];
430 if (pvt->fam == 0x15)
431 base_bits = mask_bits =
432 GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
434 base_bits = mask_bits =
435 GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
438 *base = (csbase & base_bits) << addr_shift;
441 /* poke holes for the csmask */
442 *mask &= ~(mask_bits << addr_shift);
444 *mask |= (csmask & mask_bits) << addr_shift;
447 #define for_each_chip_select(i, dct, pvt) \
448 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
450 #define chip_select_base(i, dct, pvt) \
451 pvt->csels[dct].csbases[i]
453 #define for_each_chip_select_mask(i, dct, pvt) \
454 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
456 #define for_each_umc(i) \
457 for (i = 0; i < num_umcs; i++)
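/*
 * Typical usage of the iterators above (mirroring input_addr_to_csrow()
 * and read_umc_base_mask() below):
 *
 *	for_each_chip_select(csrow, 0, pvt) {
 *		if (!csrow_enabled(csrow, 0, pvt))
 *			continue;
 *		...
 *	}
 */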
460 * @input_addr is an InputAddr associated with the node given by mci. Return the
461 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
463 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
465 struct amd64_pvt *pvt;
471 for_each_chip_select(csrow, 0, pvt) {
472 if (!csrow_enabled(csrow, 0, pvt))
475 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
479 if ((input_addr & mask) == (base & mask)) {
480 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
481 (unsigned long)input_addr, csrow,
487 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
488 (unsigned long)input_addr, pvt->mc_node_id);
494 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
495 * for the node represented by mci. Info is passed back in *hole_base,
496 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
497 * info is invalid. Info may be invalid for either of the following reasons:
499 * - The revision of the node is not E or greater. In this case, the DRAM Hole
500 * Address Register does not exist.
502 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
503 * indicating that its contents are not valid.
505 * The values passed back in *hole_base, *hole_offset, and *hole_size are
506 * complete 32-bit values despite the fact that the bitfields in the DHAR
507 * only represent bits 31-24 of the base and offset values.
509 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
510 u64 *hole_offset, u64 *hole_size)
512 struct amd64_pvt *pvt = mci->pvt_info;
514 /* only revE and later have the DRAM Hole Address Register */
515 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
516 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
517 pvt->ext_model, pvt->mc_node_id);
521 /* valid for Fam10h and above */
522 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
523 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
527 if (!dhar_valid(pvt)) {
528 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
533 /* This node has Memory Hoisting */
535 /* +------------------+--------------------+--------------------+-----
536 * | memory | DRAM hole | relocated |
537 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
539 * | | | [0x100000000, |
540 * | | | (0x100000000+ |
541 * | | | (0xffffffff-x))] |
542 * +------------------+--------------------+--------------------+-----
544 * Above is a diagram of physical memory showing the DRAM hole and the
545 * relocated addresses from the DRAM hole. As shown, the DRAM hole
546 * starts at address x (the base address) and extends through address
547 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
548 * addresses in the hole so that they start at 0x100000000.
551 *hole_base = dhar_base(pvt);
552 *hole_size = (1ULL << 32) - *hole_base;
554 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
555 : k8_dhar_offset(pvt);
557 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
558 pvt->mc_node_id, (unsigned long)*hole_base,
559 (unsigned long)*hole_offset, (unsigned long)*hole_size);
563 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
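/*
 * Worked example (hypothetical register value, illustration only): with
 * dhar_base() returning 0xc0000000, the code above reports
 * hole_base = 0xc0000000 and hole_size = (1ULL << 32) - 0xc0000000
 * = 0x40000000, i.e. a 1 GB hole just below the 4 GB boundary.
 */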
566 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
567 * assumed that sys_addr maps to the node given by mci.
569 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
570 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
571 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
572 * then it is also involved in translating a SysAddr to a DramAddr. Sections
573 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
574 * These parts of the documentation are unclear. I interpret them as follows:
576 * When node n receives a SysAddr, it processes the SysAddr as follows:
578 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
579 * Limit registers for node n. If the SysAddr is not within the range
580 * specified by the base and limit values, then node n ignores the SysAddr
581 * (since it does not map to node n). Otherwise continue to step 2 below.
583 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
584 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
585 * the range of relocated addresses (starting at 0x100000000) from the DRAM
586 * hole. If not, skip to step 3 below. Else get the value of the
587 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
588 * offset defined by this value from the SysAddr.
590 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
591 * Base register for node n. To obtain the DramAddr, subtract the base
592 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
594 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
596 struct amd64_pvt *pvt = mci->pvt_info;
597 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
600 dram_base = get_dram_base(pvt, pvt->mc_node_id);
602 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
605 if ((sys_addr >= (1ULL << 32)) &&
606 (sys_addr < ((1ULL << 32) + hole_size))) {
607 /* use DHAR to translate SysAddr to DramAddr */
608 dram_addr = sys_addr - hole_offset;
610 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
611 (unsigned long)sys_addr,
612 (unsigned long)dram_addr);
619 * Translate the SysAddr to a DramAddr as shown near the start of
620 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
621 * only deals with 40-bit values. Therefore we discard bits 63-40 of
622 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
623 * discard are all 1s. Otherwise the bits we discard are all 0s. See
624 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
625 * Programmer's Manual Volume 1 Application Programming.
627 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
629 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
630 (unsigned long)sys_addr, (unsigned long)dram_addr);
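/*
 * Worked example (hypothetical values, illustration only): with a 1 GB
 * hole (hole_size = 0x40000000, hole_offset = 0x40000000), a sys_addr of
 * 0x100001000 lies within [1ULL << 32, (1ULL << 32) + hole_size) and is
 * hoisted: dram_addr = 0x100001000 - 0x40000000 = 0xc0001000.
 */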
635 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
636 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
637 * for node interleaving.
639 static int num_node_interleave_bits(unsigned intlv_en)
641 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
644 BUG_ON(intlv_en > 7);
645 n = intlv_shift_table[intlv_en];
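	/*
	 * Worked example (illustration only): the valid IntlvEn values
	 * accepted in find_mc_by_sys_addr() map as 0x1 -> 1 bit,
	 * 0x3 -> 2 bits and 0x7 -> 3 bits of the SysAddr used for node
	 * interleaving.
	 */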
649 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
650 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
652 struct amd64_pvt *pvt;
659 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
660 * concerning translating a DramAddr to an InputAddr.
662 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
663 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
666 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
667 intlv_shift, (unsigned long)dram_addr,
668 (unsigned long)input_addr);
674 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
675 * assumed that @sys_addr maps to the node given by mci.
677 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
682 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
684 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
685 (unsigned long)sys_addr, (unsigned long)input_addr);
690 /* Map the Error address to a PAGE and PAGE OFFSET. */
691 static inline void error_address_to_page_and_offset(u64 error_address,
692 struct err_info *err)
694 err->page = (u32) (error_address >> PAGE_SHIFT);
695 err->offset = ((u32) error_address) & ~PAGE_MASK;
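/*
 * Worked example (illustration only, assuming 4 KiB pages so that
 * PAGE_SHIFT = 12): error_address = 0x12345678 yields page = 0x12345
 * and offset = 0x678.
 */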
699 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
700 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
701 * of a node that detected an ECC memory error. mci represents the node that
702 * the error address maps to (possibly different from the node that detected
703 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
706 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
710 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
713 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
714 "address 0x%lx\n", (unsigned long)sys_addr);
718 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
721 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the
722 * DIMMs are ECC capable.
724 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
726 unsigned long edac_cap = EDAC_FLAG_NONE;
730 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
733 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
736 umc_en_mask |= BIT(i);
738 /* UMC Configuration bit 12 (DimmEccEn) */
739 if (pvt->umc[i].umc_cfg & BIT(12))
740 dimm_ecc_en_mask |= BIT(i);
743 if (umc_en_mask == dimm_ecc_en_mask)
744 edac_cap = EDAC_FLAG_SECDED;
746 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
750 if (pvt->dclr0 & BIT(bit))
751 edac_cap = EDAC_FLAG_SECDED;
757 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
759 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
761 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
763 if (pvt->dram_type == MEM_LRDDR3) {
764 u32 dcsm = pvt->csels[chan].csmasks[0];
766 * It's assumed all LRDIMMs in a DCT are going to be of the
767 * same 'type' until proven otherwise. So, use a cs
768 * value of '0' here to get dcsm value.
770 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
773 edac_dbg(1, "All DIMMs support ECC:%s\n",
774 (dclr & BIT(19)) ? "yes" : "no");
777 edac_dbg(1, " PAR/ERR parity: %s\n",
778 (dclr & BIT(8)) ? "enabled" : "disabled");
780 if (pvt->fam == 0x10)
781 edac_dbg(1, " DCT 128bit mode width: %s\n",
782 (dclr & BIT(11)) ? "128b" : "64b");
784 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
785 (dclr & BIT(12)) ? "yes" : "no",
786 (dclr & BIT(13)) ? "yes" : "no",
787 (dclr & BIT(14)) ? "yes" : "no",
788 (dclr & BIT(15)) ? "yes" : "no");
791 #define CS_EVEN_PRIMARY BIT(0)
792 #define CS_ODD_PRIMARY BIT(1)
793 #define CS_EVEN_SECONDARY BIT(2)
794 #define CS_ODD_SECONDARY BIT(3)
796 #define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
797 #define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
799 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
803 if (csrow_enabled(2 * dimm, ctrl, pvt))
804 cs_mode |= CS_EVEN_PRIMARY;
806 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
807 cs_mode |= CS_ODD_PRIMARY;
809 /* Asymmetric dual-rank DIMM support. */
810 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
811 cs_mode |= CS_ODD_SECONDARY;
816 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
818 int dimm, size0, size1, cs0, cs1, cs_mode;
820 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
822 for (dimm = 0; dimm < 2; dimm++) {
826 cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
828 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
829 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
831 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
837 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
839 struct amd64_umc *umc;
840 u32 i, tmp, umc_base;
843 umc_base = get_umc_base(i);
846 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
847 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
848 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
849 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
851 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
852 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
854 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
855 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
856 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
858 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
859 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
860 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
861 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
862 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
863 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
864 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
865 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
866 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
868 if (pvt->dram_type == MEM_LRDDR4) {
869 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
870 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
871 i, 1 << ((tmp >> 4) & 0x3));
874 debug_display_dimm_sizes_df(pvt, i);
877 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
878 pvt->dhar, dhar_base(pvt));
881 /* Display and decode various NB registers for debug purposes. */
882 static void __dump_misc_regs(struct amd64_pvt *pvt)
884 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
886 edac_dbg(1, " NB two channel DRAM capable: %s\n",
887 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
889 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
890 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
891 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
893 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
895 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
897 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
898 pvt->dhar, dhar_base(pvt),
899 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
900 : f10_dhar_offset(pvt));
902 debug_display_dimm_sizes(pvt, 0);
904 /* everything below this point is Fam10h and above */
908 debug_display_dimm_sizes(pvt, 1);
910 /* Only if NOT ganged does dclr1 have valid info */
911 if (!dct_ganging_enabled(pvt))
912 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
915 /* Display and decode various NB registers for debug purposes. */
916 static void dump_misc_regs(struct amd64_pvt *pvt)
919 __dump_misc_regs_df(pvt);
921 __dump_misc_regs(pvt);
923 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
925 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
929 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
931 static void prep_chip_selects(struct amd64_pvt *pvt)
933 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
934 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
935 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
936 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
937 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
938 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
939 } else if (pvt->fam >= 0x17) {
943 pvt->csels[umc].b_cnt = 4;
944 pvt->csels[umc].m_cnt = 2;
948 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
949 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
953 static void read_umc_base_mask(struct amd64_pvt *pvt)
955 u32 umc_base_reg, umc_base_reg_sec;
956 u32 umc_mask_reg, umc_mask_reg_sec;
957 u32 base_reg, base_reg_sec;
958 u32 mask_reg, mask_reg_sec;
959 u32 *base, *base_sec;
960 u32 *mask, *mask_sec;
964 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
965 umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
967 for_each_chip_select(cs, umc, pvt) {
968 base = &pvt->csels[umc].csbases[cs];
969 base_sec = &pvt->csels[umc].csbases_sec[cs];
971 base_reg = umc_base_reg + (cs * 4);
972 base_reg_sec = umc_base_reg_sec + (cs * 4);
974 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
975 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
976 umc, cs, *base, base_reg);
978 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
979 edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
980 umc, cs, *base_sec, base_reg_sec);
983 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
984 umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
986 for_each_chip_select_mask(cs, umc, pvt) {
987 mask = &pvt->csels[umc].csmasks[cs];
988 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
990 mask_reg = umc_mask_reg + (cs * 4);
991 mask_reg_sec = umc_mask_reg_sec + (cs * 4);
993 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
994 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
995 umc, cs, *mask, mask_reg);
997 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
998 edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
999 umc, cs, *mask_sec, mask_reg_sec);
1005 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
1007 static void read_dct_base_mask(struct amd64_pvt *pvt)
1011 prep_chip_selects(pvt);
1014 return read_umc_base_mask(pvt);
1016 for_each_chip_select(cs, 0, pvt) {
1017 int reg0 = DCSB0 + (cs * 4);
1018 int reg1 = DCSB1 + (cs * 4);
1019 u32 *base0 = &pvt->csels[0].csbases[cs];
1020 u32 *base1 = &pvt->csels[1].csbases[cs];
1022 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1023 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
1026 if (pvt->fam == 0xf)
1029 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1030 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
1031 cs, *base1, (pvt->fam == 0x10) ? reg1
1035 for_each_chip_select_mask(cs, 0, pvt) {
1036 int reg0 = DCSM0 + (cs * 4);
1037 int reg1 = DCSM1 + (cs * 4);
1038 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1039 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1041 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1042 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
1045 if (pvt->fam == 0xf)
1048 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1049 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
1050 cs, *mask1, (pvt->fam == 0x10) ? reg1
1055 static void determine_memory_type(struct amd64_pvt *pvt)
1057 u32 dram_ctrl, dcsm;
1061 if (pvt->ext_model >= K8_REV_F)
1064 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1068 if (pvt->dchr0 & DDR3_MODE)
1071 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1075 if (pvt->model < 0x60)
1079 * Model 0x60 needs special handling:
1081 * We use a Chip Select value of '0' to obtain dcsm.
1082 * Theoretically, it is possible to populate LRDIMMs of different
1083 * 'Rank' value on a DCT. But this is not the common case. So,
1084 * it's reasonable to assume all DIMMs are going to be of the same
1085 * 'type' until proven otherwise.
1087 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1088 dcsm = pvt->csels[0].csmasks[0];
1090 if (((dram_ctrl >> 8) & 0x7) == 0x2)
1091 pvt->dram_type = MEM_DDR4;
1092 else if (pvt->dclr0 & BIT(16))
1093 pvt->dram_type = MEM_DDR3;
1094 else if (dcsm & 0x3)
1095 pvt->dram_type = MEM_LRDDR3;
1097 pvt->dram_type = MEM_RDDR3;
1106 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1107 pvt->dram_type = MEM_LRDDR4;
1108 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1109 pvt->dram_type = MEM_RDDR4;
1111 pvt->dram_type = MEM_DDR4;
1115 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1116 pvt->dram_type = MEM_EMPTY;
1121 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1124 /* Get the number of DCT channels the memory controller is using. */
1125 static int k8_early_channel_count(struct amd64_pvt *pvt)
1129 if (pvt->ext_model >= K8_REV_F)
1130 /* RevF (NPT) and later */
1131 flag = pvt->dclr0 & WIDTH_128;
1133 /* RevE and earlier */
1134 flag = pvt->dclr0 & REVE_WIDTH_128;
1139 return (flag) ? 2 : 1;
1142 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1143 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1145 u16 mce_nid = amd_get_nb_id(m->extcpu);
1146 struct mem_ctl_info *mci;
1151 mci = edac_mc_find(mce_nid);
1155 pvt = mci->pvt_info;
1157 if (pvt->fam == 0xf) {
1162 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1165 * Erratum 637 workaround
1167 if (pvt->fam == 0x15) {
1168 u64 cc6_base, tmp_addr;
1172 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1176 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1177 intlv_en = tmp >> 21 & 0x7;
1179 /* add [47:27] + 3 trailing bits */
1180 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
1182 /* reverse and add DramIntlvEn */
1183 cc6_base |= intlv_en ^ 0x7;
1185 /* pin at [47:24] */
1189 return cc6_base | (addr & GENMASK_ULL(23, 0));
1191 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1194 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1196 /* OR DramIntlvSel into bits [14:12] */
1197 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1199 /* add remaining [11:0] bits from original MC4_ADDR */
1200 tmp_addr |= addr & GENMASK_ULL(11, 0);
1202 return cc6_base | tmp_addr;
1208 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1209 unsigned int device,
1210 struct pci_dev *related)
1212 struct pci_dev *dev = NULL;
1214 while ((dev = pci_get_device(vendor, device, dev))) {
1215 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1216 (dev->bus->number == related->bus->number) &&
1217 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1224 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1226 struct amd_northbridge *nb;
1227 struct pci_dev *f1 = NULL;
1228 unsigned int pci_func;
1229 int off = range << 3;
1232 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1233 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1235 if (pvt->fam == 0xf)
1238 if (!dram_rw(pvt, range))
1241 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1242 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1244 /* F15h: factor in CC6 save area by reading dst node's limit reg */
1245 if (pvt->fam != 0x15)
1248 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1252 if (pvt->model == 0x60)
1253 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1254 else if (pvt->model == 0x30)
1255 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1257 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1259 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1263 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1265 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1267 /* {[39:27],111b} */
1268 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1270 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1273 pvt->ranges[range].lim.hi |= llim >> 13;
1278 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1279 struct err_info *err)
1281 struct amd64_pvt *pvt = mci->pvt_info;
1283 error_address_to_page_and_offset(sys_addr, err);
1286 * Find out which node the error address belongs to. This may be
1287 * different from the node that detected the error.
1289 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1290 if (!err->src_mci) {
1291 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1292 (unsigned long)sys_addr);
1293 err->err_code = ERR_NODE;
1297 /* Now map the sys_addr to a CSROW */
1298 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1299 if (err->csrow < 0) {
1300 err->err_code = ERR_CSROW;
1304 /* CHIPKILL enabled */
1305 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1306 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1307 if (err->channel < 0) {
1309 * Syndrome didn't map, so we don't know which of the
1310 * 2 DIMMs is in error. So we need to ID 'both' of them
1313 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1314 "possible error reporting race\n",
1316 err->err_code = ERR_CHANNEL;
1321 * non-chipkill ecc mode
1323 * The k8 documentation is unclear about how to determine the
1324 * channel number when using non-chipkill memory. This method
1325 * was obtained from email communication with someone at AMD.
1326 * (Wish the email was placed in this comment - norsk)
1328 err->channel = ((sys_addr & BIT(3)) != 0);
1332 static int ddr2_cs_size(unsigned i, bool dct_width)
1338 else if (!(i & 0x1))
1341 shift = (i + 1) >> 1;
1343 return 128 << (shift + !!dct_width);
1346 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1347 unsigned cs_mode, int cs_mask_nr)
1349 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1351 if (pvt->ext_model >= K8_REV_F) {
1352 WARN_ON(cs_mode > 11);
1353 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1355 else if (pvt->ext_model >= K8_REV_D) {
1357 WARN_ON(cs_mode > 10);
1360 * the below calculation, besides trying to win an obfuscated C
1361 * contest, maps cs_mode values to DIMM chip select sizes. The
1362 * mappings are:
1364 * cs_mode CS size (mb)
1365 * =======	============
1366 *	0	32
1367 *	1	64
1368 *	2	128
1369 *	3	128
1370 *	4	256
1371 *	5	512
1372 *	6	256
1373 *	7	512
1374 *	8	1024
1375 *	9	1024
1376 *	10	2048
1378 * Basically, it calculates a value with which to shift the
1379 * smallest CS size of 32MB.
1381 * ddr[23]_cs_size have a similar purpose.
1383 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1385 return 32 << (cs_mode - diff);
1388 WARN_ON(cs_mode > 6);
1389 return 32 << cs_mode;
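/*
 * Worked example of the revD-revE mapping above (illustration only):
 * cs_mode = 6 gives diff = 6/3 + 1 = 3, hence 32 << (6 - 3) = 256 MB;
 * cs_mode = 3 gives diff = 3/3 + 0 = 1, hence 32 << 2 = 128 MB, matching
 * the table in the comment.
 */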
1394 * Get the number of DCT channels in use.
1396 * Return:
1397 *	number of Memory Channels in operation
1398 * Pass back:
1399 *	contents of the DCL0_LOW register
1401 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1403 int i, j, channels = 0;
1405 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1406 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1410 * Need to check if in unganged mode: In such, there are 2 channels,
1411 * but they are not in 128 bit mode and thus the above 'dclr0' status
1412 * bit will be OFF.
1414 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1415 * their CSEnable bit on. If so, then SINGLE DIMM case.
1417 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1420 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1421 * is more than just one DIMM present in unganged mode. Need to check
1422 * both controllers since DIMMs can be placed in either one.
1424 for (i = 0; i < 2; i++) {
1425 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1427 for (j = 0; j < 4; j++) {
1428 if (DBAM_DIMM(j, dbam) > 0) {
1438 amd64_info("MCT channel count: %d\n", channels);
1443 static int f17_early_channel_count(struct amd64_pvt *pvt)
1445 int i, channels = 0;
1447 /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1449 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1451 amd64_info("MCT channel count: %d\n", channels);
1456 static int ddr3_cs_size(unsigned i, bool dct_width)
1461 if (i == 0 || i == 3 || i == 4)
1467 else if (!(i & 0x1))
1470 shift = (i + 1) >> 1;
1473 cs_size = (128 * (1 << !!dct_width)) << shift;
1478 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1483 if (i < 4 || i == 6)
1487 else if (!(i & 0x1))
1490 shift = (i + 1) >> 1;
1493 cs_size = rank_multiply * (128 << shift);
1498 static int ddr4_cs_size(unsigned i)
1507 /* Min cs_size = 1G */
1508 cs_size = 1024 * (1 << (i >> 1));
1513 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1514 unsigned cs_mode, int cs_mask_nr)
1516 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1518 WARN_ON(cs_mode > 11);
1520 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1521 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1523 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1527 * F15h supports only 64bit DCT interfaces
1529 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1530 unsigned cs_mode, int cs_mask_nr)
1532 WARN_ON(cs_mode > 12);
1534 return ddr3_cs_size(cs_mode, false);
1537 /* F15h M60h supports DDR4 mapping as well. */
1538 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1539 unsigned cs_mode, int cs_mask_nr)
1542 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1544 WARN_ON(cs_mode > 12);
1546 if (pvt->dram_type == MEM_DDR4) {
1550 cs_size = ddr4_cs_size(cs_mode);
1551 } else if (pvt->dram_type == MEM_LRDDR3) {
1552 unsigned rank_multiply = dcsm & 0xf;
1554 if (rank_multiply == 3)
1556 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1558 /* Minimum cs size is 512MB for F15h M60h */
1562 cs_size = ddr3_cs_size(cs_mode, false);
1569 * F16h and F15h model 30h have only limited cs_modes.
1571 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1572 unsigned cs_mode, int cs_mask_nr)
1574 WARN_ON(cs_mode > 12);
1576 if (cs_mode == 6 || cs_mode == 8 ||
1577 cs_mode == 9 || cs_mode == 12)
1580 return ddr3_cs_size(cs_mode, false);
1583 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1584 unsigned int cs_mode, int csrow_nr)
1586 u32 addr_mask_orig, addr_mask_deinterleaved;
1587 u32 msb, weight, num_zero_bits;
1590 /* No Chip Selects are enabled. */
1594 /* Requested size of an even CS but none are enabled. */
1595 if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1598 /* Requested size of an odd CS but none are enabled. */
1599 if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1603 * There is one mask per DIMM, and two Chip Selects per DIMM.
1604 * CS0 and CS1 -> DIMM0
1605 * CS2 and CS3 -> DIMM1
1607 dimm = csrow_nr >> 1;
1609 /* Asymmetric dual-rank DIMM support. */
1610 if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1611 addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1613 addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1616 * The number of zero bits in the mask is equal to the number of bits
1617 * in a full mask minus the number of bits in the current mask.
1619 * The MSB is the number of bits in the full mask because BIT[0] is
1620 * always 0.
1622 msb = fls(addr_mask_orig) - 1;
1623 weight = hweight_long(addr_mask_orig);
1624 num_zero_bits = msb - weight;
1626 /* Take the number of zero bits off from the top of the mask. */
1627 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1629 edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1630 edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
1631 edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1633 /* Register [31:1] = Address [39:9]. Size is in kBs here. */
1634 size = (addr_mask_deinterleaved >> 2) + 1;
1636 /* Return size in MBs. */
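/*
 * Worked example (hypothetical mask, illustration only): for
 * addr_mask_orig = 0x7fffefe (bits [26:1] set except one interleave bit),
 * msb = 26 and weight = 25, so num_zero_bits = 1. The deinterleaved mask
 * becomes GENMASK_ULL(25, 1) = 0x3fffffe, and the size works out to
 * (0x3fffffe >> 2) + 1 = 0x1000000 kB, i.e. 16 GB.
 */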
1640 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1643 if (pvt->fam == 0xf)
1646 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1647 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1648 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1650 edac_dbg(0, " DCTs operate in %s mode\n",
1651 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1653 if (!dct_ganging_enabled(pvt))
1654 edac_dbg(0, " Address range split per DCT: %s\n",
1655 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1657 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1658 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1659 (dct_memory_cleared(pvt) ? "yes" : "no"));
1661 edac_dbg(0, " channel interleave: %s, "
1662 "interleave bits selector: 0x%x\n",
1663 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1664 dct_sel_interleave_addr(pvt));
1667 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1671 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1672 * 2.10.12 Memory Interleaving Modes).
1674 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1675 u8 intlv_en, int num_dcts_intlv,
1682 return (u8)(dct_sel);
1684 if (num_dcts_intlv == 2) {
1685 select = (sys_addr >> 8) & 0x3;
1686 channel = select ? 0x3 : 0;
1687 } else if (num_dcts_intlv == 4) {
1688 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1689 switch (intlv_addr) {
1691 channel = (sys_addr >> 8) & 0x3;
1694 channel = (sys_addr >> 9) & 0x3;
1702 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1703 * Interleaving Modes.
1705 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1706 bool hi_range_sel, u8 intlv_en)
1708 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1710 if (dct_ganging_enabled(pvt))
1714 return dct_sel_high;
1717 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1719 if (dct_interleave_enabled(pvt)) {
1720 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1722 /* return DCT select function: 0=DCT0, 1=DCT1 */
1724 return sys_addr >> 6 & 1;
1726 if (intlv_addr & 0x2) {
1727 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1728 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1730 return ((sys_addr >> shift) & 1) ^ temp;
1733 if (intlv_addr & 0x4) {
1734 u8 shift = intlv_addr & 0x1 ? 9 : 8;
1736 return (sys_addr >> shift) & 1;
1739 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1742 if (dct_high_range_enabled(pvt))
1743 return ~dct_sel_high & 1;
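/*
 * Worked example (illustration only): with channel interleaving enabled
 * and intlv_addr = 0x2 (XOR of bit A[6] with the parity of A[20:16]),
 * sys_addr = 0x40 gives temp = hweight_long(0) & 1 = 0 and selects
 * channel ((0x40 >> 6) & 1) ^ 0 = 1, i.e. DCT1.
 */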
1748 /* Convert the sys_addr to the normalized DCT address */
1749 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1750 u64 sys_addr, bool hi_rng,
1751 u32 dct_sel_base_addr)
1754 u64 dram_base = get_dram_base(pvt, range);
1755 u64 hole_off = f10_dhar_offset(pvt);
1756 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1761 * base address of high range is below 4Gb
1762 * (bits [47:27] at [31:11])
1763 * DRAM address space on this DCT is hoisted above 4Gb &&
1766 * remove hole offset from sys_addr
1768 * remove high range offset from sys_addr
1770 if ((!(dct_sel_base_addr >> 16) ||
1771 dct_sel_base_addr < dhar_base(pvt)) &&
1773 (sys_addr >= BIT_64(32)))
1774 chan_off = hole_off;
1776 chan_off = dct_sel_base_off;
1780 * we have a valid hole &&
1785 * remove dram base to normalize to DCT address
1787 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1788 chan_off = hole_off;
1790 chan_off = dram_base;
1793 return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
1797 * checks if the csrow passed in is marked as SPARED, if so returns the new
1798 * spare row
1800 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1804 if (online_spare_swap_done(pvt, dct) &&
1805 csrow == online_spare_bad_dramcs(pvt, dct)) {
1807 for_each_chip_select(tmp_cs, dct, pvt) {
1808 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1818 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1819 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1821 * Return:
1822 *	-EINVAL: NOT FOUND
1823 * 0..csrow = Chip-Select Row
1825 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1827 struct mem_ctl_info *mci;
1828 struct amd64_pvt *pvt;
1829 u64 cs_base, cs_mask;
1830 int cs_found = -EINVAL;
1833 mci = edac_mc_find(nid);
1837 pvt = mci->pvt_info;
1839 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1841 for_each_chip_select(csrow, dct, pvt) {
1842 if (!csrow_enabled(csrow, dct, pvt))
1845 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1847 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1848 csrow, cs_base, cs_mask);
1852 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1853 (in_addr & cs_mask), (cs_base & cs_mask));
1855 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1856 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1860 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1862 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1870 * See F2x10C. Non-interleaved graphics framebuffer memory below 16G is
1871 * swapped with a region located at the bottom of memory so that the GPU can use
1872 * the interleaved region and thus two channels.
1874 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1876 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1878 if (pvt->fam == 0x10) {
1879 /* only revC3 and revE have that feature */
1880 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1884 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1886 if (!(swap_reg & 0x1))
1889 swap_base = (swap_reg >> 3) & 0x7f;
1890 swap_limit = (swap_reg >> 11) & 0x7f;
1891 rgn_size = (swap_reg >> 20) & 0x7f;
1892 tmp_addr = sys_addr >> 27;
1894 if (!(sys_addr >> 34) &&
1895 (((tmp_addr >= swap_base) &&
1896 (tmp_addr <= swap_limit)) ||
1897 (tmp_addr < rgn_size)))
1898 return sys_addr ^ (u64)swap_base << 27;
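/*
 * Worked example (hypothetical register fields, illustration only): with
 * swap_base = 0x01 the XOR above flips bit 27, so an address in the
 * bottom 128 MB region (tmp_addr < rgn_size) is mapped into the region
 * starting at 0x8000000, and vice versa.
 */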
1903 /* For a given @dram_range, check if @sys_addr falls within it. */
1904 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1905 u64 sys_addr, int *chan_sel)
1907 int cs_found = -EINVAL;
1911 bool high_range = false;
1913 u8 node_id = dram_dst_node(pvt, range);
1914 u8 intlv_en = dram_intlv_en(pvt, range);
1915 u32 intlv_sel = dram_intlv_sel(pvt, range);
1917 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1918 range, sys_addr, get_dram_limit(pvt, range));
1920 if (dhar_valid(pvt) &&
1921 dhar_base(pvt) <= sys_addr &&
1922 sys_addr < BIT_64(32)) {
1923 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1928 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1931 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1933 dct_sel_base = dct_sel_baseaddr(pvt);
1936 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1937 * select between DCT0 and DCT1.
1939 if (dct_high_range_enabled(pvt) &&
1940 !dct_ganging_enabled(pvt) &&
1941 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1944 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1946 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1947 high_range, dct_sel_base);
1949 /* Remove node interleaving, see F1x120 */
1951 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1952 (chan_addr & 0xfff);
1954 /* remove channel interleave */
1955 if (dct_interleave_enabled(pvt) &&
1956 !dct_high_range_enabled(pvt) &&
1957 !dct_ganging_enabled(pvt)) {
1959 if (dct_sel_interleave_addr(pvt) != 1) {
1960 if (dct_sel_interleave_addr(pvt) == 0x3)
1962 chan_addr = ((chan_addr >> 10) << 9) |
1963 (chan_addr & 0x1ff);
1965 /* A[6] or hash 6 */
1966 chan_addr = ((chan_addr >> 7) << 6) |
1970 chan_addr = ((chan_addr >> 13) << 12) |
1971 (chan_addr & 0xfff);
1974 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1976 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1979 *chan_sel = channel;
1984 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1985 u64 sys_addr, int *chan_sel)
1987 int cs_found = -EINVAL;
1988 int num_dcts_intlv = 0;
1989 u64 chan_addr, chan_offset;
1990 u64 dct_base, dct_limit;
1991 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1992 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1994 u64 dhar_offset = f10_dhar_offset(pvt);
1995 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1996 u8 node_id = dram_dst_node(pvt, range);
1997 u8 intlv_en = dram_intlv_en(pvt, range);
1999 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2000 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2002 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2003 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
2005 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2006 range, sys_addr, get_dram_limit(pvt, range));
2008 if (!(get_dram_base(pvt, range) <= sys_addr) ||
2009 !(get_dram_limit(pvt, range) >= sys_addr))
2012 if (dhar_valid(pvt) &&
2013 dhar_base(pvt) <= sys_addr &&
2014 sys_addr < BIT_64(32)) {
2015 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2020 /* Verify sys_addr is within DCT Range. */
2021 dct_base = (u64) dct_sel_baseaddr(pvt);
2022 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2024 if (!(dct_cont_base_reg & BIT(0)) &&
2025 !(dct_base <= (sys_addr >> 27) &&
2026 dct_limit >= (sys_addr >> 27)))
2029 /* Verify number of DCTs that participate in channel interleaving. */
2030 num_dcts_intlv = (int) hweight8(intlv_en);
2032 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2035 if (pvt->model >= 0x60)
2036 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2038 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2039 num_dcts_intlv, dct_sel);
2041 /* Verify we stay within the MAX number of channels allowed */
2045 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2047 /* Get normalized DCT addr */
2048 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2049 chan_offset = dhar_offset;
2051 chan_offset = dct_base << 27;
2053 chan_addr = sys_addr - chan_offset;
2055 /* remove channel interleave */
2056 if (num_dcts_intlv == 2) {
2057 if (intlv_addr == 0x4)
2058 chan_addr = ((chan_addr >> 9) << 8) |
2060 else if (intlv_addr == 0x5)
2061 chan_addr = ((chan_addr >> 10) << 9) |
2062 (chan_addr & 0x1ff);
2066 } else if (num_dcts_intlv == 4) {
2067 if (intlv_addr == 0x4)
2068 chan_addr = ((chan_addr >> 10) << 8) |
2070 else if (intlv_addr == 0x5)
2071 chan_addr = ((chan_addr >> 11) << 9) |
2072 (chan_addr & 0x1ff);
2077 if (dct_offset_en) {
2078 amd64_read_pci_cfg(pvt->F1,
2079 DRAM_CONT_HIGH_OFF + (int) channel * 4,
2081 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
2084 f15h_select_dct(pvt, channel);
2086 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2090 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
2091 * there is support for 4 DCTs, but only 2 are currently functional.
2092 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2093 * pvt->csels[1]. So we need to use '1' here to get correct info.
2094 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2096 alias_channel = (channel == 3) ? 1 : channel;
2098 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2101 *chan_sel = alias_channel;
2106 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2110 int cs_found = -EINVAL;
2113 for (range = 0; range < DRAM_RANGES; range++) {
2114 if (!dram_rw(pvt, range))
2117 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2118 cs_found = f15_m30h_match_to_this_node(pvt, range,
2122 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2123 (get_dram_limit(pvt, range) >= sys_addr)) {
2124 cs_found = f1x_match_to_this_node(pvt, range,
2125 sys_addr, chan_sel);
2134 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2135 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2137 * The @sys_addr is usually an error address received from the hardware
2138 * (MCX_ADDR).
2140 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2141 struct err_info *err)
2143 struct amd64_pvt *pvt = mci->pvt_info;
2145 error_address_to_page_and_offset(sys_addr, err);
2147 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2148 if (err->csrow < 0) {
2149 err->err_code = ERR_CSROW;
2154 * We need the syndromes for channel detection only when we're
2155 * ganged. Otherwise @chan should already contain the channel at
2156 * this point.
2158 if (dct_ganging_enabled(pvt))
2159 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2163 * debug routine to display the memory sizes of all logical DIMMs and their
2164 * CSROWs
2166 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2168 int dimm, size0, size1;
2169 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2170 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2172 if (pvt->fam == 0xf) {
2173 /* K8 families < revF not supported yet */
2174 if (pvt->ext_model < K8_REV_F)
2180 if (pvt->fam == 0x10) {
2181 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2183 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2184 pvt->csels[1].csbases :
2185 pvt->csels[0].csbases;
2188 dcsb = pvt->csels[1].csbases;
2190 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2193 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2195 /* Dump memory sizes for each DIMM and its CSROWs */
2196 for (dimm = 0; dimm < 4; dimm++) {
2199 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2201 * For F15h M60h, we need the multiplier for LRDIMM cs_size
2202 * calculation. We pass the dimm value to the dbam_to_cs
2203 * mapper so we can find the multiplier from the
2204 * corresponding DCSM.
2206 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2207 DBAM_DIMM(dimm, dbam),
2211 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2212 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2213 DBAM_DIMM(dimm, dbam),
2216 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2218 dimm * 2 + 1, size1);
2222 static struct amd64_family_type family_types[] = {
2225 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2226 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2228 .early_channel_count = k8_early_channel_count,
2229 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
2230 .dbam_to_cs = k8_dbam_to_chip_select,
2235 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2236 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2238 .early_channel_count = f1x_early_channel_count,
2239 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2240 .dbam_to_cs = f10_dbam_to_chip_select,
2245 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2246 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2248 .early_channel_count = f1x_early_channel_count,
2249 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2250 .dbam_to_cs = f15_dbam_to_chip_select,
2254 .ctl_name = "F15h_M30h",
2255 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2256 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2258 .early_channel_count = f1x_early_channel_count,
2259 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2260 .dbam_to_cs = f16_dbam_to_chip_select,
2264 .ctl_name = "F15h_M60h",
2265 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2266 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2268 .early_channel_count = f1x_early_channel_count,
2269 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2270 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2275 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2276 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2278 .early_channel_count = f1x_early_channel_count,
2279 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2280 .dbam_to_cs = f16_dbam_to_chip_select,
2284 .ctl_name = "F16h_M30h",
2285 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2286 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2288 .early_channel_count = f1x_early_channel_count,
2289 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2290 .dbam_to_cs = f16_dbam_to_chip_select,
2295 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2296 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2298 .early_channel_count = f17_early_channel_count,
2299 .dbam_to_cs = f17_addr_mask_to_cs_size,
2303 .ctl_name = "F17h_M10h",
2304 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2305 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2307 .early_channel_count = f17_early_channel_count,
2308 .dbam_to_cs = f17_addr_mask_to_cs_size,
2312 .ctl_name = "F17h_M30h",
2313 .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2314 .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2316 .early_channel_count = f17_early_channel_count,
2317 .dbam_to_cs = f17_addr_mask_to_cs_size,
2321 .ctl_name = "F17h_M70h",
2322 .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2323 .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2325 .early_channel_count = f17_early_channel_count,
2326 .dbam_to_cs = f17_addr_mask_to_cs_size,
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};

static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}

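/*
 * Worked example (added, illustrative only): decoding syndrome 0x0003 against
 * x4_vectors (v_dim = 4). For err_sym 0 and 1 the eigenvector XORs leave a
 * nonzero residue and the inner loop breaks out. For err_sym 2 the table rows
 * are the unit vectors 0x0001, 0x0002, 0x0004, 0x0008: XOR-ing 0x0001 (bit 0)
 * and then 0x0002 (bit 1) drives the modified syndrome to zero, so
 *
 *	decode_syndrome(0x0003, x4_vectors, ARRAY_SIZE(x4_vectors), 4) == 2
 *
 * and map_err_sym_to_channel(2, 4) below maps that symbol to channel 0
 * (2 >> 4 == 0).
 */
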
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}

static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}

static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}

/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 *
 * Currently, we can derive the channel number by looking at the 6th nibble in
 * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
 * number.
 */
static int find_umc_channel(struct mce *m)
{
	return (m->ipid & GENMASK(31, 0)) >> 20;
}

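/*
 * Worked example (added, illustrative only): if the low 32 bits of the bank's
 * IPID hold the instance_id 0x150000, then 0x150000 >> 20 == 0x1, i.e. the
 * error was reported by UMC channel 1; an instance_id of 0x050000 decodes to
 * channel 0.
 */
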
static void decode_umc_error(int node_id, struct mce *m)
{
	u8 ecc_type = (m->status >> 45) & 0x3;
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	struct err_info err;
	u64 sys_addr;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	memset(&err, 0, sizeof(err));

	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;

	err.channel = find_umc_channel(m);

	if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}

	if (ecc_type == 2) {
		u8 length = (m->synd >> 18) & 0x3f;

		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}

	err.csrow = m->synd & 0x7;

	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}

	error_address_to_page_and_offset(sys_addr, &err);

log_error:
	__log_ecc_error(mci, &err, ecc_type);
}

/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
			return -ENODEV;
		}

		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
		return -ENODEV;
	}

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}

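/*
 * Usage note (added): pci_get_related_function() in reserve_mc_sibling_devs()
 * takes a reference on every sibling it finds; free_mc_sibling_devs() is its
 * exact inverse and drops those references with pci_dev_put(), which safely
 * ignores NULL pointers left behind by a partially failed reservation.
 */
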
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
	pvt->ecc_sym_sz = 4;

	if (pvt->umc) {
		u8 i;

		for_each_umc(i) {
			/* Check enabled channels only: */
			if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
				if (pvt->umc[i].ecc_ctrl & BIT(9)) {
					pvt->ecc_sym_sz = 16;
					return;
				} else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
					pvt->ecc_sym_sz = 8;
					return;
				}
			}
		}
	} else if (pvt->fam >= 0x10) {
		u32 tmp;

		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1. */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too. */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
}

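/*
 * Illustrative note (added): the UMC scan stops at the first enabled channel
 * advertising a wider symbol. E.g. ecc_ctrl with BIT(9) set on UMC0 settles
 * ecc_sym_sz = 16 immediately; a system whose enabled UMCs only set BIT(7)
 * ends up with x8 symbols; with neither bit set, the x4 default stands.
 */
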
/*
 * Retrieve the hardware registers of the memory controller.
 */
static void __read_mc_regs_df(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for_each_umc(i) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}

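/*
 * Layout note (added; an assumption, since get_umc_base() lives in the
 * header): the per-channel UMC register blocks are spaced 0x100000 apart in
 * SMN space, so get_umc_base(0) yields 0x50000 and get_umc_base(1) yields
 * 0x150000, and umc_base + UMCCH_SDP_CTRL addresses the same register in
 * each channel's block.
 */
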
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero.
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);

	/* Check first whether TOP_MEM2 is enabled: */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & BIT(21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, " TOP_MEM2 disabled\n");
	}

	if (pvt->umc) {
		__read_mc_regs_df(pvt);
		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);

		goto skip;
	}

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

skip:
	read_dct_base_mask(pvt);

	determine_memory_type(pvt);
	edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	determine_ecc_sym_sz(pvt);

	dump_misc_regs(pvt);
}

/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr	ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of four sets of 4 bits each:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on
 * CPU revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
	int csrow_nr = csrow_nr_orig;
	u32 cs_mode, nr_pages;

	if (!pvt->umc) {
		csrow_nr >>= 1;
		cs_mode = DBAM_DIMM(csrow_nr, dbam);
	} else {
		cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
	}

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
	nr_pages <<= 20 - PAGE_SHIFT;

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr_orig, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}

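/*
 * Worked example (added, illustrative only): dbam_to_cs() returns the chip
 * select size in MB. Since 1 MB holds 1 << (20 - PAGE_SHIFT) pages, a
 * 2048 MB chip select with 4 KiB pages (PAGE_SHIFT == 12) becomes
 * 2048 << 8 == 524288 pages per channel.
 */
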
static int init_csrows_df(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	enum dev_type dev_type = DEV_UNKNOWN;
	struct dimm_info *dimm;
	int empty = 1;
	u8 umc, cs;

	if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
		edac_mode = EDAC_S16ECD16ED;
		dev_type = DEV_X16;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
		edac_mode = EDAC_S8ECD8ED;
		dev_type = DEV_X8;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
		edac_mode = EDAC_S4ECD4ED;
		dev_type = DEV_X4;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
		edac_mode = EDAC_SECDED;
	}

	for_each_umc(umc) {
		for_each_chip_select(cs, umc, pvt) {
			if (!csrow_enabled(cs, umc, pvt))
				continue;

			empty = 0;
			dimm = mci->csrows[cs]->channels[umc]->dimm;

			edac_dbg(1, "MC node: %d, csrow: %d\n",
				 pvt->mc_node_id, cs);

			dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->dtype = dev_type;
		}
	}

	return empty;
}

/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	if (pvt->umc)
		return init_csrows_df(mci);

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;

	edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		 pvt->mc_node_id, val,
		 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/* Determine DIMM ECC mode: */
		if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}

/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}

static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}

static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}

static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}

/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;

		for_each_umc(i) {
			u32 base = get_umc_base(i);

			/* Only check enabled UMCs. */
			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
				continue;

			if (!(value & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
				continue;

			if (value & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}

		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;
		else
			edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				 MSR_IA32_MCG_CTL, nid);
	}

	amd64_info("Node %d: DRAM ECC %s.\n",
		   nid, (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en) {
		amd64_info("%s", ecc_msg);
		return false;
	}

	return true;
}

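/*
 * Worked example (added, illustrative only): with UMC0 and UMC1 initialized,
 * umc_en_mask == 0x3. If both report ECC active, ecc_en_mask == 0x3 and the
 * masks match, so ecc_en is true; if only UMC0 has ECC on, ecc_en_mask ==
 * 0x1 != 0x3 and the node is treated as ECC-disabled.
 */
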
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
	u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;

	for_each_umc(i) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);

			dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
		}
	}

	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (!cpk_en)
			return;

		if (dev_x4)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
		else if (dev_x16)
			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
		else
			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
	}
}

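/*
 * Worked example (added, illustrative only): enabled UMCs that all report
 * ECC on, chipkill capability and x4 DRAM devices (dimm_cfg BIT(6) set
 * everywhere) produce EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED. Mixing x4 and
 * x8 DIMMs clears both dev_x4 and dev_x16, so the x8 flag is the fallback.
 */
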
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->umc) {
		f17h_determine_edac_ctl_cap(mci, pvt);
	} else {
		if (pvt->nbcap & NBCAP_SECDED)
			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (pvt->nbcap & NBCAP_CHIPKILL)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}

/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model	= boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_stepping;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		}

		fam_type	= &family_types[F15_CPUS];
		pvt->ops	= &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
			fam_type = &family_types[F17_M10H_CPUS];
			pvt->ops = &family_types[F17_M10H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
			fam_type = &family_types[F17_M30H_CPUS];
			pvt->ops = &family_types[F17_M30H_CPUS].ops;
			break;
		} else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
			fam_type = &family_types[F17_M70H_CPUS];
			pvt->ops = &family_types[F17_M70H_CPUS].ops;
			break;
		}
		/* fall through */
	case 0x18:
		fam_type	= &family_types[F17_CPUS];
		pvt->ops	= &family_types[F17_CPUS].ops;

		if (pvt->fam == 0x18)
			family_types[F17_CPUS].ctl_name = "F18h";
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}

static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};

/* Set the number of Unified Memory Controllers in the system. */
static void compute_num_umcs(void)
{
	u8 model = boot_cpu_data.x86_model;

	if (boot_cpu_data.x86 < 0x17)
		return;

	if (model >= 0x30 && model <= 0x3f)
		num_umcs = 8;
	else
		num_umcs = 2;

	edac_dbg(1, "Number of UMCs: %x\n", num_umcs);
}

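/*
 * Worked example (added, illustrative only): a family 0x17, model 0x31 part
 * falls in the 0x30-0x3f range and gets num_umcs = 8, while models outside
 * that window (say 0x01 or 0x71) keep the default of two UMCs per node.
 */
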
static int init_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	struct amd64_pvt *pvt = NULL;
	u16 pci_id1, pci_id2;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id = nid;
	pvt->F3 = F3;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc) {
			ret = -ENOMEM;
			goto err_free;
		}

		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (err)
		goto err_post_init;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 *
	 * On Fam17h+, the number of controllers may be greater than two. So set
	 * the size equal to the maximum number of UMCs.
	 */
	if (pvt->fam >= 0x17)
		layers[1].size = num_umcs;
	else
		layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_post_init:
	if (pvt->fam >= 0x17)
		kfree(pvt->umc);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}

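/*
 * Usage note (added): the two layers declared above size the EDAC core
 * tables: layers[0] (chip selects) by csels[0].b_cnt, layers[1] (channels)
 * by num_umcs or 2. A DIMM is then addressed as
 * mci->csrows[cs]->channels[ch]->dimm, the exact indexing init_csrows_df()
 * relies on.
 */
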
static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = 0;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(nid);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}

static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	mci = find_mci_by_dev(&F3->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}

static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = edac_mc_find(0);
	if (!mci)
		return;

	pvt = mci->pvt_info;
	if (pvt->umc)
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
	else
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}

static const struct x86_cpu_id amd64_cpuids[] = {
	{ X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

static int __init amd64_edac_init(void)
{
	const char *owner;
	int err = -ENODEV;
	int i;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	compute_num_umcs();

	for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}

	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	if (boot_cpu_data.x86 >= 0x17)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}

static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);

	if (boot_cpu_data.x86 >= 0x17)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");