1 #include "amd64_edac.h"
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
* cleared to prevent this driver from re-enabling the hardware.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 /* Lookup table for all possible MC control instances */
18 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
19 static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
23 * for DDR2 DRAM mapping.
25 u32 revf_quad_ddr2_shift[] = {
26 0, /* 0000b NULL DIMM (128mb) */
45 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
* or-higher value'.
* FIXME: Produce a better mapping/linearisation.
52 struct scrubrate scrubrates[] = {
53 { 0x01, 1600000000UL},
75 { 0x00, 0UL}, /* scrubbing off */
79 * Memory scrubber control interface. For K8, memory scrubbing is handled by
80 * hardware and can involve L2 cache, dcache as well as the main memory. With
* F10, this is extended to L3 cache scrubbing on CPU models sporting that functionality.
* This causes the "units" for the scrubbing speed to vary from 64 byte blocks
* (dram) to cache lines. This is nasty, so we will use bandwidth in
86 * bytes/sec for the setting.
88 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
89 * other archs, we might not have access to the caches directly.
93 * scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If the requested rate is too big, use the last (maximum) value found.
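* For example (illustrative numbers): a request of 1700000000 bytes/sec
* selects the { 0x01, 1600000000UL } table entry, the first bandwidth in the
* table that does not exceed the request.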
96 static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
103 * map the configured rate (new_bw) to a value specific to the AMD64
104 * memory controller and apply to register. Search for the first
* bandwidth entry that is greater than or equal to the requested setting
106 * and program that. If at last entry, turn off DRAM scrubbing.
108 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
110 * skip scrub rates which aren't recommended
111 * (see F10 BKDG, F3x58)
113 if (scrubrates[i].scrubval < min_scrubrate)
116 if (scrubrates[i].bandwidth <= new_bw)
120 * if no suitable bandwidth found, turn off DRAM scrubbing
* entirely by falling back to the last element in the scrubrates array.
126 scrubval = scrubrates[i].scrubval;
128 edac_printk(KERN_DEBUG, EDAC_MC,
129 "Setting scrub rate bandwidth: %u\n",
130 scrubrates[i].bandwidth);
132 edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
134 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
139 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
141 struct amd64_pvt *pvt = mci->pvt_info;
142 u32 min_scrubrate = 0x0;
144 switch (boot_cpu_data.x86) {
146 min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
149 min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
152 min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
156 amd64_printk(KERN_ERR, "Unsupported family!\n");
159 return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
163 static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
165 struct amd64_pvt *pvt = mci->pvt_info;
169 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
171 scrubval = scrubval & 0x001F;
173 edac_printk(KERN_DEBUG, EDAC_MC,
174 "pci-read, sdram scrub control value: %d \n", scrubval);
for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
177 if (scrubrates[i].scrubval == scrubval) {
178 *bw = scrubrates[i].bandwidth;
187 /* Map from a CSROW entry to the mask entry that operates on it */
188 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
190 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
196 /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
197 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
200 return pvt->dcsb0[csrow];
202 return pvt->dcsb1[csrow];
* Return the 'mask' address for the i'th CS entry. This function is needed
* because the number of DCSM registers differs between Rev E and prior and
* Rev F and later.
210 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
213 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
215 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
220 * In *base and *limit, pass back the full 40-bit base and limit physical
221 * addresses for the node given by node_id. This information is obtained from
222 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
223 * base and limit addresses are of type SysAddr, as defined at the start of
224 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
225 * in the address range they represent.
227 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
228 u64 *base, u64 *limit)
230 *base = pvt->dram_base[node_id];
231 *limit = pvt->dram_limit[node_id];
235 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
238 static int amd64_base_limit_match(struct amd64_pvt *pvt,
239 u64 sys_addr, int node_id)
241 u64 base, limit, addr;
243 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
245 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
246 * all ones if the most significant implemented address bit is 1.
247 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
248 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
249 * Application Programming.
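* For example, 0xffffff8000000000 and 0x0000008000000000 both refer to the
* same 40-bit address, 0x8000000000.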
251 addr = sys_addr & 0x000000ffffffffffull;
253 return (addr >= base) && (addr <= limit);
257 * Attempt to map a SysAddr to a node. On success, return a pointer to the
258 * mem_ctl_info structure for the node that the SysAddr maps to.
260 * On failure, return NULL.
262 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
265 struct amd64_pvt *pvt;
270 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
271 * 3.4.4.2) registers to map the SysAddr to a node ID.
276 * The value of this field should be the same for all DRAM Base
277 * registers. Therefore we arbitrarily choose to read it from the
278 * register for node 0.
280 intlv_en = pvt->dram_IntlvEn[0];
283 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
284 if (amd64_base_limit_match(pvt, sys_addr, node_id))
290 if (unlikely((intlv_en != 0x01) &&
291 (intlv_en != 0x03) &&
292 (intlv_en != 0x07))) {
293 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
294 "IntlvEn field of DRAM Base Register for node 0: "
295 "this probably indicates a BIOS bug.\n", intlv_en);
299 bits = (((u32) sys_addr) >> 12) & intlv_en;
301 for (node_id = 0; ; ) {
302 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
303 break; /* intlv_sel field matches */
305 if (++node_id >= DRAM_REG_COUNT)
309 /* sanity test for sys_addr */
310 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
311 amd64_printk(KERN_WARNING,
312 "%s(): sys_addr 0x%llx falls outside base/limit "
313 "address range for node %d with node interleaving "
315 __func__, sys_addr, node_id);
320 return edac_mc_find(node_id);
323 debugf2("sys_addr 0x%lx doesn't match any node\n",
324 (unsigned long)sys_addr);
330 * Extract the DRAM CS base address from selected csrow register.
332 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
334 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
339 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
341 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
343 u64 dcsm_bits, other_bits;
346 /* Extract bits from DRAM CS Mask. */
347 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
349 other_bits = pvt->dcsm_mask;
350 other_bits = ~(other_bits << pvt->dcs_shift);
353 * The extracted bits from DCSM belong in the spaces represented by
354 * the cleared bits in other_bits.
356 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
362 * @input_addr is an InputAddr associated with the node given by mci. Return the
363 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
365 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
367 struct amd64_pvt *pvt;
374 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
375 * base/mask register pair, test the condition shown near the start of
376 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
378 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
380 /* This DRAM chip select is disabled on this node */
381 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
384 base = base_from_dct_base(pvt, csrow);
385 mask = ~mask_from_dct_mask(pvt, csrow);
387 if ((input_addr & mask) == (base & mask)) {
388 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
389 (unsigned long)input_addr, csrow,
396 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
397 (unsigned long)input_addr, pvt->mc_node_id);
403 * Return the base value defined by the DRAM Base register for the node
404 * represented by mci. This function returns the full 40-bit value despite the
405 * fact that the register only stores bits 39-24 of the value. See section
406 * 3.4.4.1 (BKDG #26094, K8, revA-E)
408 static inline u64 get_dram_base(struct mem_ctl_info *mci)
410 struct amd64_pvt *pvt = mci->pvt_info;
412 return pvt->dram_base[pvt->mc_node_id];
416 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
417 * for the node represented by mci. Info is passed back in *hole_base,
418 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
419 * info is invalid. Info may be invalid for either of the following reasons:
421 * - The revision of the node is not E or greater. In this case, the DRAM Hole
422 * Address Register does not exist.
424 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
425 * indicating that its contents are not valid.
427 * The values passed back in *hole_base, *hole_offset, and *hole_size are
428 * complete 32-bit values despite the fact that the bitfields in the DHAR
429 * only represent bits 31-24 of the base and offset values.
431 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
432 u64 *hole_offset, u64 *hole_size)
434 struct amd64_pvt *pvt = mci->pvt_info;
437 /* only revE and later have the DRAM Hole Address Register */
438 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
439 debugf1(" revision %d for node %d does not support DHAR\n",
440 pvt->ext_model, pvt->mc_node_id);
444 /* only valid for Fam10h */
445 if (boot_cpu_data.x86 == 0x10 &&
446 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
447 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
451 if ((pvt->dhar & DHAR_VALID) == 0) {
452 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
457 /* This node has Memory Hoisting */
459 /* +------------------+--------------------+--------------------+-----
460 * | memory | DRAM hole | relocated |
461 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
463 * | | | [0x100000000, |
464 * | | | (0x100000000+ |
465 * | | | (0xffffffff-x))] |
466 * +------------------+--------------------+--------------------+-----
468 * Above is a diagram of physical memory showing the DRAM hole and the
469 * relocated addresses from the DRAM hole. As shown, the DRAM hole
470 * starts at address x (the base address) and extends through address
471 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
472 * addresses in the hole so that they start at 0x100000000.
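* For example (illustrative values): with a hole base x of 0xc0000000, the
* hole spans [0xc0000000, 0xffffffff], hole_size is 0x40000000, and the DRAM
* behind the hole is re-addressed at [0x100000000, 0x13fffffff].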
475 base = dhar_base(pvt->dhar);
478 *hole_size = (0x1ull << 32) - base;
480 if (boot_cpu_data.x86 > 0xf)
481 *hole_offset = f10_dhar_offset(pvt->dhar);
483 *hole_offset = k8_dhar_offset(pvt->dhar);
485 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
486 pvt->mc_node_id, (unsigned long)*hole_base,
487 (unsigned long)*hole_offset, (unsigned long)*hole_size);
491 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
494 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
495 * assumed that sys_addr maps to the node given by mci.
497 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
498 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
499 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
500 * then it is also involved in translating a SysAddr to a DramAddr. Sections
501 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
502 * These parts of the documentation are unclear. I interpret them as follows:
504 * When node n receives a SysAddr, it processes the SysAddr as follows:
506 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
507 * Limit registers for node n. If the SysAddr is not within the range
508 * specified by the base and limit values, then node n ignores the Sysaddr
509 * (since it does not map to node n). Otherwise continue to step 2 below.
511 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
512 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
513 * the range of relocated addresses (starting at 0x100000000) from the DRAM
514 * hole. If not, skip to step 3 below. Else get the value of the
515 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
516 * offset defined by this value from the SysAddr.
518 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
519 * Base register for node n. To obtain the DramAddr, subtract the base
520 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
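* Worked example (illustrative values): if node n has DRAMBase 0x100000000
* and the DHAR does not apply, a SysAddr of 0x180000000 yields, per step 3,
* DramAddr 0x180000000 - 0x100000000 = 0x80000000.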
522 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
524 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
527 dram_base = get_dram_base(mci);
529 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
532 if ((sys_addr >= (1ull << 32)) &&
533 (sys_addr < ((1ull << 32) + hole_size))) {
534 /* use DHAR to translate SysAddr to DramAddr */
535 dram_addr = sys_addr - hole_offset;
537 debugf2("using DHAR to translate SysAddr 0x%lx to "
539 (unsigned long)sys_addr,
540 (unsigned long)dram_addr);
547 * Translate the SysAddr to a DramAddr as shown near the start of
548 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
549 * only deals with 40-bit values. Therefore we discard bits 63-40 of
550 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
551 * discard are all 1s. Otherwise the bits we discard are all 0s. See
552 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
553 * Programmer's Manual Volume 1 Application Programming.
555 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
557 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
558 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
559 (unsigned long)dram_addr);
564 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
565 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
566 * for node interleaving.
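* Only the IntlvEn encodings 0, 1, 3 and 7 are valid, selecting 0, 1, 2 or 3
* interleave bits; the zero entries for 2, 4, 5 and 6 in the table below
* should never be hit on a sane BIOS (see the check in find_mc_by_sys_addr()).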
568 static int num_node_interleave_bits(unsigned intlv_en)
570 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
573 BUG_ON(intlv_en > 7);
574 n = intlv_shift_table[intlv_en];
578 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
579 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
581 struct amd64_pvt *pvt;
588 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
589 * concerning translating a DramAddr to an InputAddr.
591 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
592 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
595 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
596 intlv_shift, (unsigned long)dram_addr,
597 (unsigned long)input_addr);
603 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
604 * assumed that @sys_addr maps to the node given by mci.
606 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
611 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
613 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
614 (unsigned long)sys_addr, (unsigned long)input_addr);
621 * @input_addr is an InputAddr associated with the node represented by mci.
622 * Translate @input_addr to a DramAddr and return the result.
624 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
626 struct amd64_pvt *pvt;
627 int node_id, intlv_shift;
632 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
633 * shows how to translate a DramAddr to an InputAddr. Here we reverse
634 * this procedure. When translating from a DramAddr to an InputAddr, the
635 * bits used for node interleaving are discarded. Here we recover these
636 * bits from the IntlvSel field of the DRAM Limit register (section
637 * 3.4.4.2) for the node that input_addr is associated with.
640 node_id = pvt->mc_node_id;
641 BUG_ON((node_id < 0) || (node_id > 7));
643 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
645 if (intlv_shift == 0) {
646 debugf1(" InputAddr 0x%lx translates to DramAddr of "
647 "same value\n", (unsigned long)input_addr);
652 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
653 (input_addr & 0xfff);
655 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
656 dram_addr = bits + (intlv_sel << 12);
658 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
659 "(%d node interleave bits)\n", (unsigned long)input_addr,
660 (unsigned long)dram_addr, intlv_shift);
666 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
667 * @dram_addr to a SysAddr.
669 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
671 struct amd64_pvt *pvt = mci->pvt_info;
672 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
675 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
678 if ((dram_addr >= hole_base) &&
679 (dram_addr < (hole_base + hole_size))) {
680 sys_addr = dram_addr + hole_offset;
682 debugf1("using DHAR to translate DramAddr 0x%lx to "
683 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
684 (unsigned long)sys_addr);
690 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
691 sys_addr = dram_addr + base;
694 * The sys_addr we have computed up to this point is a 40-bit value
695 * because the k8 deals with 40-bit values. However, the value we are
696 * supposed to return is a full 64-bit physical address. The AMD
697 * x86-64 architecture specifies that the most significant implemented
698 * address bit through bit 63 of a physical address must be either all
699 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
700 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
* AMD x86-64 Architecture Programmer's Manual Volume 1 Application Programming.
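* For example, 0x8000000000 (bit 39 set) sign-extends to 0xffffff8000000000,
* while addresses with bit 39 clear pass through unchanged.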
704 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
706 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
707 pvt->mc_node_id, (unsigned long)dram_addr,
708 (unsigned long)sys_addr);
714 * @input_addr is an InputAddr associated with the node given by mci. Translate
715 * @input_addr to a SysAddr.
717 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
720 return dram_addr_to_sys_addr(mci,
721 input_addr_to_dram_addr(mci, input_addr));
725 * Find the minimum and maximum InputAddr values that map to the given @csrow.
726 * Pass back these values in *input_addr_min and *input_addr_max.
728 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
729 u64 *input_addr_min, u64 *input_addr_max)
731 struct amd64_pvt *pvt;
735 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
737 base = base_from_dct_base(pvt, csrow);
738 mask = mask_from_dct_mask(pvt, csrow);
740 *input_addr_min = base & ~mask;
741 *input_addr_max = base | mask | pvt->dcs_mask_notused;
745 * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
746 * Address High (section 3.6.4.6) register values and return the result. Address
* is located in the info structure (nbeah and nbeal); the encoding is device specific.
750 static u64 extract_error_address(struct mem_ctl_info *mci,
751 struct err_regs *info)
753 struct amd64_pvt *pvt = mci->pvt_info;
755 return pvt->ops->get_error_address(mci, info);
759 /* Map the Error address to a PAGE and PAGE OFFSET. */
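/* With 4 KiB pages (PAGE_SHIFT == 12), e.g. address 0x12345678 maps to page 0x12345, offset 0x678. */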
760 static inline void error_address_to_page_and_offset(u64 error_address,
761 u32 *page, u32 *offset)
763 *page = (u32) (error_address >> PAGE_SHIFT);
764 *offset = ((u32) error_address) & ~PAGE_MASK;
768 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
769 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
770 * of a node that detected an ECC memory error. mci represents the node that
771 * the error address maps to (possibly different from the node that detected
772 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
775 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
779 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
782 amd64_mc_printk(mci, KERN_ERR,
783 "Failed to translate InputAddr to csrow for "
784 "address 0x%lx\n", (unsigned long)sys_addr);
788 static int get_channel_from_ecc_syndrome(unsigned short syndrome);
790 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
792 if (boot_cpu_data.x86 == 0x11)
793 edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
794 else if (boot_cpu_data.x86 == 0x10)
795 edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
796 else if (boot_cpu_data.x86 == 0xf)
797 edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
798 (pvt->ext_model >= OPTERON_CPU_REV_F) ?
799 "Rev F or later" : "Rev E or earlier");
/* we'll hardly ever get here */
802 edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
806 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
809 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
enum edac_type edac_cap = EDAC_FLAG_NONE;
814 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
818 if (pvt->dclr0 & BIT(bit))
819 edac_cap = EDAC_FLAG_SECDED;
825 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
827 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
829 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
831 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
832 (dclr & BIT(16)) ? "un" : "",
833 (dclr & BIT(19)) ? "yes" : "no");
835 debugf1(" PAR/ERR parity: %s\n",
836 (dclr & BIT(8)) ? "enabled" : "disabled");
838 debugf1(" DCT 128bit mode width: %s\n",
839 (dclr & BIT(11)) ? "128b" : "64b");
841 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
842 (dclr & BIT(12)) ? "yes" : "no",
843 (dclr & BIT(13)) ? "yes" : "no",
844 (dclr & BIT(14)) ? "yes" : "no",
845 (dclr & BIT(15)) ? "yes" : "no");
848 /* Display and decode various NB registers for debug purposes. */
849 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
853 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
855 debugf1(" NB two channel DRAM capable: %s\n",
856 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
858 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
859 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
860 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
862 amd64_dump_dramcfg_low(pvt->dclr0, 0);
864 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
866 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
869 dhar_base(pvt->dhar),
870 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
871 : f10_dhar_offset(pvt->dhar));
873 debugf1(" DramHoleValid: %s\n",
874 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
876 /* everything below this point is Fam10h and above */
877 if (boot_cpu_data.x86 == 0xf) {
878 amd64_debug_display_dimm_sizes(0, pvt);
882 /* Only if NOT ganged does dclr1 have valid info */
883 if (!dct_ganging_enabled(pvt))
884 amd64_dump_dramcfg_low(pvt->dclr1, 1);
887 * Determine if ganged and then dump memory sizes for first controller,
888 * and if NOT ganged dump info for 2nd controller.
890 ganged = dct_ganging_enabled(pvt);
892 amd64_debug_display_dimm_sizes(0, pvt);
895 amd64_debug_display_dimm_sizes(1, pvt);
/* Read in both DBAM registers */
899 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
901 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
903 if (boot_cpu_data.x86 >= 0x10)
904 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
908 * NOTE: CPU Revision Dependent code: Rev E and Rev F
910 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
911 * set the shift factor for the DCSB and DCSM values.
913 * ->dcs_mask_notused, RevE:
915 * To find the max InputAddr for the csrow, start with the base address and set
916 * all bits that are "don't care" bits in the test at the start of section
919 * The "don't care" bits are all set bits in the mask and all bits in the gaps
920 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
921 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
924 * ->dcs_mask_notused, RevF and later:
926 * To find the max InputAddr for the csrow, start with the base address and set
927 * all bits that are "don't care" bits in the test at the start of NPT section
930 * The "don't care" bits are all set bits in the mask and all bits in the gaps
931 * between bit ranges [36:27] and [21:13].
933 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
934 * which are all bits in the above-mentioned gaps.
936 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
939 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
940 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
941 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
942 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
943 pvt->dcs_shift = REV_E_DCS_SHIFT;
947 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
948 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
949 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
950 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
952 if (boot_cpu_data.x86 == 0x11) {
963 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
965 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
969 amd64_set_dct_base_and_mask(pvt);
971 for (cs = 0; cs < pvt->cs_count; cs++) {
972 reg = K8_DCSB0 + (cs * 4);
973 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
974 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
975 cs, pvt->dcsb0[cs], reg);
/* If DCTs are NOT ganged, then read in DCT1's base */
978 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
979 reg = F10_DCSB1 + (cs * 4);
980 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
982 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
983 cs, pvt->dcsb1[cs], reg);
989 for (cs = 0; cs < pvt->num_dcsm; cs++) {
990 reg = K8_DCSM0 + (cs * 4);
991 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
992 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
993 cs, pvt->dcsm0[cs], reg);
/* If DCTs are NOT ganged, then read in DCT1's mask */
996 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
997 reg = F10_DCSM1 + (cs * 4);
998 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
1000 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
1001 cs, pvt->dcsm1[cs], reg);
1008 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
1012 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
1013 /* Rev F and later */
1014 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1016 /* Rev E and earlier */
1017 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1020 debugf1(" Memory type is: %s\n",
1021 (type == MEM_DDR2) ? "MEM_DDR2" :
1022 (type == MEM_RDDR2) ? "MEM_RDDR2" :
1023 (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
1029 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1030 * and the later RevF memory controllers (DDR vs DDR2)
* Returns: the number of memory channels in operation, as determined from
* the contents of the DCL0_LOW register.
1037 static int k8_early_channel_count(struct amd64_pvt *pvt)
1041 err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1045 if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
1046 /* RevF (NPT) and later */
1047 flag = pvt->dclr0 & F10_WIDTH_128;
1049 /* RevE and earlier */
1050 flag = pvt->dclr0 & REVE_WIDTH_128;
1056 return (flag) ? 2 : 1;
1059 /* extract the ERROR ADDRESS for the K8 CPUs */
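/* NBEAH[7:0] supplies address bits 39:32; NBEAL contributes bits 31:2, its two low bits masked off. */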
1060 static u64 k8_get_error_address(struct mem_ctl_info *mci,
1061 struct err_regs *info)
1063 return (((u64) (info->nbeah & 0xff)) << 32) +
1064 (info->nbeal & ~0x03);
1068 * Read the Base and Limit registers for K8 based Memory controllers; extract
1069 * fields from the 'raw' reg into separate data fields
1071 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1073 static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1076 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1078 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
1080 /* Extract parts into separate data entries */
1081 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1082 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1083 pvt->dram_rw_en[dram] = (low & 0x3);
1085 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
1088 * Extract parts into separate data entries. Limit is the HIGHEST memory
1089 * location of the region, so lower 24 bits need to be all ones
1091 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1092 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1093 pvt->dram_DstNode[dram] = (low & 0x7);
1096 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1097 struct err_regs *info,
1100 struct mem_ctl_info *src_mci;
1101 unsigned short syndrome;
1105 /* Extract the syndrome parts and form a 16-bit syndrome */
1106 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1107 syndrome |= LOW_SYNDROME(info->nbsh);
1109 /* CHIPKILL enabled */
1110 if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1111 channel = get_channel_from_ecc_syndrome(syndrome);
1114 * Syndrome didn't map, so we don't know which of the
* 2 DIMMs is in error. So we need to ID 'both' of them as suspect.
1118 amd64_mc_printk(mci, KERN_WARNING,
1119 "unknown syndrome 0x%x - possible error "
1120 "reporting race\n", syndrome);
1121 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1126 * non-chipkill ecc mode
1128 * The k8 documentation is unclear about how to determine the
1129 * channel number when using non-chipkill memory. This method
1130 * was obtained from email communication with someone at AMD.
1131 * (Wish the email was placed in this comment - norsk)
1133 channel = ((SystemAddress & BIT(3)) != 0);
1137 * Find out which node the error address belongs to. This may be
1138 * different from the node that detected the error.
1140 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1142 amd64_mc_printk(mci, KERN_ERR,
1143 "failed to map error address 0x%lx to a node\n",
1144 (unsigned long)SystemAddress);
1145 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1149 /* Now map the SystemAddress to a CSROW */
1150 csrow = sys_addr_to_csrow(src_mci, SystemAddress);
1152 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1154 error_address_to_page_and_offset(SystemAddress, &page, &offset);
1156 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1157 channel, EDAC_MOD_STR);
* determine the number of PAGES for this DIMM's size based on its DRAM
* Bank Address Mapping (DBAM) value.
* First step is to calc the number of bits to shift a value of 1 left to
* indicate how many pages. Start with the DBAM value as the starting bits,
1167 * then proceed to adjust those shift bits, based on CPU rev and the table.
1168 * See BKDG on the DBAM
1170 static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1174 if (pvt->ext_model >= OPTERON_CPU_REV_F) {
1175 nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1178 * RevE and less section; this line is tricky. It collapses the
* table used by RevD and later to one that matches revisions CG and earlier.
1182 dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
1183 (dram_map > 8 ? 4 : (dram_map > 5 ?
1184 3 : (dram_map > 2 ? 1 : 0))) : 0;
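/* e.g. on RevD and later, a DBAM value of 9 collapses to 5 above, i.e. 1 << (5 + 25) bytes == 1 GiB */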
1186 /* 25 shift is 32MiB minimum DIMM size in RevE and prior */
1187 nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
1194 * Get the number of DCT channels in use.
* Returns: the number of memory channels in operation, as determined from
* the contents of the DCL0_LOW register.
1201 static int f10_early_channel_count(struct amd64_pvt *pvt)
1203 int dbams[] = { DBAM0, DBAM1 };
1204 int i, j, channels = 0;
1207 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0))
1210 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1))
1213 /* If we are in 128 bit mode, then we are using 2 channels */
1214 if (pvt->dclr0 & F10_WIDTH_128) {
1215 debugf0("Data WIDTH is 128 bits - 2 channels\n");
* Need to check if in UN-ganged mode: in that case, there are 2 channels,
* but they are NOT in 128 bit mode and thus the above 'dclr0' status bit
1225 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
* its CSEnable bit on. If so, then it is the SINGLE DIMM case.
1228 debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
1231 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1232 * is more than just one DIMM present in unganged mode. Need to check
1233 * both controllers since DIMMs can be placed in either one.
1235 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1236 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
1239 for (j = 0; j < 4; j++) {
1240 if (DBAM_DIMM(j, dbam) > 0) {
1247 debugf0("MCT channel count: %d\n", channels);
1256 static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1258 return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1261 /* Enable extended configuration access via 0xCF8 feature */
1262 static void amd64_setup(struct amd64_pvt *pvt)
amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1268 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1269 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1270 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1273 /* Restore the extended configuration access via 0xCF8 feature */
1274 static void amd64_teardown(struct amd64_pvt *pvt)
amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1280 reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1281 if (pvt->flags.cf8_extcfg)
1282 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1283 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1286 static u64 f10_get_error_address(struct mem_ctl_info *mci,
1287 struct err_regs *info)
1289 return (((u64) (info->nbeah & 0xffff)) << 32) +
1290 (info->nbeal & ~0x01);
1294 * Read the Base and Limit registers for F10 based Memory controllers. Extract
1295 * fields from the 'raw' reg into separate data fields.
1297 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1299 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1301 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1303 low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1304 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1306 /* read the 'raw' DRAM BASE Address register */
1307 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
1309 /* Read from the ECS data register */
1310 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
1312 /* Extract parts into separate data entries */
1313 pvt->dram_rw_en[dram] = (low_base & 0x3);
1315 if (pvt->dram_rw_en[dram] == 0)
1318 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1320 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1321 (((u64)low_base & 0xFFFF0000) << 8);
1323 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1324 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1326 /* read the 'raw' LIMIT registers */
1327 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
1329 /* Read from the ECS data register for the HIGH portion */
1330 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
1332 debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
1333 high_base, low_base, high_limit, low_limit);
1335 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1336 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1339 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1340 * memory location of the region, so low 24 bits need to be all ones.
1342 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1343 (((u64) low_limit & 0xFFFF0000) << 8) |
1347 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1350 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1351 &pvt->dram_ctl_select_low)) {
1352 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
1353 "High range addresses at: 0x%x\n",
1354 pvt->dram_ctl_select_low,
1355 dct_sel_baseaddr(pvt));
1357 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1358 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1359 (dct_dram_enabled(pvt) ? "yes" : "no"));
1361 if (!dct_ganging_enabled(pvt))
1362 debugf0(" Address range split per DCT: %s\n",
1363 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1365 debugf0(" DCT data interleave for ECC: %s, "
1366 "DRAM cleared since last warm reset: %s\n",
1367 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1368 (dct_memory_cleared(pvt) ? "yes" : "no"));
1370 debugf0(" DCT channel interleave: %s, "
1371 "DCT interleave bits selector: 0x%x\n",
1372 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1373 dct_sel_interleave_addr(pvt));
1376 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1377 &pvt->dram_ctl_select_high);
1381 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1382 * Interleaving Modes.
1384 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1385 int hi_range_sel, u32 intlv_en)
1387 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1389 if (dct_ganging_enabled(pvt))
1391 else if (hi_range_sel)
1393 else if (dct_interleave_enabled(pvt)) {
1395 * see F2x110[DctSelIntLvAddr] - channel interleave mode
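* Per the code below: encoding 0 selects address bit 6; encodings 2 and 3
* XOR bit 6 (resp. bit 9) with the parity of address bits 20:16; encoding 1
* falls through to a node-interleave-adjusted bit (bits 12..15, depending
* on intlv_en).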
1397 if (dct_sel_interleave_addr(pvt) == 0)
1398 cs = sys_addr >> 6 & 1;
1399 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1400 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1402 if (dct_sel_interleave_addr(pvt) & 1)
1403 cs = (sys_addr >> 9 & 1) ^ temp;
1405 cs = (sys_addr >> 6 & 1) ^ temp;
1406 } else if (intlv_en & 4)
1407 cs = sys_addr >> 15 & 1;
1408 else if (intlv_en & 2)
1409 cs = sys_addr >> 14 & 1;
1410 else if (intlv_en & 1)
1411 cs = sys_addr >> 13 & 1;
1413 cs = sys_addr >> 12 & 1;
1414 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1415 cs = ~dct_sel_high & 1;
1422 static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1426 else if (intlv_en == 3)
1428 else if (intlv_en == 7)
1434 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1435 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1436 u32 dct_sel_base_addr,
1437 u64 dct_sel_base_off,
1438 u32 hole_valid, u32 hole_off,
1444 if (!(dct_sel_base_addr & 0xFFFFF800) &&
1445 hole_valid && (sys_addr >= 0x100000000ULL))
1446 chan_off = hole_off << 16;
1448 chan_off = dct_sel_base_off;
1450 if (hole_valid && (sys_addr >= 0x100000000ULL))
1451 chan_off = hole_off << 16;
1453 chan_off = dram_base & 0xFFFFF8000000ULL;
1456 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1457 (chan_off & 0x0000FFFFFF800000ULL);
1460 /* Hack for the time being - Can we get this from BIOS?? */
1461 #define CH0SPARE_RANK 0
1462 #define CH1SPARE_RANK 1
* checks if the csrow passed in is marked as SPARED; if so, returns the new spare csrow.
1468 static inline int f10_process_possible_spare(int csrow,
1469 u32 cs, struct amd64_pvt *pvt)
1474 /* Depending on channel, isolate respective SPARING info */
1476 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1477 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1478 if (swap_done && (csrow == bad_dram_cs))
1479 csrow = CH1SPARE_RANK;
1481 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1482 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1483 if (swap_done && (csrow == bad_dram_cs))
1484 csrow = CH0SPARE_RANK;
1490 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1491 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1494 * -EINVAL: NOT FOUND
1495 * 0..csrow = Chip-Select Row
1497 static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1499 struct mem_ctl_info *mci;
1500 struct amd64_pvt *pvt;
1501 u32 cs_base, cs_mask;
1502 int cs_found = -EINVAL;
1505 mci = mci_lookup[nid];
1509 pvt = mci->pvt_info;
1511 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1513 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1515 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1516 if (!(cs_base & K8_DCSB_CS_ENABLE))
1520 * We have an ENABLED CSROW, Isolate just the MASK bits of the
1521 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1522 * of the actual address.
1524 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1527 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
1528 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
1530 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1532 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1533 csrow, cs_base, cs_mask);
1535 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1537 debugf1(" Final CSMask=0x%x\n", cs_mask);
1538 debugf1(" (InputAddr & ~CSMask)=0x%x "
1539 "(CSBase & ~CSMask)=0x%x\n",
1540 (in_addr & ~cs_mask), (cs_base & ~cs_mask));
1542 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1543 cs_found = f10_process_possible_spare(csrow, cs, pvt);
1545 debugf1(" MATCH csrow=%d\n", cs_found);
1552 /* For a given @dram_range, check if @sys_addr falls within it. */
1553 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1554 u64 sys_addr, int *nid, int *chan_sel)
1556 int node_id, cs_found = -EINVAL, high_range = 0;
1557 u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1558 u32 hole_valid, tmp, dct_sel_base, channel;
1559 u64 dram_base, chan_addr, dct_sel_base_off;
1561 dram_base = pvt->dram_base[dram_range];
1562 intlv_en = pvt->dram_IntlvEn[dram_range];
1564 node_id = pvt->dram_DstNode[dram_range];
1565 intlv_sel = pvt->dram_IntlvSel[dram_range];
1567 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1568 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
* This assumes that one node's DHAR is the same as all the other nodes' DHAR.
1574 hole_off = (pvt->dhar & 0x0000FF80);
1575 hole_valid = (pvt->dhar & 0x1);
1576 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1578 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
1579 hole_off, hole_valid, intlv_sel);
1582 (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1585 dct_sel_base = dct_sel_baseaddr(pvt);
1588 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1589 * select between DCT0 and DCT1.
1591 if (dct_high_range_enabled(pvt) &&
1592 !dct_ganging_enabled(pvt) &&
1593 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1596 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1598 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1599 dct_sel_base_off, hole_valid,
1600 hole_off, dram_base);
1602 intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1604 /* remove Node ID (in case of memory interleaving) */
1605 tmp = chan_addr & 0xFC0;
1607 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1609 /* remove channel interleave and hash */
1610 if (dct_interleave_enabled(pvt) &&
1611 !dct_high_range_enabled(pvt) &&
1612 !dct_ganging_enabled(pvt)) {
1613 if (dct_sel_interleave_addr(pvt) != 1)
1614 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1616 tmp = chan_addr & 0xFC0;
1617 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1622 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1623 chan_addr, (u32)(chan_addr >> 8));
1625 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
1627 if (cs_found >= 0) {
1629 *chan_sel = channel;
1634 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1635 int *node, int *chan_sel)
1637 int dram_range, cs_found = -EINVAL;
1638 u64 dram_base, dram_limit;
1640 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1642 if (!pvt->dram_rw_en[dram_range])
1645 dram_base = pvt->dram_base[dram_range];
1646 dram_limit = pvt->dram_limit[dram_range];
1648 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1650 cs_found = f10_match_to_this_node(pvt, dram_range,
* This is the F10h reference code from AMD to map a @sys_addr to NodeID, csrow and channel.
1664 * The @sys_addr is usually an error address received from the hardware.
1666 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1667 struct err_regs *info,
1670 struct amd64_pvt *pvt = mci->pvt_info;
1672 unsigned short syndrome;
1673 int nid, csrow, chan = 0;
1675 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1678 error_address_to_page_and_offset(sys_addr, &page, &offset);
1680 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1681 syndrome |= LOW_SYNDROME(info->nbsh);
1684 * Is CHIPKILL on? If so, then we can attempt to use the
1685 * syndrome to isolate which channel the error was on.
1687 if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
1688 chan = get_channel_from_ecc_syndrome(syndrome);
1691 edac_mc_handle_ce(mci, page, offset, syndrome,
1692 csrow, chan, EDAC_MOD_STR);
* Channel unknown, report all channels on this CSROW as failed.
1698 for (chan = 0; chan < mci->csrows[csrow].nr_channels;
1700 edac_mc_handle_ce(mci, page, offset,
1708 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1713 * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
1714 * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
1715 * indicates an empty DIMM slot, as reported by Hardware on empty slots.
* Normalize to 128MB by subtracting the 27 bit shift.
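* E.g. a shift value of 27 yields 128 << 0 == 128MB, 28 yields 256MB, and so
* on up the table.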
1719 static int map_dbam_to_csrow_size(int index)
1723 if (index > 0 && index <= DBAM_MAX_VALUE)
1724 mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
* debug routine to display the memory sizes of all logical DIMMs and their CSROWs
1733 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1735 int dimm, size0, size1;
1739 if (boot_cpu_data.x86 == 0xf) {
1740 /* K8 families < revF not supported yet */
1741 if (pvt->ext_model < OPTERON_CPU_REV_F)
1747 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1748 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
1750 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1751 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1753 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1755 /* Dump memory sizes for DIMM and its CSROWs */
1756 for (dimm = 0; dimm < 4; dimm++) {
1759 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1760 size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1763 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1764 size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1766 edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
1767 dimm * 2, size0, dimm * 2 + 1, size1);
1772 * Very early hardware probe on pci_probe thread to determine if this module
1773 * supports the hardware.
1779 static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
1784 * If we are on a DDR3 machine, we don't know yet if
1785 * we support that properly at this time
1787 if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
1788 (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
1790 amd64_printk(KERN_WARNING,
1791 "%s() This machine is running with DDR3 memory. "
1792 "This is not currently supported. "
1793 "DCHR0=0x%x DCHR1=0x%x\n",
1794 __func__, pvt->dchr0, pvt->dchr1);
1796 amd64_printk(KERN_WARNING,
1797 " Contact '%s' module MAINTAINER to help add"
* There currently are 3 types of MC devices for AMD Athlon/Opterons
1809 * (as per PCI DEVICE_IDs):
1811 * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
* DEVICE ID, even though there are differences between the different Revisions
1815 * Family F10h and F11h.
1818 static struct amd64_family_type amd64_family_types[] = {
1821 .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1822 .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1824 .early_channel_count = k8_early_channel_count,
1825 .get_error_address = k8_get_error_address,
1826 .read_dram_base_limit = k8_read_dram_base_limit,
1827 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1828 .dbam_map_to_pages = k8_dbam_map_to_pages,
1832 .ctl_name = "Family 10h",
1833 .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1834 .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1836 .probe_valid_hardware = f10_probe_valid_hardware,
1837 .early_channel_count = f10_early_channel_count,
1838 .get_error_address = f10_get_error_address,
1839 .read_dram_base_limit = f10_read_dram_base_limit,
1840 .read_dram_ctl_register = f10_read_dram_ctl_register,
1841 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1842 .dbam_map_to_pages = f10_dbam_map_to_pages,
1846 .ctl_name = "Family 11h",
1847 .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
1848 .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
1850 .probe_valid_hardware = f10_probe_valid_hardware,
1851 .early_channel_count = f10_early_channel_count,
1852 .get_error_address = f10_get_error_address,
1853 .read_dram_base_limit = f10_read_dram_base_limit,
1854 .read_dram_ctl_register = f10_read_dram_ctl_register,
1855 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1856 .dbam_map_to_pages = f10_dbam_map_to_pages,
1861 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1862 unsigned int device,
1863 struct pci_dev *related)
1865 struct pci_dev *dev = NULL;
1867 dev = pci_get_device(vendor, device, dev);
1869 if ((dev->bus->number == related->bus->number) &&
1870 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1872 dev = pci_get_device(vendor, device, dev);
1879 * syndrome mapping table for ECC ChipKill devices
1881 * The comment in each row is the token (nibble) number that is in error.
1882 * The least significant nibble of the syndrome is the mask for the bits
1883 * that are in error (need to be toggled) for the particular nibble.
1885 * Each row contains 16 entries.
1886 * The first entry (0th) is the channel number for that row of syndromes.
* The remaining 15 entries are the syndromes for the respective Error bit mask index.
* The 1st index entry is the 0x0001 mask, indicating that the rightmost bit
* is in error.
* The 2nd index entry is 0x0010, indicating that the second bit is damaged.
* The 3rd index entry is 0x0011, indicating that the rightmost 2 bits are
* damaged.
* And so on until index 15, 0x1111, whose entry has the syndrome
1896 * indicating that all 4 bits are damaged.
1898 * A search is performed on this table looking for a given syndrome.
1900 * See the AMD documentation for ECC syndromes. This ECC table is valid
1901 * across all the versions of the AMD64 processors.
1903 * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
1904 * COLUMN index, then search all ROWS of that column, looking for a match
1905 * with the input syndrome. The ROW value will be the token number.
* The 0'th entry on that row can be returned as the CHANNEL (0 or 1) of this error.
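* Worked example: syndrome 0xe821 has column 0xe821 & 0xF == 1; row 0 holds
* 0xe821 at index 1, so the lookup returns that row's 0'th entry: channel 0.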
1910 #define NUMBER_ECC_ROWS 36
1911 static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
1912 /* Channel 0 syndromes */
1913 {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
1914 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
1915 {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
1916 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
1917 {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
1918 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
1919 {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
1920 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
1921 {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
1922 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
1923 {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
1924 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
1925 {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
1926 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
1927 {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
1928 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
1929 {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
1930 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
1931 {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
1932 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
1933 {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
1934 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
1935 {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
1936 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
1937 {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
1938 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
1939 {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
1940 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
1941 {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
1942 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
1943 {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
1944 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
1946 /* Channel 1 syndromes */
1947 {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
1948 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
1949 {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
1950 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
1951 {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
1952 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
1953 {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
1954 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
1955 {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
1956 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
1957 {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
1958 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
1959 {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
1960 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
1961 {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
1962 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
1963 {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
1964 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
1965 {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
1966 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
1967 {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
1968 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
1969 {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
1970 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
1971 {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
1972 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
1973 {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
1974 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
1975 {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
1976 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
1977 {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
1978 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
/* ECC bits are also in the set of tokens and they too can go bad.
* The first 2 rows cover channel 0, while the second 2 cover channel 1.
1983 {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
1984 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
1985 {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
1986 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
1987 {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
1988 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
1989 {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
1990 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
/*
 * Given the syndrome argument, scan each of the channel tables for a syndrome
 * match. Depending on which table it is found in, return the channel number.
 */
static int get_channel_from_ecc_syndrome(unsigned short syndrome)
{
	int row;
	int column;

	/* Determine column to scan */
	column = syndrome & 0xF;

	/* Scan all rows, looking for syndrome, or end of table */
	for (row = 0; row < NUMBER_ECC_ROWS; row++) {
		if (ecc_chipkill_syndromes[row][column] == syndrome)
			return ecc_chipkill_syndromes[row][0];
	}

	debugf0("syndrome(%x) not found\n", syndrome);
	return -1;
}
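
/*
 * Illustrative sketch (not part of the driver, hypothetical value): decoding
 * a chipkill syndrome by hand. 0x45d1 has low nibble 0x1, which selects
 * column 1; that value appears in the channel-1 table (row 0x10 above),
 * whose first element is 1, so the lookup reports channel 1.
 */
#if 0
static void example_syndrome_decode(void)
{
	int channel = get_channel_from_ecc_syndrome(0x45d1);

	/* channel == 1 here; -1 would mean the syndrome is not in the table */
}
#endif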
/*
 * Check for a valid error in the NB Status High register. If so, proceed to
 * read NB Status Low, NB Address Low and NB Address High registers and store
 * the data into the error structure.
 *
 * Returns:
 *	- 1: if the hardware registers contain valid error info
 *	- 0: if no valid error is indicated
 */
static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
				     struct err_regs *regs)
{
	struct amd64_pvt *pvt;
	struct pci_dev *misc_f3_ctl;

	pvt = mci->pvt_info;
	misc_f3_ctl = pvt->misc_f3_ctl;

	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
		return 0;

	if (!(regs->nbsh & K8_NBSH_VALID_BIT))
		return 0;

	/* valid error, read remaining error information registers */
	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
		return 0;

	return 1;
}
/*
 * This function is called to retrieve the error data from hardware and store
 * it in the info structure.
 *
 * Returns:
 *	- 1: if a valid error is found
 *	- 0: if no error is found
 */
static int amd64_get_error_info(struct mem_ctl_info *mci,
				struct err_regs *info)
{
	struct amd64_pvt *pvt;
	struct err_regs regs;

	pvt = mci->pvt_info;

	if (!amd64_get_error_info_regs(mci, info))
		return 0;

	/*
	 * Here's the problem with the K8's EDAC reporting: There are four
	 * registers which report pieces of error information. They are shared
	 * between CEs and UEs. Furthermore, contrary to what is stated in the
	 * BKDG, the overflow bit is never used! Every error always updates the
	 * reporting registers.
	 *
	 * Can you see the race condition? All four error reporting registers
	 * must be read before a new error updates them! There is no way to
	 * read all four registers atomically. The best that can be done is to
	 * detect that a race has occurred and then report the error without
	 * any kind of loss of information.
	 *
	 * What is still positive is that errors are still reported and thus
	 * problems can still be detected - just not localized because the
	 * syndrome and address are spread out across registers.
	 *
	 * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
	 * UEs and CEs should have separate register sets with proper overflow
	 * bits that are used! At the very least the problem can be fixed by
	 * honoring the ErrValid bit in 'nbsh' and not updating registers -
	 * just set the overflow bit - unless the current error is CE and the
	 * new error is UE which would be the only situation for overwriting
	 * the current values.
	 */

	regs = *info;

	/* Use info from the second read - most current */
	if (unlikely(!amd64_get_error_info_regs(mci, info)))
		return 0;

	/* clear the error bits in hardware */
	pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);

	/* Check for the possible race condition */
	if ((regs.nbsh != info->nbsh) ||
	    (regs.nbsl != info->nbsl) ||
	    (regs.nbeah != info->nbeah) ||
	    (regs.nbeal != info->nbeal)) {
		amd64_mc_printk(mci, KERN_WARNING,
				"hardware STATUS read access race condition "
				"detected!\n");
		return 0;
	}

	return 1;
}
/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for a valid
 * ERROR ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 SystemAddress;

	/* Ensure that the Error Address is VALID */
	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
		amd64_mc_printk(mci, KERN_ERR,
				"HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	SystemAddress = extract_error_address(mci, info);

	amd64_mc_printk(mci, KERN_ERR,
			"CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);

	pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
}
/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	int csrow;
	u64 SystemAddress;
	u32 page, offset;
	struct mem_ctl_info *log_mci, *src_mci = NULL;

	log_mci = mci;

	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
		amd64_mc_printk(mci, KERN_CRIT,
				"HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	SystemAddress = extract_error_address(mci, info);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, SystemAddress);
	if (!src_mci) {
		amd64_mc_printk(mci, KERN_CRIT,
				"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
				(unsigned long)SystemAddress);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	log_mci = src_mci;

	csrow = sys_addr_to_csrow(log_mci, SystemAddress);
	if (csrow < 0) {
		amd64_mc_printk(mci, KERN_CRIT,
				"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
				(unsigned long)SystemAddress);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(SystemAddress, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
	}
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct err_regs *info)
{
	u32 ec = ERROR_CODE(info->nbsl);
	u32 xec = EXT_ERROR_CODE(info->nbsl);
	int ecc_type = (info->nbsh >> 13) & 0x3;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == K8_NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	if (ecc_type == 2)
		amd64_handle_ce(mci, info);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, info);

	/*
	 * If the main error is a CE then any overflow must be a CE too. If the
	 * main error is a UE then the overflow is unknown. We'll call the
	 * overflow a CE - if panic_on_ue is set then we're already panic'ed
	 * and won't arrive here. Otherwise, apparently someone doesn't think
	 * that UEs are errors.
	 */
	if (info->nbsh & K8_NBSH_OVERFLOW)
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow");
}

void amd64_decode_bus_error(int node_id, struct err_regs *regs)
{
	struct mem_ctl_info *mci = mci_lookup[node_id];

	__amd64_decode_bus_error(mci, regs);

	/*
	 * Check the UE bit of the NB status high register; if set, generate
	 * some logs. If it is NOT a GART error, process the event as a
	 * NO-INFO event. If it was a GART error, skip that process.
	 *
	 * FIXME: this should go somewhere else, if at all.
	 */
	if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
		edac_mc_handle_ue_no_info(mci, "UE bit is set");
}
/*
 * The main polling 'check' function, called FROM the edac core to perform the
 * error checking and, if an error is encountered, error processing.
 */
static void amd64_check(struct mem_ctl_info *mci)
{
	struct err_regs regs;

	if (amd64_get_error_info(mci, &regs)) {
		struct amd64_pvt *pvt = mci->pvt_info;

		amd_decode_nb_mce(pvt->mc_node_id, &regs, 1);
	}
}
/*
 * Input:
 *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
 *	2) AMD Family index value
 *
 * Output:
 *	Upon return of 0, the following are filled in:
 *
 *		struct pvt->addr_f1_ctl
 *		struct pvt->misc_f3_ctl
 *
 *	Filled in with the related device functions of 'dram_f2_ctl'.
 *	These devices are "reserved" via pci_get_device().
 *
 *	Upon return of 1 (error status):
 *
 *		Nothing reserved
 */
static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
{
	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];

	/* Reserve the ADDRESS MAP Device */
	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->addr_f1_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->addr_f1_ctl) {
		amd64_printk(KERN_ERR, "error address map device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
		return 1;
	}

	/* Reserve the MISC Device */
	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->misc_f3_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->misc_f3_ctl) {
		pci_dev_put(pvt->addr_f1_ctl);
		pvt->addr_f1_ctl = NULL;

		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
		return 1;
	}

	debugf1("  Addr Map device PCI Bus ID:\t%s\n",
		pci_name(pvt->addr_f1_ctl));
	debugf1("  DRAM MEM-CTL PCI Bus ID:\t%s\n",
		pci_name(pvt->dram_f2_ctl));
	debugf1("  Misc device PCI Bus ID:\t%s\n",
		pci_name(pvt->misc_f3_ctl));

	return 0;
}
static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->addr_f1_ctl);
	pci_dev_put(pvt->misc_f3_ctl);
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
{
	u64 msr_val;
	int dram;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	debugf0("  TOP_MEM: 0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		debugf0("  TOP_MEM2 disabled.\n");

	amd64_cpu_display_info(pvt);

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);

	if (pvt->ops->read_dram_ctl_register)
		pvt->ops->read_dram_ctl_register(pvt);

	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
		/*
		 * Call the CPU specific READ function to get the DRAM Base and
		 * Limit values from the DCT.
		 */
		pvt->ops->read_dram_base_limit(pvt, dram);

		/*
		 * Only print out debug info on rows with both R and W enabled.
		 * Normal processing; the compiler should optimize this whole
		 * 'if' debug output block away.
		 */
		if (pvt->dram_rw_en[dram] != 0) {
			debugf1("  DRAM-BASE[%d]: 0x%016llx "
				"DRAM-LIMIT: 0x%016llx\n",
				dram,
				pvt->dram_base[dram],
				pvt->dram_limit[dram]);

			debugf1("  IntlvEn=%s %s %s "
				"IntlvSel=%d DstNode=%d\n",
				pvt->dram_IntlvEn[dram] ?
					"Enabled" : "Disabled",
				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
				pvt->dram_IntlvSel[dram],
				pvt->dram_DstNode[dram]);
		}
	}

	amd64_read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
	amd64_read_dbam_reg(pvt);

	amd64_read_pci_cfg(pvt->misc_f3_ctl,
			   F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
	}
	amd64_dump_misc_regs(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of four 4-bit fields:
 *
 *	Bits:	CSROWs
 *	0-3	CSROWs 0 and 1
 *	4-7	CSROWs 2 and 3
 *	8-11	CSROWs 4 and 5
 *	12-15	CSROWs 6 and 7
 *
 * Values range from 0 to 15.
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel mode or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
{
	u32 dram_map, nr_pages;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper
	 * CSROW field.
	 */
	dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;

	nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);

	/*
	 * If dual channel then double the memory size of single channel.
	 * Channel count is 1 or 2.
	 */
	nr_pages <<= (pvt->channel_count - 1);

	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
	debugf0("    nr_pages= %u  channel-count = %d\n",
		nr_pages, pvt->channel_count);

	return nr_pages;
}
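
/*
 * Worked example (illustrative only, hypothetical DBAM value): with
 * DBAM = 0x00003200, csrow 2 shifts by (2/2)*4 = 4 bits and extracts
 * field 0x0, while csrow 5 shifts by (5/2)*4 = 8 bits and extracts 0x2.
 */
#if 0
static u32 example_dbam_field(u32 dbam, int csrow_nr)
{
	/* csrows come in pairs; each pair shares one 4-bit DBAM field */
	return (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
}
#endif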
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int amd64_init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow;
	struct amd64_pvt *pvt;
	u64 input_addr_min, input_addr_max, sys_addr;
	int i, empty = 1;

	pvt = mci->pvt_info;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);

	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	for (i = 0; i < pvt->cs_count; i++) {
		csrow = &mci->csrows[i];

		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
			debugf1("----CSROW %d EMPTY for node %d\n", i,
				pvt->mc_node_id);
			continue;
		}

		debugf1("----CSROW %d VALID for MC node %d\n",
			i, pvt->mc_node_id);

		empty = 0;
		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
		/* 8 bytes of resolution */
		csrow->grain = 8;
		csrow->mtype = amd64_determine_memory_type(pvt);

		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
			(unsigned long)input_addr_min,
			(unsigned long)input_addr_max);
		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
			(unsigned long)sys_addr, csrow->page_mask);
		debugf1("    nr_pages: %u  first_page: 0x%lx "
			"last_page: 0x%lx\n",
			(unsigned)csrow->nr_pages,
			csrow->first_page, csrow->last_page);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is
		 * operating
		 */
		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
			csrow->edac_mode =
			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
			    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			csrow->edac_mode = EDAC_NONE;
	}

	return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
	cpumask_var_t mask;
	struct msr *msrs;
	int cpu, nbe, idx = 0;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
			     __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
	if (!msrs) {
		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
			     __func__);
		free_cpumask_var(mask);
		return false;
	}

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;

		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			cpu, msrs[idx].q,
			(nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;

		idx++;
	}
	ret = true;

out:
	kfree(msrs);
	free_cpumask_var(mask);
	return ret;
}
static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
{
	cpumask_var_t cmask;
	struct msr *msrs = NULL;
	int cpu, idx = 0;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
			     __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);

	msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
	if (!msrs) {
		amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
			     __func__);
		free_cpumask_var(cmask);
		return -ENOMEM;
	}

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {
		if (on) {
			if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
				pvt->flags.ecc_report = 1;

			msrs[idx].l |= K8_MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off ECC reporting only when it was off before
			 */
			if (!pvt->flags.ecc_report)
				msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
		}
		idx++;
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	kfree(msrs);
	free_cpumask_var(cmask);

	return 0;
}
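
/*
 * The asymmetry above, restated as a single-CPU sketch (illustrative only,
 * hypothetical helper): when enabling, remember whether the BIOS had already
 * set MCGCTL[NBE]; when disabling, only clear the bit if this driver was the
 * one that set it.
 */
#if 0
static u32 example_toggle_nbe(u32 msr_l, bool on, bool *was_enabled)
{
	if (on) {
		if (msr_l & K8_MSR_MCGCTL_NBE)
			*was_enabled = true;	/* BIOS had it on already */
		msr_l |= K8_MSR_MCGCTL_NBE;
	} else if (!*was_enabled) {
		msr_l &= ~K8_MSR_MCGCTL_NBE;	/* undo only our own change */
	}
	return msr_l;
}
#endif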
/*
 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
 * enable it.
 */
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!ecc_enable_override)
		return;

	amd64_printk(KERN_WARNING,
		     "'ecc_enable_override' parameter is active, "
		     "Enabling AMD ECC hardware now: CAUTION\n");

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);

	/* turn on UECCEn and CECCEn bits */
	pvt->old_nbctl = value & mask;
	pvt->nbctl_mcgctl_saved = 1;

	value |= mask;
	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);

	if (amd64_toggle_ecc_err_reporting(pvt, ON))
		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
					   "MCGCTL!\n");

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	if (!(value & K8_NBCFG_ECC_ENABLE)) {
		amd64_printk(KERN_WARNING,
			     "This node reports that DRAM ECC is "
			     "currently Disabled; ENABLING now\n");

		/* Attempt to turn on DRAM ECC Enable */
		value |= K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);

		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

		if (!(value & K8_NBCFG_ECC_ENABLE)) {
			amd64_printk(KERN_WARNING,
				     "Hardware rejects Enabling DRAM ECC checking\n"
				     "Check memory DIMM configuration\n");
		} else {
			amd64_printk(KERN_DEBUG,
				     "Hardware accepted DRAM ECC Enable\n");
		}
	}
	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	pvt->ctl_error_info.nbcfg = value;
}
static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!pvt->nbctl_mcgctl_saved)
		return;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
	value &= ~mask;
	value |= pvt->old_nbctl;

	/* restore the NB Enable MCGCTL bit */
	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);

	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
		amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
					   "MCGCTL!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before taking over the
 * processing of ECC errors. This is because the BIOS can properly initialize
 * the memory system completely. A command line option allows force-enabling
 * hardware ECC later in amd64_enable_ecc_error_reporting().
 */
static const char *ecc_warning =
	"WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
	" Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
	" Also, use of the override can cause unknown side effects.\n";

static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
	u32 value;
	u8 ecc_enabled = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
	if (!ecc_enabled)
		amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
			     "is currently disabled, set F3x%x[22] (%s).\n",
			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
	else
		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
	if (!nb_mce_en)
		amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, pvt->mc_node_id);

	if (!ecc_enabled || !nb_mce_en) {
		if (!ecc_enable_override) {
			amd64_printk(KERN_WARNING, "%s", ecc_warning);
			return -ENODEV;
		}
	} else
		/* CLEAR the override, since BIOS controlled it */
		ecc_enable_override = 0;

	return 0;
}
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
		sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
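
/*
 * Illustrative only: to my understanding, consumers of this list in the EDAC
 * sysfs code walk it until they hit an entry whose .attr.name is NULL, which
 * is why the 'terminator' sentinel above must always come last.
 */
#if 0
static void example_walk_attrs(const struct mcidev_sysfs_attribute *attrs)
{
	while (attrs->attr.name != NULL)
		attrs++;	/* visit each attribute up to the sentinel */
}
#endif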
static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= get_amd_family_name(pvt->mc_type_index);
	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
	mci->ctl_page_to_phys	= NULL;

	/* IMPORTANT: Set the polling 'check' function in this module */
	mci->edac_check		= amd64_check;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * Init stuff for this DRAM Controller device.
 *
 * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
 * Space feature MUST be enabled on ALL Processors prior to actually reading
 * from the ECS registers, since the loading of the module can occur on any
 * 'core' and cores don't 'see' the other processors' ECS data when ECS is not
 * enabled on those others. Our solution is to first enable ECS access in this
 * routine on all processors, gather some data in an amd64_pvt structure and
 * later come back in a finish-setup function to perform the final
 * initialization. See also amd64_init_2nd_stage() for that.
 */
static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
				    int mc_type_index)
{
	struct amd64_pvt *pvt = NULL;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_exit;

	pvt->mc_node_id = get_node_id(dram_f2_ctl);

	pvt->dram_f2_ctl	= dram_f2_ctl;
	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
	pvt->mc_type_index	= mc_type_index;
	pvt->ops		= family_ops(mc_type_index);

	/*
	 * We have the dram_f2_ctl device as an argument, now go reserve its
	 * sibling devices from the PCI system.
	 */
	ret = -ENODEV;
	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
	if (err)
		goto err_free;

	ret = -EINVAL;
	err = amd64_check_ecc_enabled(pvt);
	if (err)
		goto err_put;

	/*
	 * Key operation here: setup of HW prior to performing ops on it. Some
	 * setup is required to access ECS data. After this is performed, the
	 * 'teardown' function must be called upon error and normal exit paths.
	 */
	if (boot_cpu_data.x86 >= 0x10)
		amd64_setup(pvt);

	/*
	 * Save the pointer to the private data for use in 2nd initialization
	 * stage
	 */
	pvt_lookup[pvt->mc_node_id] = pvt;

	return 0;

err_put:
	amd64_free_mc_sibling_devices(pvt);

err_free:
	kfree(pvt);

err_exit:
	return ret;
}
/*
 * This is the finishing stage of the init code. Needs to be performed after
 * all MCs' hardware have been prepped for accessing extended config space.
 */
static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
	int node_id = pvt->mc_node_id;
	struct mem_ctl_info *mci;
	int ret, err = 0;

	amd64_read_mc_registers(pvt);

	ret = -ENODEV;
	if (pvt->ops->probe_valid_hardware) {
		err = pvt->ops->probe_valid_hardware(pvt);
		if (err)
			goto err_exit;
	}

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_exit;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
	if (!mci)
		goto err_exit;

	mci->pvt_info = pvt;

	mci->dev = &pvt->dram_f2_ctl->dev;
	amd64_setup_mci_misc_attributes(mci);

	if (amd64_init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	amd64_enable_ecc_error_reporting(mci);
	amd64_set_mc_sysfs_attributes(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	mci_lookup[node_id] = mci;
	pvt_lookup[node_id] = NULL;

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_exit:
	debugf0("failure to init 2nd stage: ret=%d\n", ret);

	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	kfree(pvt_lookup[pvt->mc_node_id]);
	pvt_lookup[node_id] = NULL;

	return ret;
}
static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
					     const struct pci_device_id *mc_type)
{
	int ret = 0;

	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
		get_amd_family_name(mc_type->driver_data));

	ret = pci_enable_device(pdev);
	if (ret < 0)
		ret = -EIO;
	else
		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);

	if (ret < 0)
		debugf0("ret=%d\n", ret);

	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	mci_lookup[pvt->mc_node_id] = NULL;
	mci->pvt_info = NULL;

	/* free the private structure only after its last use above */
	kfree(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	/* Free the EDAC CORE resources */
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see if this driver is for a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= K8_CPUS
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= F10_CPUS
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_11H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= F11_CPUS
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);

static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_init_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void amd64_setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mci_lookup[0];
	if (mci) {
		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
						    EDAC_MOD_STR);
		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}
static int __init amd64_edac_init(void)
{
	int nb, err = -ENODEV;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (cache_k8_northbridges() < 0)
		return err;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		return err;

	/*
	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
	 * amd64_pvt structs. These will be used in the 2nd stage init function
	 * to finish initialization of the MC instances.
	 */
	for (nb = 0; nb < num_k8_northbridges; nb++) {
		if (!pvt_lookup[nb])
			continue;

		err = amd64_init_2nd_stage(pvt_lookup[nb]);
		if (err)
			goto err_2nd_stage;
	}

	amd64_setup_pci_device();

	return 0;

err_2nd_stage:
	debugf0("2nd stage failed\n");
	pci_unregister_driver(&amd64_pci_driver);

	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");