path: root/drivers/edac/amd64_edac.c
Diffstat (limited to 'drivers/edac/amd64_edac.c')
-rw-r--r--  drivers/edac/amd64_edac.c | 2310
1 file changed, 1502 insertions(+), 808 deletions(-)
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3aea55698165..2391f3469961 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,11 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/ras.h>
+#include <linux/string_choices.h>
#include "amd64_edac.h"
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
+#include <asm/amd/node.h>
static struct edac_pci_ctl_info *pci_ctl;
-static int report_gart_errors;
-module_param(report_gart_errors, int, 0644);
-
/*
* Set by command line parameter. If BIOS has enabled the ECC, this override is
* cleared to prevent re-enabling the hardware by this driver.
@@ -15,9 +16,26 @@ module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
+static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
+{
+ if (!pvt->flags.zn_regs_v2)
+ return reg;
+
+ switch (reg) {
+ case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
+ case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
+ }
+
+ WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
+ return 0;
+}
+
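For context, a minimal sketch of how this helper is meant to be used (it mirrors the callers added later in this patch; the local variables are illustrative, while the register and function names come from the driver):

	/* One code path serves both the DDR4 and the DDR5 (zn_regs_v2) register layouts. */
	u32 val, off = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_DIMM_CFG);

	if (!amd_smn_read(pvt->mc_node_id, off, &val))
		pvt->umc[umc].dimm_cfg = val;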
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
+/* Device for the PCI component */
+static struct device *pci_ctl_dev;
+
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -64,7 +82,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error reading F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
@@ -77,7 +95,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error writing to F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
/*
@@ -164,21 +182,6 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
* other archs, we might not have access to the caches directly.
*/
-static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
-{
- /*
- * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
- * are shifted down by 0x5, so scrubval 0x5 is written to the register
- * as 0x0, scrubval 0x6 as 0x1, etc.
- */
- if (scrubval >= 0x5 && scrubval <= 0x14) {
- scrubval -= 0x5;
- pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
- pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
- } else {
- pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
- }
-}
/*
* Scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
@@ -211,9 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17) {
- __f17h_set_scrubval(pvt, scrubval);
- } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
+ if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
f15h_select_dct(pvt, 1);
@@ -253,30 +254,17 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
int i, retval = -EINVAL;
u32 scrubval = 0;
- switch (pvt->fam) {
- case 0x15:
+ if (pvt->fam == 0x15) {
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
- break;
-
- case 0x17:
- amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
- if (scrubval & BIT(0)) {
- amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
- scrubval &= 0xF;
- scrubval += 0x5;
- } else {
- scrubval = 0;
- }
- break;
-
- default:
+ else
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
+ } else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
- break;
}
scrubval = scrubval & 0x001F;
@@ -448,6 +436,9 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
#define for_each_chip_select_mask(i, dct, pvt) \
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+#define for_each_umc(i) \
+ for (i = 0; i < pvt->max_mcs; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -498,8 +489,8 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
* complete 32-bit values despite the fact that the bitfields in the DHAR
* only represent bits 31-24 of the base and offset values.
*/
-int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
- u64 *hole_offset, u64 *hole_size)
+static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
+ u64 *hole_offset, u64 *hole_size)
{
struct amd64_pvt *pvt = mci->pvt_info;
@@ -552,7 +543,287 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 0;
}
-EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
+
+#ifdef CONFIG_EDAC_DEBUG
+#define EDAC_DCT_ATTR_SHOW(reg) \
+static ssize_t reg##_show(struct device *dev, \
+ struct device_attribute *mattr, char *data) \
+{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
+ struct amd64_pvt *pvt = mci->pvt_info; \
+ \
+ return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
+}
+
+EDAC_DCT_ATTR_SHOW(dhar);
+EDAC_DCT_ATTR_SHOW(dbam0);
+EDAC_DCT_ATTR_SHOW(top_mem);
+EDAC_DCT_ATTR_SHOW(top_mem2);
+
+static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+
+ u64 hole_base = 0;
+ u64 hole_offset = 0;
+ u64 hole_size = 0;
+
+ get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
+
+ return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
+ hole_size);
+}
+
+/*
+ * update NUM_DBG_ATTRS in case you add new members
+ */
+static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
+static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
+static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
+static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
+static DEVICE_ATTR_RO(dram_hole);
+
+static struct attribute *dbg_attrs[] = {
+ &dev_attr_dhar.attr,
+ &dev_attr_dbam.attr,
+ &dev_attr_topmem.attr,
+ &dev_attr_topmem2.attr,
+ &dev_attr_dram_hole.attr,
+ NULL
+};
+
+static const struct attribute_group dbg_group = {
+ .attrs = dbg_attrs,
+};
+
+static ssize_t inject_section_show(struct device *dev,
+ struct device_attribute *mattr, char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.section);
+}
+
+/*
+ * store error injection section value which refers to one of 4 16-byte sections
+ * within a 64-byte cacheline
+ *
+ * range: 0..3
+ */
+static ssize_t inject_section_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value > 3) {
+ amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.section = (u32) value;
+ return count;
+}
+
+static ssize_t inject_word_show(struct device *dev,
+ struct device_attribute *mattr, char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.word);
+}
+
+/*
+ * store error injection word value which refers to one of 9 16-bit words of the
+ * 16-byte (128-bit + ECC bits) section
+ *
+ * range: 0..8
+ */
+static ssize_t inject_word_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value > 8) {
+ amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.word = (u32) value;
+ return count;
+}
+
+static ssize_t inject_ecc_vector_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *buf)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
+}
+
+/*
+ * store 16 bit error injection vector which enables injecting errors to the
+ * corresponding bit within the error injection word above. When used during a
+ * DRAM ECC read, it holds the contents of the DRAM ECC bits.
+ */
+static ssize_t inject_ecc_vector_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 16, &value);
+ if (ret < 0)
+ return ret;
+
+ if (value & 0xFFFF0000) {
+ amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
+ return -EINVAL;
+ }
+
+ pvt->injection.bit_map = (u32) value;
+ return count;
+}
+
+/*
+ * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
+ * fields needed by the injection registers and read the NB Array Data Port.
+ */
+static ssize_t inject_read_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ u32 section, word_bits;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
+
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);
+
+ /* Issue 'word' and 'bit' along with the READ request */
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+}
+
+/*
+ * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
+ * fields needed by the injection registers.
+ */
+static ssize_t inject_write_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+ u32 section, word_bits, tmp;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(data, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Form value to choose 16-byte section of cacheline */
+ section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);
+
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
+
+ word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);
+
+ pr_notice_once("Don't forget to decrease MCE polling interval in\n"
+ "/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
+ "so that you can get the error report faster.\n");
+
+ on_each_cpu(disable_caches, NULL, 1);
+
+ /* Issue 'word' and 'bit' along with the WRITE request */
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+
+ retry:
+ /* wait until injection happens */
+ amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
+ if (tmp & F10_NB_ARR_ECC_WR_REQ) {
+ cpu_relax();
+ goto retry;
+ }
+
+ on_each_cpu(enable_caches, NULL, 1);
+
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);
+
+ return count;
+}
+
+/*
+ * update NUM_INJ_ATTRS in case you add new members
+ */
+
+static DEVICE_ATTR_RW(inject_section);
+static DEVICE_ATTR_RW(inject_word);
+static DEVICE_ATTR_RW(inject_ecc_vector);
+static DEVICE_ATTR_WO(inject_write);
+static DEVICE_ATTR_WO(inject_read);
+
+static struct attribute *inj_attrs[] = {
+ &dev_attr_inject_section.attr,
+ &dev_attr_inject_word.attr,
+ &dev_attr_inject_ecc_vector.attr,
+ &dev_attr_inject_write.attr,
+ &dev_attr_inject_read.attr,
+ NULL
+};
+
+static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ /* Families which have that injection hw */
+ if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
+ return attr->mode;
+
+ return 0;
+}
+
+static const struct attribute_group inj_group = {
+ .attrs = inj_attrs,
+ .is_visible = inj_is_visible,
+};
+#endif /* CONFIG_EDAC_DEBUG */
/*
* Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
@@ -591,8 +862,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
dram_base = get_dram_base(pvt, pvt->mc_node_id);
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
+ ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
if (!ret) {
if ((sys_addr >= (1ULL << 32)) &&
(sys_addr < ((1ULL << 32) + hole_size))) {
@@ -707,46 +977,186 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
return csrow;
}
+/*
+ * See AMD PPR DF::LclNodeTypeMap
+ *
+ * This register gives information for nodes of the same type within a system.
+ *
+ * Reading this register from a GPU node will tell how many GPU nodes are in the
+ * system and what the lowest AMD Node ID value is for the GPU nodes. Use this
+ * info to fixup the Linux logical "Node ID" value set in the AMD NB code and EDAC.
+ */
+static struct local_node_map {
+ u16 node_count;
+ u16 base_node_id;
+} gpu_node_map;
+
+#define PCI_DEVICE_ID_AMD_MI200_DF_F1 0x14d1
+#define REG_LOCAL_NODE_TYPE_MAP 0x144
+
+/* Local Node Type Map (LNTM) fields */
+#define LNTM_NODE_COUNT GENMASK(27, 16)
+#define LNTM_BASE_NODE_ID GENMASK(11, 0)
+
+static int gpu_get_node_map(struct amd64_pvt *pvt)
+{
+ struct pci_dev *pdev;
+ int ret;
+ u32 tmp;
+
+ /*
+ * Mapping of nodes from hardware-provided AMD Node ID to a
+ * Linux logical one is applicable for MI200 models. Therefore,
+ * return early for other heterogeneous systems.
+ */
+ if (pvt->F3->device != PCI_DEVICE_ID_AMD_MI200_DF_F3)
+ return 0;
+
+ /*
+ * Node ID 0 is reserved for CPUs. Therefore, a non-zero Node ID
+ * means the values have been already cached.
+ */
+ if (gpu_node_map.base_node_id)
+ return 0;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F1, NULL);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
+ goto out;
+ }
+
+ gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
+ gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
+
+out:
+ pci_dev_put(pdev);
+ return ret;
+}
+
+static int fixup_node_id(int node_id, struct mce *m)
+{
+ /* MCA_IPID[InstanceIdHi] gives the AMD Node ID for the bank. */
+ u8 nid = (m->ipid >> 44) & 0xF;
+
+ if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2)
+ return node_id;
+
+ /* Nodes below the GPU base node are CPU nodes and don't need a fixup. */
+ if (nid < gpu_node_map.base_node_id)
+ return node_id;
+
+ /* Convert the hardware-provided AMD Node ID to a Linux logical one. */
+ return nid - gpu_node_map.base_node_id + 1;
+}
+
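A worked example of the fixup above, with an assumed base node ID (the real value comes from DF::LclNodeTypeMap at runtime):

	/*
	 * Assume gpu_node_map.base_node_id == 8. An SMCA_UMC_V2 error whose
	 * MCA_IPID[47:44] reads 9 then decodes to 9 - 8 + 1 == 2, i.e. the
	 * second GPU node becomes Linux logical node 2, while node 0 stays
	 * reserved for the CPU.
	 */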
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
-static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
+static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
{
unsigned long edac_cap = EDAC_FLAG_NONE;
u8 bit;
- if (pvt->umc) {
- u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
+ bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
+ ? 19
+ : 17;
- for (i = 0; i < NUM_UMCS; i++) {
- if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
- continue;
+ if (pvt->dclr0 & BIT(bit))
+ edac_cap = EDAC_FLAG_SECDED;
+
+ return edac_cap;
+}
- umc_en_mask |= BIT(i);
+static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
+{
+ u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
+ unsigned long edac_cap = EDAC_FLAG_NONE;
- /* UMC Configuration bit 12 (DimmEccEn) */
- if (pvt->umc[i].umc_cfg & BIT(12))
- dimm_ecc_en_mask |= BIT(i);
- }
+ for_each_umc(i) {
+ if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
+ continue;
- if (umc_en_mask == dimm_ecc_en_mask)
- edac_cap = EDAC_FLAG_SECDED;
- } else {
- bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
- ? 19
- : 17;
+ umc_en_mask |= BIT(i);
- if (pvt->dclr0 & BIT(bit))
- edac_cap = EDAC_FLAG_SECDED;
+ /* UMC Configuration bit 12 (DimmEccEn) */
+ if (pvt->umc[i].umc_cfg & BIT(12))
+ dimm_ecc_en_mask |= BIT(i);
}
+ if (umc_en_mask == dimm_ecc_en_mask)
+ edac_cap = EDAC_FLAG_SECDED;
+
return edac_cap;
}
-static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
+/*
+ * debug routine to display the memory sizes of all logical DIMMs and their
+ * CSROWs
+ */
+static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+{
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
+ int dimm, size0, size1;
+
+ if (pvt->fam == 0xf) {
+ /* K8 families < revF not supported yet */
+ if (pvt->ext_model < K8_REV_F)
+ return;
+
+ WARN_ON(ctrl != 0);
+ }
+
+ if (pvt->fam == 0x10) {
+ dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
+ : pvt->dbam0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
+ pvt->csels[1].csbases :
+ pvt->csels[0].csbases;
+ } else if (ctrl) {
+ dbam = pvt->dbam0;
+ dcsb = pvt->csels[1].csbases;
+ }
+ edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+ ctrl, dbam);
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
+
+ /* Dump memory sizes for DIMM and its CSROWs */
+ for (dimm = 0; dimm < 4; dimm++) {
+ size0 = 0;
+ if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
+ /*
+ * For F15m60h, we need multiplier for LRDIMM cs_size
+ * calculation. We pass dimm value to the dbam_to_cs
+ * mapper so we can find the multiplier from the
+ * corresponding DCSM.
+ */
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam),
+ dimm);
+
+ size1 = 0;
+ if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam),
+ dimm);
+
+ amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
+ dimm * 2, size0,
+ dimm * 2 + 1, size1);
+ }
+}
+
static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
@@ -762,42 +1172,179 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
}
- edac_dbg(1, "All DIMMs support ECC:%s\n",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, "All DIMMs support ECC: %s\n", str_yes_no(dclr & BIT(19)));
edac_dbg(1, " PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ str_enabled_disabled(dclr & BIT(8)));
if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ str_yes_no(dclr & BIT(12)),
+ str_yes_no(dclr & BIT(13)),
+ str_yes_no(dclr & BIT(14)),
+ str_yes_no(dclr & BIT(15)));
}
-static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
+#define CS_EVEN_PRIMARY BIT(0)
+#define CS_ODD_PRIMARY BIT(1)
+#define CS_EVEN_SECONDARY BIT(2)
+#define CS_ODD_SECONDARY BIT(3)
+#define CS_3R_INTERLEAVE BIT(4)
+
+#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
+#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
+
+static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
- int dimm, size0, size1, cs0, cs1;
+ u8 base, count = 0;
+ int cs_mode = 0;
- edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+ if (csrow_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_PRIMARY;
- for (dimm = 0; dimm < 4; dimm++) {
- size0 = 0;
- cs0 = dimm * 2;
+ if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_PRIMARY;
- if (csrow_enabled(cs0, ctrl, pvt))
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0);
+ if (csrow_sec_enabled(2 * dimm, ctrl, pvt))
+ cs_mode |= CS_EVEN_SECONDARY;
- size1 = 0;
+ if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
+ cs_mode |= CS_ODD_SECONDARY;
+
+ /*
+ * 3 Rank interleaving support.
+ * There should be only three bases enabled and their two masks should
+ * be equal.
+ */
+ for_each_chip_select(base, ctrl, pvt)
+ count += csrow_enabled(base, ctrl, pvt);
+
+ if (count == 3 &&
+ pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
+ edac_dbg(1, "3R interleaving in use.\n");
+ cs_mode |= CS_3R_INTERLEAVE;
+ }
+
+ return cs_mode;
+}
+
+static int calculate_cs_size(u32 mask, unsigned int cs_mode)
+{
+ int msb, weight, num_zero_bits;
+ u32 deinterleaved_mask;
+
+ if (!mask)
+ return 0;
+
+ /*
+ * The number of zero bits in the mask is equal to the number of bits
+ * in a full mask minus the number of bits in the current mask.
+ *
+ * The MSB is the number of bits in the full mask because BIT[0] is
+ * always 0.
+ *
+ * In the special 3 Rank interleaving case, a single bit is flipped
+ * without swapping with the most significant bit. This can be handled
+ * by keeping the MSB where it is and ignoring the single zero bit.
+ */
+ msb = fls(mask) - 1;
+ weight = hweight_long(mask);
+ num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
+
+ /* Take the number of zero bits off from the top of the mask. */
+ deinterleaved_mask = GENMASK(msb - num_zero_bits, 1);
+ edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", deinterleaved_mask);
+
+ return (deinterleaved_mask >> 2) + 1;
+}
+
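A quick numeric check of the arithmetic above, using invented mask values:

	/*
	 * addr_mask = 0x1FFFE: msb = 16, weight = 16, no zero bits dropped,
	 * so size = (0x1FFFE >> 2) + 1 = 32768 kB (32 MB).
	 *
	 * addr_mask = 0x1FFFC (bit 1 cleared for channel interleaving):
	 * weight = 15, one zero bit is taken off the top, the deinterleaved
	 * mask becomes 0xFFFE and size = (0xFFFE >> 2) + 1 = 16384 kB (16 MB).
	 */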
+static int __addr_mask_to_cs_size(u32 addr_mask, u32 addr_mask_sec,
+ unsigned int cs_mode, int csrow_nr, int dimm)
+{
+ int size;
+
+ edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
+ edac_dbg(1, " Primary AddrMask: 0x%x\n", addr_mask);
+
+ /* Register [31:1] = Address [39:9]. Size is in kBs here. */
+ size = calculate_cs_size(addr_mask, cs_mode);
+
+ edac_dbg(1, " Secondary AddrMask: 0x%x\n", addr_mask_sec);
+ size += calculate_cs_size(addr_mask_sec, cs_mode);
+
+ /* Return size in MBs. */
+ return size >> 10;
+}
+
+static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ unsigned int cs_mode, int csrow_nr)
+{
+ u32 addr_mask = 0, addr_mask_sec = 0;
+ int cs_mask_nr = csrow_nr;
+ int dimm, size = 0;
+
+ /* No Chip Selects are enabled. */
+ if (!cs_mode)
+ return size;
+
+ /* Requested size of an even CS but none are enabled. */
+ if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
+ return size;
+
+ /* Requested size of an odd CS but none are enabled. */
+ if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
+ return size;
+
+ /*
+ * Family 17h introduced systems with one mask per DIMM,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 and CS1 -> MASK0 / DIMM0
+ * CS2 and CS3 -> MASK1 / DIMM1
+ *
+ * Family 19h Model 10h introduced systems with one mask per Chip Select,
+ * and two Chip Selects per DIMM.
+ *
+ * CS0 -> MASK0 -> DIMM0
+ * CS1 -> MASK1 -> DIMM0
+ * CS2 -> MASK2 -> DIMM1
+ * CS3 -> MASK3 -> DIMM1
+ *
+ * Keep the mask number equal to the Chip Select number for newer systems,
+ * and shift the mask number for older systems.
+ */
+ dimm = csrow_nr >> 1;
+
+ if (!pvt->flags.zn_regs_v2)
+ cs_mask_nr >>= 1;
+
+ if (cs_mode & (CS_EVEN_PRIMARY | CS_ODD_PRIMARY))
+ addr_mask = pvt->csels[umc].csmasks[cs_mask_nr];
+
+ if (cs_mode & (CS_EVEN_SECONDARY | CS_ODD_SECONDARY))
+ addr_mask_sec = pvt->csels[umc].csmasks_sec[cs_mask_nr];
+
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, dimm);
+}
+
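One concrete call to show how the indexing above works out (the chip-select number is illustrative):

	/*
	 * umc_addr_mask_to_cs_size(pvt, umc, cs_mode, 3):
	 *   dimm       = 3 >> 1 = 1
	 *   cs_mask_nr = 1 on pre-zn_regs_v2 parts (CS2/CS3 share MASK1)
	 *   cs_mask_nr = 3 on zn_regs_v2 parts (one mask per chip select)
	 */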
+static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+{
+ int dimm, size0, size1, cs0, cs1, cs_mode;
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+
+ for (dimm = 0; dimm < 2; dimm++) {
+ cs0 = dimm * 2;
cs1 = dimm * 2 + 1;
- if (csrow_enabled(cs1, ctrl, pvt))
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
+ cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);
+
+ size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
+ size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
@@ -805,61 +1352,44 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
}
}
-static void __dump_misc_regs_df(struct amd64_pvt *pvt)
+static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
- u32 i, tmp, umc_base;
+ u32 i;
- for (i = 0; i < NUM_UMCS; i++) {
- umc_base = get_umc_base(i);
+ for_each_umc(i) {
umc = &pvt->umc[i];
edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
-
- amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
- edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
-
- amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
- edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
- i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
- (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cap_hi & BIT(30)),
+ str_yes_no(umc->umc_cap_hi & BIT(31)));
edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
- i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
+ i, str_yes_no(umc->umc_cfg & BIT(12)));
edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
+ i, str_yes_no(umc->dimm_cfg & BIT(6)));
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
- i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
-
- if (pvt->dram_type == MEM_LRDDR4) {
- amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
- edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
- i, 1 << ((tmp >> 4) & 0x3));
- }
+ i, str_yes_no(umc->dimm_cfg & BIT(7)));
- debug_display_dimm_sizes_df(pvt, i);
+ umc_debug_display_dimm_sizes(pvt, i);
}
-
- edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
- pvt->dhar, dhar_base(pvt));
}
-/* Display and decode various NB registers for debug purposes. */
-static void __dump_misc_regs(struct amd64_pvt *pvt)
+static void dct_dump_misc_regs(struct amd64_pvt *pvt)
{
edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
edac_dbg(1, " NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_DCT_DUAL));
edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ str_yes_no(pvt->nbcap & NBCAP_SECDED),
+ str_yes_no(pvt->nbcap & NBCAP_CHIPKILL));
debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
@@ -870,37 +1400,27 @@ static void __dump_misc_regs(struct amd64_pvt *pvt)
(pvt->fam == 0xf) ? k8_dhar_offset(pvt)
: f10_dhar_offset(pvt));
- debug_display_dimm_sizes(pvt, 0);
+ dct_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (pvt->fam == 0xf)
return;
- debug_display_dimm_sizes(pvt, 1);
+ dct_debug_display_dimm_sizes(pvt, 1);
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
-}
-/* Display and decode various NB registers for debug purposes. */
-static void dump_misc_regs(struct amd64_pvt *pvt)
-{
- if (pvt->umc)
- __dump_misc_regs_df(pvt);
- else
- __dump_misc_regs(pvt);
-
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", str_yes_no(dhar_valid(pvt)));
- amd64_info("using %s syndromes.\n",
- ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
+ amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
* See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
-static void prep_chip_selects(struct amd64_pvt *pvt)
+static void dct_prep_chip_selects(struct amd64_pvt *pvt)
{
if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
@@ -914,87 +1434,160 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
}
}
-/*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
- */
-static void read_dct_base_mask(struct amd64_pvt *pvt)
+static void umc_prep_chip_selects(struct amd64_pvt *pvt)
{
- int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
+ int umc;
- prep_chip_selects(pvt);
+ for_each_umc(umc) {
+ pvt->csels[umc].b_cnt = 4;
+ pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
+ }
+}
- if (pvt->umc) {
- base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
- base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
- mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
- mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
- } else {
- base_reg0 = DCSB0;
- base_reg1 = DCSB1;
- mask_reg0 = DCSM0;
- mask_reg1 = DCSM1;
+static void umc_read_base_mask(struct amd64_pvt *pvt)
+{
+ u32 umc_base_reg, umc_base_reg_sec;
+ u32 umc_mask_reg, umc_mask_reg_sec;
+ u32 base_reg, base_reg_sec;
+ u32 mask_reg, mask_reg_sec;
+ u32 *base, *base_sec;
+ u32 *mask, *mask_sec;
+ int cs, umc;
+ u32 tmp;
+
+ for_each_umc(umc) {
+ umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
+ umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
+
+ for_each_chip_select(cs, umc, pvt) {
+ base = &pvt->csels[umc].csbases[cs];
+ base_sec = &pvt->csels[umc].csbases_sec[cs];
+
+ base_reg = umc_base_reg + (cs * 4);
+ base_reg_sec = umc_base_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg, &tmp)) {
+ *base = tmp;
+ edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base, base_reg);
+ }
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, &tmp)) {
+ *base_sec = tmp;
+ edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base_sec, base_reg_sec);
+ }
+ }
+
+ umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
+ umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);
+
+ for_each_chip_select_mask(cs, umc, pvt) {
+ mask = &pvt->csels[umc].csmasks[cs];
+ mask_sec = &pvt->csels[umc].csmasks_sec[cs];
+
+ mask_reg = umc_mask_reg + (cs * 4);
+ mask_reg_sec = umc_mask_reg_sec + (cs * 4);
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg, &tmp)) {
+ *mask = tmp;
+ edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask, mask_reg);
+ }
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, &tmp)) {
+ *mask_sec = tmp;
+ edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask_sec, mask_reg_sec);
+ }
+ }
}
+}
+
+/*
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
+ */
+static void dct_read_base_mask(struct amd64_pvt *pvt)
+{
+ int cs;
for_each_chip_select(cs, 0, pvt) {
- int reg0 = base_reg0 + (cs * 4);
- int reg1 = base_reg1 + (cs * 4);
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
u32 *base0 = &pvt->csels[0].csbases[cs];
u32 *base1 = &pvt->csels[1].csbases[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
- cs, *base0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
- if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
- cs, *base1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
- edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
-
- if (pvt->fam == 0xf)
- continue;
+ if (pvt->fam == 0xf)
+ continue;
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
- edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, (pvt->fam == 0x10) ? reg1
- : reg0);
- }
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, (pvt->fam == 0x10) ? reg1
+ : reg0);
}
for_each_chip_select_mask(cs, 0, pvt) {
- int reg0 = mask_reg0 + (cs * 4);
- int reg1 = mask_reg1 + (cs * 4);
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
u32 *mask0 = &pvt->csels[0].csmasks[cs];
u32 *mask1 = &pvt->csels[1].csmasks[cs];
- if (pvt->umc) {
- if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
- cs, *mask0, reg0);
+ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
- if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
- cs, *mask1, reg1);
- } else {
- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
- edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ if (pvt->fam == 0xf)
+ continue;
- if (pvt->fam == 0xf)
- continue;
+ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, (pvt->fam == 0x10) ? reg1
+ : reg0);
+ }
+}
+
+static void umc_determine_memory_type(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ u32 i;
+
+ for_each_umc(i) {
+ umc = &pvt->umc[i];
+
+ if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
+ umc->dram_type = MEM_EMPTY;
+ continue;
+ }
- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
- edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, (pvt->fam == 0x10) ? reg1
- : reg0);
+ /*
+ * Check if the system supports the "DDR Type" field in UMC Config
+ * and has DDR5 DIMMs in use.
+ */
+ if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
+ if (umc->dimm_cfg & BIT(5))
+ umc->dram_type = MEM_LRDDR5;
+ else if (umc->dimm_cfg & BIT(4))
+ umc->dram_type = MEM_RDDR5;
+ else
+ umc->dram_type = MEM_DDR5;
+ } else {
+ if (umc->dimm_cfg & BIT(5))
+ umc->dram_type = MEM_LRDDR4;
+ else if (umc->dimm_cfg & BIT(4))
+ umc->dram_type = MEM_RDDR4;
+ else
+ umc->dram_type = MEM_DDR4;
}
+
+ edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
}
}
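For readability, the decode used above, restated (taken from the code, not the PPR):

	/*
	 * dimm_cfg bit 5 -> load-reduced, bit 4 -> registered, neither ->
	 * unbuffered. The DDR5 names are used only when flags.zn_regs_v2 is
	 * set and UMC_CFG[2:0] ("DDR Type") reads 0x1; otherwise the DDR4
	 * names apply.
	 */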
-static void determine_memory_type(struct amd64_pvt *pvt)
+static void dct_determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
@@ -1043,47 +1636,22 @@ static void determine_memory_type(struct amd64_pvt *pvt)
case 0x16:
goto ddr3;
- case 0x17:
- if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
- pvt->dram_type = MEM_LRDDR4;
- else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
- pvt->dram_type = MEM_RDDR4;
- else
- pvt->dram_type = MEM_DDR4;
- return;
-
default:
WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
pvt->dram_type = MEM_EMPTY;
}
+
+ edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
return;
ddr3:
pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
-/* Get the number of DCT channels the memory controller is using. */
-static int k8_early_channel_count(struct amd64_pvt *pvt)
-{
- int flag;
-
- if (pvt->ext_model >= K8_REV_F)
- /* RevF (NPT) and later */
- flag = pvt->dclr0 & WIDTH_128;
- else
- /* RevE and earlier */
- flag = pvt->dclr0 & REVE_WIDTH_128;
-
- /* not used */
- pvt->dclr1 = 0;
-
- return (flag) ? 2 : 1;
-}
-
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
- u16 mce_nid = amd_get_nb_id(m->extcpu);
+ u16 mce_nid = topology_amd_node_id(m->extcpu);
struct mem_ctl_info *mci;
u8 start_bit = 1;
u8 end_bit = 47;
@@ -1331,69 +1899,6 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
}
}
-/*
- * Get the number of DCT channels in use.
- *
- * Return:
- * number of Memory Channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
-static int f1x_early_channel_count(struct amd64_pvt *pvt)
-{
- int i, j, channels = 0;
-
- /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
- if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
- return 2;
-
- /*
- * Need to check if in unganged mode: In such, there are 2 channels,
- * but they are not in 128 bit mode and thus the above 'dclr0' status
- * bit will be OFF.
- *
- * Need to check DCT0[0] and DCT1[0] to see if only one of them has
- * their CSEnable bit on. If so, then SINGLE DIMM case.
- */
- edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
-
- /*
- * Check DRAM Bank Address Mapping values for each DIMM to see if there
- * is more than just one DIMM present in unganged mode. Need to check
- * both controllers since DIMMs can be placed in either one.
- */
- for (i = 0; i < 2; i++) {
- u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
-
- for (j = 0; j < 4; j++) {
- if (DBAM_DIMM(j, dbam) > 0) {
- channels++;
- break;
- }
- }
- }
-
- if (channels > 2)
- channels = 2;
-
- amd64_info("MCT channel count: %d\n", channels);
-
- return channels;
-}
-
-static int f17_early_channel_count(struct amd64_pvt *pvt)
-{
- int i, channels = 0;
-
- /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
- for (i = 0; i < NUM_UMCS; i++)
- channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
-
- amd64_info("MCT channel count: %d\n", channels);
-
- return channels;
-}
-
static int ddr3_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
@@ -1521,23 +2026,6 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
return ddr3_cs_size(cs_mode, false);
}
-static int f17_base_addr_to_cs_size(struct amd64_pvt *pvt, u8 umc,
- unsigned int cs_mode, int csrow_nr)
-{
- u32 base_addr = pvt->csels[umc].csbases[csrow_nr];
-
- /* Each mask is used for every two base addresses. */
- u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr >> 1];
-
- /* Register [31:1] = Address [39:9]. Size is in kBs here. */
- u32 size = ((addr_mask >> 1) - (base_addr >> 1) + 1) >> 1;
-
- edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr, addr_mask);
-
- /* Return size in MBs. */
- return size >> 10;
-}
-
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
@@ -1553,15 +2041,15 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
if (!dct_ganging_enabled(pvt))
edac_dbg(0, " Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ str_yes_no(dct_high_range_enabled(pvt)));
edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ str_enabled_disabled(dct_data_intlv_enabled(pvt)),
+ str_yes_no(dct_memory_cleared(pvt)));
edac_dbg(0, " channel interleave: %s, "
"interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ str_enabled_disabled(dct_interleave_enabled(pvt)),
dct_sel_interleave_addr(pvt));
}
@@ -2061,148 +2549,6 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
}
/*
- * debug routine to display the memory sizes of all logical DIMMs and its
- * CSROWs
- */
-static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
-{
- int dimm, size0, size1;
- u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
- u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-
- if (pvt->fam == 0xf) {
- /* K8 families < revF not supported yet */
- if (pvt->ext_model < K8_REV_F)
- return;
- else
- WARN_ON(ctrl != 0);
- }
-
- if (pvt->fam == 0x10) {
- dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
- : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
- pvt->csels[1].csbases :
- pvt->csels[0].csbases;
- } else if (ctrl) {
- dbam = pvt->dbam0;
- dcsb = pvt->csels[1].csbases;
- }
- edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
- ctrl, dbam);
-
- edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
-
- /* Dump memory sizes for DIMM and its CSROWs */
- for (dimm = 0; dimm < 4; dimm++) {
-
- size0 = 0;
- if (dcsb[dimm*2] & DCSB_CS_ENABLE)
- /*
- * For F15m60h, we need multiplier for LRDIMM cs_size
- * calculation. We pass dimm value to the dbam_to_cs
- * mapper so we can find the multiplier from the
- * corresponding DCSM.
- */
- size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
- DBAM_DIMM(dimm, dbam),
- dimm);
-
- size1 = 0;
- if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
- DBAM_DIMM(dimm, dbam),
- dimm);
-
- amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
- dimm * 2, size0,
- dimm * 2 + 1, size1);
- }
-}
-
-static struct amd64_family_type family_types[] = {
- [K8_CPUS] = {
- .ctl_name = "K8",
- .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
- .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
- .ops = {
- .early_channel_count = k8_early_channel_count,
- .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
- .dbam_to_cs = k8_dbam_to_chip_select,
- }
- },
- [F10_CPUS] = {
- .ctl_name = "F10h",
- .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
- .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f10_dbam_to_chip_select,
- }
- },
- [F15_CPUS] = {
- .ctl_name = "F15h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f15_dbam_to_chip_select,
- }
- },
- [F15_M30H_CPUS] = {
- .ctl_name = "F15h_M30h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F15_M60H_CPUS] = {
- .ctl_name = "F15h_M60h",
- .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f15_m60h_dbam_to_chip_select,
- }
- },
- [F16_CPUS] = {
- .ctl_name = "F16h",
- .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F16_M30H_CPUS] = {
- .ctl_name = "F16h_M30h",
- .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
- .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
- .ops = {
- .early_channel_count = f1x_early_channel_count,
- .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
- .dbam_to_cs = f16_dbam_to_chip_select,
- }
- },
- [F17_CPUS] = {
- .ctl_name = "F17h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
- .ops = {
- .early_channel_count = f17_early_channel_count,
- .dbam_to_cs = f17_base_addr_to_cs_size,
- }
- },
-};
-
-/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
* uses those to find the symbol in error and thus the DIMM.
@@ -2313,14 +2659,11 @@ static int map_err_sym_to_channel(int err_sym, int sym_size)
case 0x20:
case 0x21:
return 0;
- break;
case 0x22:
case 0x23:
return 1;
- break;
default:
return err_sym >> 4;
- break;
}
/* x8 symbols */
else
@@ -2330,17 +2673,12 @@ static int map_err_sym_to_channel(int err_sym, int sym_size)
WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
err_sym);
return -1;
- break;
-
case 0x11:
return 0;
- break;
case 0x12:
return 1;
- break;
default:
return err_sym >> 3;
- break;
}
return -1;
}
@@ -2453,27 +2791,30 @@ static inline void decode_bus_error(int node_id, struct mce *m)
* To find the UMC channel represented by this bank we need to match on its
* instance_id. The instance_id of a bank is held in the lower 32 bits of its
* IPID.
+ *
+ * Currently, we can derive the channel number by looking at the 6th nibble in
+ * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
+ * number.
+ *
+ * For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
+ * the MCA_SYND[ErrorInformation] field.
*/
-static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
+static void umc_get_err_info(struct mce *m, struct err_info *err)
{
- u32 umc_instance_id[] = {0x50f00, 0x150f00};
- u32 instance_id = m->ipid & GENMASK(31, 0);
- int i, channel = -1;
-
- for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
- if (umc_instance_id[i] == instance_id)
- channel = i;
-
- return channel;
+ err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
+ err->csrow = m->synd & 0x7;
}
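As a sanity check against the lookup table this replaces:

	/*
	 * The removed table mapped instance_id 0x50f00 -> channel 0 and
	 * 0x150f00 -> channel 1. The shift above reproduces both
	 * (0x00050f00 >> 20 == 0, 0x00150f00 >> 20 == 1) and also scales to
	 * parts with more than two UMC channels.
	 */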
static void decode_umc_error(int node_id, struct mce *m)
{
u8 ecc_type = (m->status >> 45) & 0x3;
struct mem_ctl_info *mci;
+ unsigned long sys_addr;
struct amd64_pvt *pvt;
+ struct atl_err a_err;
struct err_info err;
- u64 sys_addr;
+
+ node_id = fixup_node_id(node_id, m);
mci = edac_mc_find(node_id);
if (!mci)
@@ -2486,19 +2827,6 @@ static void decode_umc_error(int node_id, struct mce *m)
if (m->status & MCI_STATUS_DEFERRED)
ecc_type = 3;
- err.channel = find_umc_channel(pvt, m);
- if (err.channel < 0) {
- err.err_code = ERR_CHANNEL;
- goto log_error;
- }
-
- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
- err.err_code = ERR_NORM_ADDR;
- goto log_error;
- }
-
- error_address_to_page_and_offset(sys_addr, &err);
-
if (!(m->status & MCI_STATUS_SYNDV)) {
err.err_code = ERR_SYND;
goto log_error;
@@ -2513,7 +2841,19 @@ static void decode_umc_error(int node_id, struct mce *m)
err.err_code = ERR_CHANNEL;
}
- err.csrow = m->synd & 0x7;
+ pvt->ops->get_err_info(m, &err);
+
+ a_err.addr = m->addr;
+ a_err.ipid = m->ipid;
+ a_err.cpu = m->extcpu;
+
+ sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
+ if (IS_ERR_VALUE(sys_addr)) {
+ err.err_code = ERR_NORM_ADDR;
+ goto log_error;
+ }
+
+ error_address_to_page_and_offset(sys_addr, &err);
log_error:
__log_ecc_error(mci, &err, ecc_type);
@@ -2522,38 +2862,14 @@ log_error:
/*
* Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
- * Reserve F0 and F6 on systems with a UMC.
*/
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
- if (pvt->umc) {
- pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
- if (!pvt->F0) {
- amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
- return -ENODEV;
- }
-
- pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
- if (!pvt->F6) {
- pci_dev_put(pvt->F0);
- pvt->F0 = NULL;
-
- amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
- return -ENODEV;
- }
-
- edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
- edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
- edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
-
- return 0;
- }
-
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F1) {
- amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
+ edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
return -ENODEV;
}
@@ -2563,10 +2879,13 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
- amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
+ edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
return -ENODEV;
}
+ if (!pci_ctl_dev)
+ pci_ctl_dev = &pvt->F2->dev;
+
edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
@@ -2574,36 +2893,10 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
return 0;
}
-static void free_mc_sibling_devs(struct amd64_pvt *pvt)
-{
- if (pvt->umc) {
- pci_dev_put(pvt->F0);
- pci_dev_put(pvt->F6);
- } else {
- pci_dev_put(pvt->F1);
- pci_dev_put(pvt->F2);
- }
-}
-
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
pvt->ecc_sym_sz = 4;
- if (pvt->umc) {
- u8 i;
-
- for (i = 0; i < NUM_UMCS; i++) {
- /* Check enabled channels only: */
- if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
- (pvt->umc[i].ecc_ctrl & BIT(7))) {
- pvt->ecc_sym_sz = 8;
- break;
- }
- }
-
- return;
- }
-
if (pvt->fam >= 0x10) {
u32 tmp;
@@ -2621,23 +2914,32 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
/*
* Retrieve the hardware registers of the memory controller.
*/
-static void __read_mc_regs_df(struct amd64_pvt *pvt)
+static void umc_read_mc_regs(struct amd64_pvt *pvt)
{
u8 nid = pvt->mc_node_id;
struct amd64_umc *umc;
- u32 i, umc_base;
+ u32 i, tmp, umc_base;
/* Read registers from each UMC */
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
- amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
- amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
- amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
- amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
- amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
+ if (!amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &tmp))
+ umc->dimm_cfg = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
+ umc->umc_cfg = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
+ umc->sdp_ctrl = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
+ umc->ecc_ctrl = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &tmp))
+ umc->umc_cap_hi = tmp;
}
}
@@ -2645,7 +2947,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
* Retrieve the hardware registers of the memory controller (this includes the
* 'Address Map' and 'Misc' device regs)
*/
-static void read_mc_regs(struct amd64_pvt *pvt)
+static void dct_read_mc_regs(struct amd64_pvt *pvt)
{
unsigned int range;
u64 msr_val;
@@ -2654,25 +2956,18 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
* those are Read-As-Zero.
*/
- rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
+ rdmsrq(MSR_K8_TOP_MEM1, pvt->top_mem);
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
- rdmsrl(MSR_K8_SYSCFG, msr_val);
+ rdmsrq(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
- rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
+ rdmsrq(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else {
edac_dbg(0, " TOP_MEM2 disabled\n");
}
- if (pvt->umc) {
- __read_mc_regs_df(pvt);
- amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
-
- goto skip;
- }
-
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
read_dram_ctl_register(pvt);
@@ -2713,15 +3008,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
}
-skip:
- read_dct_base_mask(pvt);
-
- determine_memory_type(pvt);
- edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
-
determine_ecc_sym_sz(pvt);
-
- dump_misc_regs(pvt);
}
/*
@@ -2758,50 +3045,102 @@ skip:
* encompasses
*
*/
-static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
+static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
- int csrow_nr = csrow_nr_orig;
u32 cs_mode, nr_pages;
- if (!pvt->umc)
- csrow_nr >>= 1;
-
+ csrow_nr >>= 1;
cs_mode = DBAM_DIMM(csrow_nr, dbam);
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
nr_pages <<= 20 - PAGE_SHIFT;
edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
- csrow_nr_orig, dct, cs_mode);
+ csrow_nr, dct, cs_mode);
edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
return nr_pages;
}
+static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
+{
+ int csrow_nr = csrow_nr_orig;
+ u32 cs_mode, nr_pages;
+
+ cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
+
+ nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
+ nr_pages <<= 20 - PAGE_SHIFT;
+
+ edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
+ csrow_nr_orig, dct, cs_mode);
+ edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
+
+ return nr_pages;
+}
+
+static void umc_init_csrows(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ enum edac_type edac_mode = EDAC_NONE;
+ enum dev_type dev_type = DEV_UNKNOWN;
+ struct dimm_info *dimm;
+ u8 umc, cs;
+
+ if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
+ edac_mode = EDAC_S16ECD16ED;
+ dev_type = DEV_X16;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
+ edac_mode = EDAC_S8ECD8ED;
+ dev_type = DEV_X8;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
+ edac_mode = EDAC_S4ECD4ED;
+ dev_type = DEV_X4;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
+ edac_mode = EDAC_SECDED;
+ }
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ if (!csrow_enabled(cs, umc, pvt))
+ continue;
+
+ dimm = mci->csrows[cs]->channels[umc]->dimm;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, cs);
+
+ dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
+ dimm->mtype = pvt->umc[umc].dram_type;
+ dimm->edac_mode = edac_mode;
+ dimm->dtype = dev_type;
+ dimm->grain = 64;
+ }
+ }
+}
+
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
*/
-static int init_csrows(struct mem_ctl_info *mci)
+static void dct_init_csrows(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
enum edac_type edac_mode = EDAC_NONE;
struct csrow_info *csrow;
struct dimm_info *dimm;
- int i, j, empty = 1;
int nr_pages = 0;
+ int i, j;
u32 val;
- if (!pvt->umc) {
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
- pvt->nbcfg = val;
+ pvt->nbcfg = val;
- edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- }
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
@@ -2817,19 +3156,18 @@ static int init_csrows(struct mem_ctl_info *mci)
continue;
csrow = mci->csrows[i];
- empty = 0;
edac_dbg(1, "MC node: %d, csrow: %d\n",
pvt->mc_node_id, i);
if (row_dct0) {
- nr_pages = get_csrow_nr_pages(pvt, 0, i);
+ nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
csrow->channels[0]->dimm->nr_pages = nr_pages;
}
/* K8 has only one DCT */
if (pvt->fam != 0xf && row_dct1) {
- int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
+ int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
nr_pages += row_dct1_pages;
@@ -2838,26 +3176,19 @@ static int init_csrows(struct mem_ctl_info *mci)
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
/* Determine DIMM ECC mode: */
- if (pvt->umc) {
- if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
- edac_mode = EDAC_S4ECD4ED;
- else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
- edac_mode = EDAC_SECDED;
-
- } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
? EDAC_S4ECD4ED
: EDAC_SECDED;
}
- for (j = 0; j < pvt->channel_count; j++) {
+ for (j = 0; j < pvt->max_mcs; j++) {
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
+ dimm->grain = 64;
}
}
-
- return empty;
}
/* get all cores on this DCT */
@@ -2866,7 +3197,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
int cpu;
for_each_online_cpu(cpu)
- if (amd_get_nb_id(cpu) == nid)
+ if (topology_amd_node_id(cpu) == nid)
cpumask_set_cpu(cpu, mask);
}
@@ -2891,8 +3222,7 @@ static bool nb_mce_bank_enabled_on_node(u16 nid)
nbe = reg->l & MSR_MCGCTL_NBE;
edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ cpu, reg->q, str_enabled_disabled(nbe));
if (!nbe)
goto out;
@@ -3020,84 +3350,61 @@ static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
amd64_warn("Error restoring NB MCGCTL settings!\n");
}
-/*
- * EDAC requires that the BIOS have ECC enabled before
- * taking over the processing of ECC errors. A command line
- * option allows to force-enable hardware ECC later in
- * enable_ecc_error_reporting().
- */
-static const char *ecc_msg =
- "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
- " Either enable ECC checking or force module loading by setting "
- "'ecc_enable_override'.\n"
- " (Note that use of the override may cause unknown side effects.)\n";
-
-static bool ecc_enabled(struct pci_dev *F3, u16 nid)
+static bool dct_ecc_enabled(struct amd64_pvt *pvt)
{
+ u16 nid = pvt->mc_node_id;
bool nb_mce_en = false;
- u8 ecc_en = 0, i;
+ u8 ecc_en = 0;
u32 value;
- if (boot_cpu_data.x86 >= 0x17) {
- u8 umc_en_mask = 0, ecc_en_mask = 0;
-
- for (i = 0; i < NUM_UMCS; i++) {
- u32 base = get_umc_base(i);
-
- /* Only check enabled UMCs. */
- if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
- continue;
-
- if (!(value & UMC_SDP_INIT))
- continue;
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
- umc_en_mask |= BIT(i);
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
- if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
- continue;
+ nb_mce_en = nb_mce_bank_enabled_on_node(nid);
+ if (!nb_mce_en)
+ edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
+ MSR_IA32_MCG_CTL, nid);
- if (value & UMC_ECC_ENABLED)
- ecc_en_mask |= BIT(i);
- }
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, str_enabled_disabled(ecc_en));
- /* Check whether at least one UMC is enabled: */
- if (umc_en_mask)
- ecc_en = umc_en_mask == ecc_en_mask;
- else
- edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
+ return ecc_en && nb_mce_en;
+}
- /* Assume UMC MCA banks are enabled. */
- nb_mce_en = true;
- } else {
- amd64_read_pci_cfg(F3, NBCFG, &value);
+static bool umc_ecc_enabled(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ bool ecc_en = false;
+ int i;
- ecc_en = !!(value & NBCFG_ECC_ENABLE);
+ /* Check whether at least one UMC is enabled: */
+ for_each_umc(i) {
+ umc = &pvt->umc[i];
- nb_mce_en = nb_mce_bank_enabled_on_node(nid);
- if (!nb_mce_en)
- edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
- MSR_IA32_MCG_CTL, nid);
+ if (umc->sdp_ctrl & UMC_SDP_INIT &&
+ umc->umc_cap_hi & UMC_ECC_ENABLED) {
+ ecc_en = true;
+ break;
+ }
}
- amd64_info("Node %d: DRAM ECC %s.\n",
- nid, (ecc_en ? "enabled" : "disabled"));
+ edac_dbg(3, "Node %d: DRAM ECC %s.\n", pvt->mc_node_id, str_enabled_disabled(ecc_en));
- if (!ecc_en || !nb_mce_en) {
- amd64_info("%s", ecc_msg);
- return false;
- }
- return true;
+ return ecc_en;
}
static inline void
-f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
- u8 i, ecc_en = 1, cpk_en = 1;
+ u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
+
+ dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
+ dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
}
}
@@ -3105,224 +3412,590 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
if (ecc_en) {
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (cpk_en)
+ if (!cpk_en)
+ return;
+
+ if (dev_x4)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+ else if (dev_x16)
+ mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
+ else
+ mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
}
}
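Assuming every enabled UMC agrees on the device type, the capability selection above reduces to roughly this mapping (a summary derived from the checks, not an exhaustive spec):

	ECC disabled                      -> no ECC flags
	ECC on, no chipkill               -> EDAC_FLAG_SECDED
	ECC + chipkill, all x4 devices    -> EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED
	ECC + chipkill, all x16 devices   -> EDAC_FLAG_SECDED | EDAC_FLAG_S16ECD16ED
	ECC + chipkill, otherwise         -> EDAC_FLAG_SECDED | EDAC_FLAG_S8ECD8ED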
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
- struct amd64_family_type *fam)
+static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->umc) {
- f17h_determine_edac_ctl_cap(mci, pvt);
- } else {
- if (pvt->nbcap & NBCAP_SECDED)
- mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+ if (pvt->nbcap & NBCAP_SECDED)
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & NBCAP_CHIPKILL)
- mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
- }
+ if (pvt->nbcap & NBCAP_CHIPKILL)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
- mci->edac_cap = determine_edac_cap(pvt);
+ mci->edac_cap = dct_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
- mci->mod_ver = EDAC_AMD64_VERSION;
- mci->ctl_name = fam->ctl_name;
+ mci->ctl_name = pvt->ctl_name;
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
/* memory scrubber interface */
mci->set_sdram_scrub_rate = set_scrub_rate;
mci->get_sdram_scrub_rate = get_scrub_rate;
+
+ dct_init_csrows(mci);
+}
+
+static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+
+ umc_determine_edac_ctl_cap(mci, pvt);
+
+ mci->edac_cap = umc_determine_edac_cap(pvt);
+ mci->mod_name = EDAC_MOD_STR;
+ mci->ctl_name = pvt->ctl_name;
+ mci->dev_name = pci_name(pvt->F3);
+ mci->ctl_page_to_phys = NULL;
+
+ umc_init_csrows(mci);
+}
+
+static int dct_hw_info_get(struct amd64_pvt *pvt)
+{
+ int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);
+
+ if (ret)
+ return ret;
+
+ dct_prep_chip_selects(pvt);
+ dct_read_base_mask(pvt);
+ dct_read_mc_regs(pvt);
+ dct_determine_memory_type(pvt);
+
+ return 0;
+}
+
+static int umc_hw_info_get(struct amd64_pvt *pvt)
+{
+ pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc)
+ return -ENOMEM;
+
+ umc_prep_chip_selects(pvt);
+ umc_read_base_mask(pvt);
+ umc_read_mc_regs(pvt);
+ umc_determine_memory_type(pvt);
+
+ return 0;
}
/*
- * returns a pointer to the family descriptor on success, NULL otherwise.
+ * The CPUs have one channel per UMC, so UMC number is equivalent to a
+ * channel number. The GPUs have 8 channels per UMC, so the UMC number no
+ * longer works as a channel number.
+ *
+ * The channel number within a GPU UMC is given in MCA_IPID[15:12].
+ * However, the IDs are split such that two UMC values go to one UMC, and
+ * the channel numbers are split in two groups of four.
+ *
+ * Refer to comment on gpu_get_umc_base().
+ *
+ * For example,
+ * UMC0 CH[3:0] = 0x0005[3:0]000
+ * UMC0 CH[7:4] = 0x0015[3:0]000
+ * UMC1 CH[3:0] = 0x0025[3:0]000
+ * UMC1 CH[7:4] = 0x0035[3:0]000
*/
-static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
+static void gpu_get_err_info(struct mce *m, struct err_info *err)
{
- struct amd64_family_type *fam_type = NULL;
+ u8 ch = (m->ipid & GENMASK(31, 0)) >> 20;
+ u8 phy = ((m->ipid >> 12) & 0xf);
+
+ err->channel = ch % 2 ? phy + 4 : phy;
+ err->csrow = phy;
+}
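For readers cross-checking the bit positions above, here is a minimal userspace re-derivation of the same extraction (plain C; decode_gpu_ipid() and the IPID value 0x00153000 are illustrative, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the ch/phy split used by gpu_get_err_info() above. */
	static void decode_gpu_ipid(uint32_t ipid_lo)
	{
		uint8_t ch  = ipid_lo >> 20;		/* which half of the UMC pair */
		uint8_t phy = (ipid_lo >> 12) & 0xf;	/* channel within a group of four */
		uint8_t channel = (ch % 2) ? phy + 4 : phy;

		/* 0x00153000 -> ch=1 (odd), phy=3, so channel=7, csrow=3 */
		printf("channel=%u csrow=%u\n", channel, phy);
	}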
+
+static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
+ unsigned int cs_mode, int csrow_nr)
+{
+ u32 addr_mask = pvt->csels[umc].csmasks[csrow_nr];
+ u32 addr_mask_sec = pvt->csels[umc].csmasks_sec[csrow_nr];
+
+ return __addr_mask_to_cs_size(addr_mask, addr_mask_sec, cs_mode, csrow_nr, csrow_nr >> 1);
+}
+
+static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
+{
+ int size, cs_mode, cs = 0;
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+
+ cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
+
+ for_each_chip_select(cs, ctrl, pvt) {
+ size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs);
+ amd64_info(EDAC_MC ": %d: %5dMB\n", cs, size);
+ }
+}
+
+static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
+{
+ struct amd64_umc *umc;
+ u32 i;
+
+ for_each_umc(i) {
+ umc = &pvt->umc[i];
+
+ edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
+ edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
+ edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
+ edac_dbg(1, "UMC%d All HBMs support ECC: yes\n", i);
+
+ gpu_debug_display_dimm_sizes(pvt, i);
+ }
+}
+
+static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
+{
+ u32 nr_pages;
+ int cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
+
+ nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
+ nr_pages <<= 20 - PAGE_SHIFT;
+
+ edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct);
+ edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
+
+ return nr_pages;
+}
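For reference, with the usual 4 KiB pages (PAGE_SHIFT = 12) the shift above is 20 - 12 = 8, so a chip select that decodes to, say, 2048 MB is reported as 2048 << 8 = 524288 pages.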
+
+static void gpu_init_csrows(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ struct dimm_info *dimm;
+ u8 umc, cs;
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ if (!csrow_enabled(cs, umc, pvt))
+ continue;
+
+ dimm = mci->csrows[umc]->channels[cs]->dimm;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, cs);
+
+ dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = pvt->dram_type;
+ dimm->dtype = DEV_X16;
+ dimm->grain = 64;
+ }
+ }
+}
+
+static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
+ mci->mtype_cap = MEM_FLAG_HBM2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+
+ mci->edac_cap = EDAC_FLAG_EC;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->ctl_name = pvt->ctl_name;
+ mci->dev_name = pci_name(pvt->F3);
+ mci->ctl_page_to_phys = NULL;
+
+ gpu_init_csrows(mci);
+}
+
+/* ECC is enabled by default on GPU nodes */
+static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
+{
+ return true;
+}
+
+static inline u32 gpu_get_umc_base(struct amd64_pvt *pvt, u8 umc, u8 channel)
+{
+ /*
+ * On CPUs, there is one channel per UMC, so UMC numbering equals
+ * channel numbering. On GPUs, there are eight channels per UMC,
+ * so the channel numbering is different from UMC numbering.
+ *
+ * On CPU nodes channels are selected in 6th nibble
+ * UMC chY[3:0]= [(chY*2 + 1) : (chY*2)]50000;
+ *
+ * On GPU nodes channels are selected in 3rd nibble
+ * HBM chX[3:0]= [Y ]5X[3:0]000;
+ * HBM chX[7:4]= [Y+1]5X[3:0]000
+ *
+ * On MI300 APU nodes, same as GPU nodes but channels are selected
+ * in the base address of 0x90000
+ */
+ umc *= 2;
+
+ if (channel >= 4)
+ umc++;
+
+ return pvt->gpu_umc_base + (umc << 20) + ((channel % 4) << 12);
+}
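As a quick sanity check of the arithmetic, a standalone sketch (userspace C; gpu_umc_base_example() is a hypothetical name) evaluated for an MI200-style node with gpu_umc_base = 0x50000: umc=1, channel=6 gives 0x50000 + (3 << 20) + (2 << 12) = 0x00352000, which lines up with the UMC1 CH[7:4] pattern quoted above.

	#include <stdint.h>

	/* Same math as gpu_get_umc_base(), without struct amd64_pvt. */
	static uint32_t gpu_umc_base_example(uint32_t gpu_umc_base, uint8_t umc, uint8_t channel)
	{
		umc *= 2;		/* two register apertures per UMC */
		if (channel >= 4)
			umc++;		/* channels 4-7 sit in the second aperture */

		return gpu_umc_base + ((uint32_t)umc << 20) + ((uint32_t)(channel % 4) << 12);
	}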
+
+static void gpu_read_mc_regs(struct amd64_pvt *pvt)
+{
+ u8 nid = pvt->mc_node_id;
+ struct amd64_umc *umc;
+ u32 i, tmp, umc_base;
+
+ /* Read registers from each UMC */
+ for_each_umc(i) {
+ umc_base = gpu_get_umc_base(pvt, i, 0);
+ umc = &pvt->umc[i];
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &tmp))
+ umc->umc_cfg = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &tmp))
+ umc->sdp_ctrl = tmp;
+
+ if (!amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &tmp))
+ umc->ecc_ctrl = tmp;
+ }
+}
+
+static void gpu_read_base_mask(struct amd64_pvt *pvt)
+{
+ u32 base_reg, mask_reg;
+ u32 *base, *mask;
+ int umc, cs;
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ base_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_BASE_ADDR;
+ base = &pvt->csels[umc].csbases[cs];
+
+ if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
+ edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *base, base_reg);
+ }
+
+ mask_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_ADDR_MASK;
+ mask = &pvt->csels[umc].csmasks[cs];
+
+ if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
+ edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
+ umc, cs, *mask, mask_reg);
+ }
+ }
+ }
+}
+
+static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
+{
+ int umc;
+
+ for_each_umc(umc) {
+ pvt->csels[umc].b_cnt = 8;
+ pvt->csels[umc].m_cnt = 8;
+ }
+}
+
+static int gpu_hw_info_get(struct amd64_pvt *pvt)
+{
+ int ret;
+
+ ret = gpu_get_node_map(pvt);
+ if (ret)
+ return ret;
+
+ pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
+ if (!pvt->umc)
+ return -ENOMEM;
+
+ gpu_prep_chip_selects(pvt);
+ gpu_read_base_mask(pvt);
+ gpu_read_mc_regs(pvt);
+
+ return 0;
+}
+
+static void hw_info_put(struct amd64_pvt *pvt)
+{
+ pci_dev_put(pvt->F1);
+ pci_dev_put(pvt->F2);
+ kfree(pvt->umc);
+ kfree(pvt->csels);
+}
+
+static struct low_ops umc_ops = {
+ .hw_info_get = umc_hw_info_get,
+ .ecc_enabled = umc_ecc_enabled,
+ .setup_mci_misc_attrs = umc_setup_mci_misc_attrs,
+ .dump_misc_regs = umc_dump_misc_regs,
+ .get_err_info = umc_get_err_info,
+};
+
+static struct low_ops gpu_ops = {
+ .hw_info_get = gpu_hw_info_get,
+ .ecc_enabled = gpu_ecc_enabled,
+ .setup_mci_misc_attrs = gpu_setup_mci_misc_attrs,
+ .dump_misc_regs = gpu_dump_misc_regs,
+ .get_err_info = gpu_get_err_info,
+};
+
+/* Use Family 16h versions for defaults and adjust as needed below. */
+static struct low_ops dct_ops = {
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .hw_info_get = dct_hw_info_get,
+ .ecc_enabled = dct_ecc_enabled,
+ .setup_mci_misc_attrs = dct_setup_mci_misc_attrs,
+ .dump_misc_regs = dct_dump_misc_regs,
+};
+
+static int per_family_init(struct amd64_pvt *pvt)
+{
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- pvt->stepping = boot_cpu_data.x86_mask;
+ pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
+ char *tmp_name = NULL;
+ pvt->max_mcs = 2;
+
+ /*
+ * Decide on which ops group to use here and do any family/model
+ * overrides below.
+ */
+ if (pvt->fam >= 0x17)
+ pvt->ops = &umc_ops;
+ else
+ pvt->ops = &dct_ops;
switch (pvt->fam) {
case 0xf:
- fam_type = &family_types[K8_CPUS];
- pvt->ops = &family_types[K8_CPUS].ops;
+ tmp_name = (pvt->ext_model >= K8_REV_F) ?
+ "K8 revF or later" : "K8 revE or earlier";
+ pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
+ pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
+ pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
break;
case 0x10:
- fam_type = &family_types[F10_CPUS];
- pvt->ops = &family_types[F10_CPUS].ops;
+ pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
+ pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
break;
case 0x15:
- if (pvt->model == 0x30) {
- fam_type = &family_types[F15_M30H_CPUS];
- pvt->ops = &family_types[F15_M30H_CPUS].ops;
+ switch (pvt->model) {
+ case 0x30:
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
+ break;
+ case 0x60:
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
+ pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
break;
- } else if (pvt->model == 0x60) {
- fam_type = &family_types[F15_M60H_CPUS];
- pvt->ops = &family_types[F15_M60H_CPUS].ops;
+ case 0x13:
+ /* Richland is only client */
+ return -ENODEV;
+ default:
+ pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
+ pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
break;
}
-
- fam_type = &family_types[F15_CPUS];
- pvt->ops = &family_types[F15_CPUS].ops;
break;
case 0x16:
- if (pvt->model == 0x30) {
- fam_type = &family_types[F16_M30H_CPUS];
- pvt->ops = &family_types[F16_M30H_CPUS].ops;
+ switch (pvt->model) {
+ case 0x30:
+ pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
+ break;
+ default:
+ pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
+ pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
break;
}
- fam_type = &family_types[F16_CPUS];
- pvt->ops = &family_types[F16_CPUS].ops;
break;
case 0x17:
- fam_type = &family_types[F17_CPUS];
- pvt->ops = &family_types[F17_CPUS].ops;
+ switch (pvt->model) {
+ case 0x30 ... 0x3f:
+ pvt->max_mcs = 8;
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case 0x18:
+ break;
+
+ case 0x19:
+ switch (pvt->model) {
+ case 0x00 ... 0x0f:
+ pvt->max_mcs = 8;
+ break;
+ case 0x10 ... 0x1f:
+ pvt->max_mcs = 12;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x30 ... 0x3f:
+ if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
+ tmp_name = "MI200";
+ pvt->max_mcs = 4;
+ pvt->dram_type = MEM_HBM2;
+ pvt->gpu_umc_base = 0x50000;
+ pvt->ops = &gpu_ops;
+ } else {
+ pvt->max_mcs = 8;
+ }
+ break;
+ case 0x60 ... 0x6f:
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x70 ... 0x7f:
+ pvt->max_mcs = 4;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x90 ... 0x9f:
+ pvt->max_mcs = 4;
+ pvt->dram_type = MEM_HBM3;
+ pvt->gpu_umc_base = 0x90000;
+ pvt->ops = &gpu_ops;
+ break;
+ case 0xa0 ... 0xaf:
+ pvt->max_mcs = 12;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ }
+ break;
+
+ case 0x1A:
+ switch (pvt->model) {
+ case 0x00 ... 0x1f:
+ pvt->max_mcs = 12;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x40 ... 0x4f:
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x50 ... 0x57:
+ case 0xc0 ... 0xc7:
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x90 ... 0x9f:
+ case 0xa0 ... 0xaf:
+ pvt->max_mcs = 8;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ }
break;
default:
amd64_err("Unsupported family!\n");
- return NULL;
+ return -ENODEV;
}
- amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (pvt->fam == 0xf ?
- (pvt->ext_model >= K8_REV_F ? "revF or later "
- : "revE or earlier ")
- : ""), pvt->mc_node_id);
- return fam_type;
+ if (tmp_name)
+		scnprintf(pvt->ctl_name, sizeof(pvt->ctl_name), "%s", tmp_name);
+ else
+ scnprintf(pvt->ctl_name, sizeof(pvt->ctl_name), "F%02Xh_M%02Xh",
+ pvt->fam, pvt->model);
+
+ pvt->csels = kcalloc(pvt->max_mcs, sizeof(*pvt->csels), GFP_KERNEL);
+ if (!pvt->csels)
+ return -ENOMEM;
+
+ return 0;
}
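As an example of the fallback naming, a family 0x19, model 0x10 part (which sets max_mcs but no explicit tmp_name) ends up with a generated ctl_name of "F19h_M10h"; K8 and MI200 nodes keep the explicit strings instead.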
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
- &amd64_edac_dbg_group,
-#endif
-#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
- &amd64_edac_inj_group,
+ &dbg_group,
+ &inj_group,
#endif
NULL
};
-static int init_one_instance(unsigned int nid)
+/*
+ * For heterogeneous and APU models EDAC CHIP_SELECT and CHANNEL layers
+ * should be swapped to fit into the layers.
+ */
+static unsigned int get_layer_size(struct amd64_pvt *pvt, u8 layer)
+{
+ bool is_gpu = (pvt->ops == &gpu_ops);
+
+ if (!layer)
+ return is_gpu ? pvt->max_mcs
+ : pvt->csels[0].b_cnt;
+ else
+ return is_gpu ? pvt->csels[0].b_cnt
+ : pvt->max_mcs;
+}
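For orientation, the two get_layer_size() calls in init_one_instance() below then resolve to the following sizes (values shown for an MI200-style node from per_family_init() versus a default CPU node; illustrative only):

	layers[0].size = get_layer_size(pvt, 0);	/* GPU: 4 (UMCs as csrows),           CPU: csels[0].b_cnt */
	layers[1].size = get_layer_size(pvt, 1);	/* GPU: 8 (chip selects as channels), CPU: 2 (max_mcs)    */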
+
+static int init_one_instance(struct amd64_pvt *pvt)
{
- struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
- struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- struct amd64_pvt *pvt = NULL;
- u16 pci_id1, pci_id2;
- int err = 0, ret;
-
- ret = -ENOMEM;
- pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
- if (!pvt)
- goto err_ret;
-
- pvt->mc_node_id = nid;
- pvt->F3 = F3;
+ int ret = -ENOMEM;
- ret = -EINVAL;
- fam_type = per_family_init(pvt);
- if (!fam_type)
- goto err_free;
-
- if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
- if (!pvt->umc) {
- ret = -ENOMEM;
- goto err_free;
- }
-
- pci_id1 = fam_type->f0_id;
- pci_id2 = fam_type->f6_id;
- } else {
- pci_id1 = fam_type->f1_id;
- pci_id2 = fam_type->f2_id;
- }
-
- err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
- if (err)
- goto err_post_init;
-
- read_mc_regs(pvt);
-
- /*
- * We need to determine how many memory channels there are. Then use
- * that information for calculating the size of the dynamic instance
- * tables in the 'mci' structure.
- */
- ret = -EINVAL;
- pvt->channel_count = pvt->ops->early_channel_count(pvt);
- if (pvt->channel_count < 0)
- goto err_siblings;
-
- ret = -ENOMEM;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = pvt->csels[0].b_cnt;
+ layers[0].size = get_layer_size(pvt, 0);
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
-
- /*
- * Always allocate two channels since we can have setups with DIMMs on
- * only one channel. Also, this simplifies handling later for the price
- * of a couple of KBs tops.
- */
- layers[1].size = 2;
+ layers[1].size = get_layer_size(pvt, 1);
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
+ mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
if (!mci)
- goto err_siblings;
+ return ret;
mci->pvt_info = pvt;
mci->pdev = &pvt->F3->dev;
- setup_mci_misc_attrs(mci, fam_type);
-
- if (init_csrows(mci))
- mci->edac_cap = EDAC_FLAG_NONE;
+ pvt->ops->setup_mci_misc_attrs(mci);
ret = -ENODEV;
if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
edac_dbg(1, "failed edac_mc_add_mc()\n");
- goto err_add_mc;
+ edac_mc_free(mci);
+ return ret;
}
return 0;
+}
-err_add_mc:
- edac_mc_free(mci);
-
-err_siblings:
- free_mc_sibling_devs(pvt);
-
-err_post_init:
- if (pvt->fam >= 0x17)
- kfree(pvt->umc);
+static bool instance_has_memory(struct amd64_pvt *pvt)
+{
+ bool cs_enabled = false;
+ int cs = 0, dct = 0;
-err_free:
- kfree(pvt);
+ for (dct = 0; dct < pvt->max_mcs; dct++) {
+ for_each_chip_select(cs, dct, pvt)
+ cs_enabled |= csrow_enabled(cs, dct, pvt);
+ }
-err_ret:
- return ret;
+ return cs_enabled;
}
static int probe_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
+ struct amd64_pvt *pvt = NULL;
struct ecc_settings *s;
int ret;
@@ -3333,8 +4006,29 @@ static int probe_one_instance(unsigned int nid)
ecc_stngs[nid] = s;
- if (!ecc_enabled(F3, nid)) {
- ret = 0;
+ pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
+ if (!pvt)
+ goto err_settings;
+
+ pvt->mc_node_id = nid;
+ pvt->F3 = F3;
+
+ ret = per_family_init(pvt);
+ if (ret < 0)
+ goto err_enable;
+
+ ret = pvt->ops->hw_info_get(pvt);
+ if (ret < 0)
+ goto err_enable;
+
+ ret = 0;
+ if (!instance_has_memory(pvt)) {
+ amd64_info("Node %d: No DIMMs detected.\n", nid);
+ goto err_enable;
+ }
+
+ if (!pvt->ops->ecc_enabled(pvt)) {
+ ret = -ENODEV;
if (!ecc_enable_override)
goto err_enable;
@@ -3349,7 +4043,7 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
- ret = init_one_instance(nid);
+ ret = init_one_instance(pvt);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
@@ -3359,9 +4053,18 @@ static int probe_one_instance(unsigned int nid)
goto err_enable;
}
+ amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
+
+ /* Display and decode various registers for debug purposes. */
+ pvt->ops->dump_misc_regs(pvt);
+
return ret;
err_enable:
+ hw_info_put(pvt);
+ kfree(pvt);
+
+err_settings:
kfree(s);
ecc_stngs[nid] = NULL;
@@ -3376,9 +4079,6 @@ static void remove_one_instance(unsigned int nid)
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- mci = find_mci_by_dev(&F3->dev);
- WARN_ON(!mci);
-
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&F3->dev);
if (!mci)
@@ -3388,35 +4088,23 @@ static void remove_one_instance(unsigned int nid)
restore_ecc_error_reporting(s, nid, F3);
- free_mc_sibling_devs(pvt);
-
kfree(ecc_stngs[nid]);
ecc_stngs[nid] = NULL;
/* Free the EDAC CORE resources */
mci->pvt_info = NULL;
+ hw_info_put(pvt);
kfree(pvt);
edac_mc_free(mci);
}
static void setup_pci_device(void)
{
- struct mem_ctl_info *mci;
- struct amd64_pvt *pvt;
-
if (pci_ctl)
return;
- mci = edac_mc_find(0);
- if (!mci)
- return;
-
- pvt = mci->pvt_info;
- if (pvt->umc)
- pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
- else
- pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
+ pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
if (!pci_ctl) {
pr_warn("%s(): Unable to create PCI control\n", __func__);
pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
@@ -3424,30 +4112,41 @@ static void setup_pci_device(void)
}
static const struct x86_cpu_id amd64_cpuids[] = {
- { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
- { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
+ X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
+ X86_MATCH_VENDOR_FAM(AMD, 0x1A, NULL),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
+ const char *owner;
int err = -ENODEV;
int i;
+ if (ghes_get_devices())
+ return -EBUSY;
+
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
if (!x86_match_cpu(amd64_cpuids))
return -ENODEV;
- if (amd_cache_northbridges() < 0)
+ if (!amd_nb_num())
return -ENODEV;
opstate_init();
err = -ENOMEM;
- ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
+ ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
if (!ecc_stngs)
goto err_free;
@@ -3472,25 +4171,22 @@ static int __init amd64_edac_init(void)
}
/* register stuff with EDAC MCE */
- if (report_gart_errors)
- amd_report_gart_errors(true);
-
- if (boot_cpu_data.x86 >= 0x17)
+ if (boot_cpu_data.x86 >= 0x17) {
amd_register_ecc_decoder(decode_umc_error);
- else
+ } else {
amd_register_ecc_decoder(decode_bus_error);
-
- setup_pci_device();
+ setup_pci_device();
+ }
#ifdef CONFIG_X86_32
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif
- printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
-
return 0;
err_pci:
+ pci_ctl_dev = NULL;
+
msrs_free(msrs);
msrs = NULL;
@@ -3509,8 +4205,6 @@ static void __exit amd64_edac_exit(void)
edac_pci_release_generic_ctl(pci_ctl);
/* unregister from EDAC MCE */
- amd_report_gart_errors(false);
-
if (boot_cpu_data.x86 >= 0x17)
amd_unregister_ecc_decoder(decode_umc_error);
else
@@ -3522,6 +4216,8 @@ static void __exit amd64_edac_exit(void)
kfree(ecc_stngs);
ecc_stngs = NULL;
+ pci_ctl_dev = NULL;
+
msrs_free(msrs);
msrs = NULL;
}
@@ -3530,10 +4226,8 @@ module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
- "Dave Peterson, Thayne Harbaugh");
-MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
- EDAC_AMD64_VERSION);
+MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
+MODULE_DESCRIPTION("MC support for AMD64 memory controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");