author		Dan Williams <dan.j.williams@intel.com>	2023-10-31 10:59:00 -0700
committer	Dan Williams <dan.j.williams@intel.com>	2023-10-31 10:59:00 -0700
commit		7f946e6d830fbdf411cd0641314edf11831efc88 (patch)
tree		4e3cfe2157e52e0d5aba6c548cd643f297a393c7 /drivers/cxl
parent		8f61d48c83f6e3525a770e44692604595693f787 (diff)
parent		e8db0701605bccbeb8d7907ecd2e50f346a725bd (diff)
Merge branch 'for-6.7/cxl-rch-eh' into cxl/next
Restricted CXL Host (RCH) Error Handling undoes the topology munging of
CXL 1.1 to enable some AER recovery, and lands some base infrastructure
for handling Root-Complex-Event-Collectors (RCECs) with CXL. Include
this long-running series, finally, for v6.7.
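Background for this merge: an RCH exposes its downstream port registers through a memory-mapped Root Complex Register Block (RCRB) rather than ordinary PCI config space, so the AER capability must be located by walking the PCIe extended capability list with MMIO reads. A minimal sketch of that walk, mirroring the cxl_rcrb_to_aer() helper added below; rcrb_base is a hypothetical, already-ioremap()ed 4K RCRB, not a name from the patch:

	#include <linux/io.h>
	#include <linux/pci_regs.h>

	/* Sketch: find the AER extended capability in an MMIO-mapped RCRB. */
	static u16 find_rcrb_aer_cap(void __iomem *rcrb_base)
	{
		u16 offset = 0;
		u32 cap_hdr = readl(rcrb_base + offset);

		while (PCI_EXT_CAP_ID(cap_hdr) != PCI_EXT_CAP_ID_ERR) {
			offset = PCI_EXT_CAP_NEXT(cap_hdr);
			/* a next pointer of 0 terminates the list */
			if (!offset)
				break;
			cap_hdr = readl(rcrb_base + offset);
		}

		return offset; /* 0 if the AER capability was not found */
	}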
Diffstat (limited to 'drivers/cxl')
-rw-r--r--	drivers/cxl/core/core.h	1
-rw-r--r--	drivers/cxl/core/hdm.c	48
-rw-r--r--	drivers/cxl/core/mbox.c	2
-rw-r--r--	drivers/cxl/core/pci.c	223
-rw-r--r--	drivers/cxl/core/port.c	129
-rw-r--r--	drivers/cxl/core/regs.c	73
-rw-r--r--	drivers/cxl/cxl.h	34
-rw-r--r--	drivers/cxl/cxlmem.h	4
-rw-r--r--	drivers/cxl/mem.c	7
-rw-r--r--	drivers/cxl/pci.c	14
10 files changed, 406 insertions(+), 129 deletions(-)
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 8e5f3d84311e..86d7ba23235e 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -73,6 +73,7 @@ struct cxl_rcrb_info;
 resource_size_t __rcrb_to_component(struct device *dev,
				     struct cxl_rcrb_info *ri,
				     enum cxl_rcrb which);
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
 
 extern struct rw_semaphore cxl_dpa_rwsem;
 extern struct rw_semaphore cxl_region_rwsem;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 506c9e14cdf9..d3d338509d96 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -81,26 +81,6 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
 		cxlhdm->interleave_mask |= GENMASK(14, 12);
 }
 
-static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
-				struct cxl_component_regs *regs)
-{
-	struct cxl_register_map map = {
-		.dev = &port->dev,
-		.resource = port->component_reg_phys,
-		.base = crb,
-		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
-	};
-
-	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
-	if (!map.component_map.hdm_decoder.valid) {
-		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
-		/* unique error code to indicate no HDM decoder capability */
-		return -ENODEV;
-	}
-
-	return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
-}
-
 static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 {
 	struct cxl_hdm *cxlhdm;
@@ -153,9 +133,9 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
 {
+	struct cxl_register_map *reg_map = &port->reg_map;
 	struct device *dev = &port->dev;
 	struct cxl_hdm *cxlhdm;
-	void __iomem *crb;
 	int rc;
 
 	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
@@ -164,19 +144,29 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
 	cxlhdm->port = port;
 	dev_set_drvdata(dev, cxlhdm);
 
-	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
-	if (!crb && info && info->mem_enabled) {
+	/* Memory devices can configure device HDM using DVSEC range regs. */
+	if (reg_map->resource == CXL_RESOURCE_NONE) {
+		if (!info || !info->mem_enabled) {
+			dev_err(dev, "No component registers mapped\n");
+			return ERR_PTR(-ENXIO);
+		}
+
 		cxlhdm->decoder_count = info->ranges;
 		return cxlhdm;
-	} else if (!crb) {
-		dev_err(dev, "No component registers mapped\n");
-		return ERR_PTR(-ENXIO);
 	}
 
-	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
-	iounmap(crb);
-	if (rc)
+	if (!reg_map->component_map.hdm_decoder.valid) {
+		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+		/* unique error code to indicate no HDM decoder capability */
+		return ERR_PTR(-ENODEV);
+	}
+
+	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
+				    BIT(CXL_CM_CAP_CAP_ID_HDM));
+	if (rc) {
+		dev_err(dev, "Failed to map HDM capability.\n");
 		return ERR_PTR(rc);
+	}
 
 	parse_hdm_decoder_caps(cxlhdm);
 	if (cxlhdm->decoder_count == 0) {
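The hdm.c change above drops the ad hoc ioremap()/iounmap() of the whole component register block in favor of the cxl_register_map populated at enumeration time; consumers now map only the capability they need by ID. A condensed sketch of the resulting pattern, assuming port->reg_map was filled in by the endpoint driver (map_hdm_only() is a hypothetical name, not patch code):

	/* Probe-once, map-later: map just the HDM decoder capability. */
	static int map_hdm_only(struct cxl_port *port,
				struct cxl_component_regs *regs)
	{
		struct cxl_register_map *reg_map = &port->reg_map;

		/* CXL_RESOURCE_NONE: no component registers were enumerated */
		if (reg_map->resource == CXL_RESOURCE_NONE)
			return -ENODEV;

		/* capability presence was recorded when the block was probed */
		if (!reg_map->component_map.hdm_decoder.valid)
			return -ENODEV;

		/* map only the HDM capability; devm-bound to reg_map->host */
		return cxl_map_component_regs(reg_map, regs,
					      BIT(CXL_CM_CAP_CAP_ID_HDM));
	}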
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index b91bb9886991..72ee522e747d 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1402,6 +1402,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 	mutex_init(&mds->mbox_mutex);
 	mutex_init(&mds->event.log_lock);
 	mds->cxlds.dev = dev;
+	mds->cxlds.reg_map.host = dev;
+	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
 	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
 
 	return mds;
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c7a7887ebdcf..3da195caa4ad 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/pci-doe.h>
+#include <linux/aer.h>
 #include <cxlpci.h>
 #include <cxlmem.h>
 #include <cxl.h>
@@ -646,32 +647,36 @@ void read_cdat_data(struct cxl_port *port)
 }
 EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
 
-void cxl_cor_error_detected(struct pci_dev *pdev)
+static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
+				 void __iomem *ras_base)
 {
-	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
 	void __iomem *addr;
 	u32 status;
 
-	if (!cxlds->regs.ras)
+	if (!ras_base)
 		return;
 
-	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
+	addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
 	status = readl(addr);
 	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
 		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
 		trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
 	}
 }
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
+static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
+{
+	return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
+}
 
 /* CXL spec rev3.0 8.2.4.16.1 */
-static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+static void header_log_copy(void __iomem *ras_base, u32 *log)
 {
 	void __iomem *addr;
 	u32 *log_addr;
 	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
 
-	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+	addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
 	log_addr = log;
 
 	for (i = 0; i < log_u32_size; i++) {
@@ -685,17 +690,18 @@ static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
  * Log the state of the RAS status registers and prepare them to log the
  * next error status. Return 1 if reset needed.
  */
-static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
+			     void __iomem *ras_base)
 {
 	u32 hl[CXL_HEADERLOG_SIZE_U32];
 	void __iomem *addr;
 	u32 status;
 	u32 fe;
 
-	if (!cxlds->regs.ras)
+	if (!ras_base)
 		return false;
 
-	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+	addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
 	status = readl(addr);
 	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
 		return false;
@@ -703,7 +709,7 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
 	/* If multiple errors, log header points to first error from ctrl reg */
 	if (hweight32(status) > 1) {
 		void __iomem *rcc_addr =
-			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+			ras_base + CXL_RAS_CAP_CONTROL_OFFSET;
 
 		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
				   readl(rcc_addr)));
@@ -711,13 +717,201 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
 		fe = status;
 	}
 
-	header_log_copy(cxlds, hl);
+	header_log_copy(ras_base, hl);
 	trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
 	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
 
 	return true;
 }
 
+static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
+{
+	return __cxl_handle_ras(cxlds, cxlds->regs.ras);
+}
+
+#ifdef CONFIG_PCIEAER_CXL
+
+static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
+{
+	struct cxl_rcrb_info *ri = &dport->rcrb;
+	void __iomem *dport_aer = NULL;
+	resource_size_t aer_phys;
+	struct device *host;
+
+	if (dport->rch && ri->aer_cap) {
+		host = dport->reg_map.host;
+		aer_phys = ri->aer_cap + ri->base;
+		dport_aer = devm_cxl_iomap_block(host, aer_phys,
+				sizeof(struct aer_capability_regs));
+	}
+
+	dport->regs.dport_aer = dport_aer;
+}
+
+static void cxl_dport_map_regs(struct cxl_dport *dport)
+{
+	struct cxl_register_map *map = &dport->reg_map;
+	struct device *dev = dport->dport_dev;
+
+	if (!map->component_map.ras.valid)
+		dev_dbg(dev, "RAS registers not found\n");
+	else if (cxl_map_component_regs(map, &dport->regs.component,
+					BIT(CXL_CM_CAP_CAP_ID_RAS)))
+		dev_dbg(dev, "Failed to map RAS capability.\n");
+
+	if (dport->rch)
+		cxl_dport_map_rch_aer(dport);
+}
+
+static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
+{
+	void __iomem *aer_base = dport->regs.dport_aer;
+	struct pci_host_bridge *bridge;
+	u32 aer_cmd_mask, aer_cmd;
+
+	if (!aer_base)
+		return;
+
+	bridge = to_pci_host_bridge(dport->dport_dev);
+
+	/*
+	 * Disable RCH root port command interrupts.
+	 * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
+	 *
+	 * This sequence may not be necessary. CXL spec states disabling
+	 * the root cmd register's interrupts is required. But, PCI spec
+	 * shows these are disabled by default on reset.
+	 */
+	if (bridge->native_cxl_error) {
+		aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+				PCI_ERR_ROOT_CMD_NONFATAL_EN |
+				PCI_ERR_ROOT_CMD_FATAL_EN);
+		aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+		aer_cmd &= ~aer_cmd_mask;
+		writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
+	}
+}
+
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+{
+	struct device *dport_dev = dport->dport_dev;
+	struct pci_host_bridge *host_bridge;
+
+	host_bridge = to_pci_host_bridge(dport_dev);
+	if (host_bridge->native_cxl_error)
+		dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+
+	dport->reg_map.host = host;
+	cxl_dport_map_regs(dport);
+
+	if (dport->rch)
+		cxl_disable_rch_root_ints(dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+
+static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
+				      struct cxl_dport *dport)
+{
+	return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
+}
+
+static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
+				  struct cxl_dport *dport)
+{
+	return __cxl_handle_ras(cxlds, dport->regs.ras);
+}
+
+/*
+ * Copy the AER capability registers using 32 bit read accesses.
+ * This is necessary because RCRB AER capability is MMIO mapped. Clear the
+ * status after copying.
+ *
+ * @aer_base: base address of AER capability block in RCRB
+ * @aer_regs: destination for copying AER capability
+ */
+static bool cxl_rch_get_aer_info(void __iomem *aer_base,
+				 struct aer_capability_regs *aer_regs)
+{
+	int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
+	u32 *aer_regs_buf = (u32 *)aer_regs;
+	int n;
+
+	if (!aer_base)
+		return false;
+
+	/* Use readl() to guarantee 32-bit accesses */
+	for (n = 0; n < read_cnt; n++)
+		aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
+
+	writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
+	writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
+
+	return true;
+}
+
+/* Get AER severity. Return false if there is no error. */
+static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
+				     int *severity)
+{
+	if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
+		if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
+			*severity = AER_FATAL;
+		else
+			*severity = AER_NONFATAL;
+		return true;
+	}
+
+	if (aer_regs->cor_status & ~aer_regs->cor_mask) {
+		*severity = AER_CORRECTABLE;
+		return true;
+	}
+
+	return false;
+}
+
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
+{
+	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+	struct aer_capability_regs aer_regs;
+	struct cxl_dport *dport;
+	struct cxl_port *port;
+	int severity;
+
+	port = cxl_pci_find_port(pdev, &dport);
+	if (!port)
+		return;
+
+	put_device(&port->dev);
+
+	if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
+		return;
+
+	if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
+		return;
+
+	pci_print_aer(pdev, severity, &aer_regs);
+
+	if (severity == AER_CORRECTABLE)
+		cxl_handle_rdport_cor_ras(cxlds, dport);
+	else
+		cxl_handle_rdport_ras(cxlds, dport);
+}
+
+#else
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
+#endif
+
+void cxl_cor_error_detected(struct pci_dev *pdev)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+
+	if (cxlds->rcd)
+		cxl_handle_rdport_errors(cxlds);
+
+	cxl_handle_endpoint_cor_ras(cxlds);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
 pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
				    pci_channel_state_t state)
 {
@@ -726,13 +920,16 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
 	struct device *dev = &cxlmd->dev;
 	bool ue;
 
+	if (cxlds->rcd)
+		cxl_handle_rdport_errors(cxlds);
+
 	/*
	 * A frozen channel indicates an impending reset which is fatal to
	 * CXL.mem operation, and will likely crash the system. On the off
	 * chance the situation is recoverable dump the status of the RAS
	 * capability registers and bounce the active state of the memdev.
	 */
-	ue = cxl_report_and_clear(cxlds);
+	ue = cxl_handle_endpoint_ras(cxlds);
 
 	switch (state) {
 	case pci_channel_io_normal:
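Worth calling out from the hunk above: cxl_rch_get_aer_info() copies the whole aer_capability_regs with an explicit readl() loop because the RCRB-resident AER block is MMIO, where only aligned 32-bit accesses are guaranteed, unlike config-space accessors. The idiom generalizes; a minimal sketch (mmio_copy_u32() is illustrative, not a kernel API), assuming dst is naturally aligned and bytes is a multiple of 4:

	/* Copy an MMIO register block using only 32-bit reads. */
	static void mmio_copy_u32(void *dst, const void __iomem *src,
				  size_t bytes)
	{
		u32 *out = dst;
		size_t i;

		for (i = 0; i < bytes / sizeof(u32); i++)
			out[i] = readl(src + i * sizeof(u32));
	}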
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 5ba606c6e03f..e2f888224362 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -625,7 +625,6 @@ static int devm_cxl_link_parent_dport(struct device *host,
 static struct lock_class_key cxl_port_key;
 
 static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
-				       resource_size_t component_reg_phys,
				       struct cxl_dport *parent_dport)
 {
 	struct cxl_port *port;
@@ -676,7 +675,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
 	} else
 		dev->parent = uport_dev;
 
-	port->component_reg_phys = component_reg_phys;
 	ida_init(&port->decoder_ida);
 	port->hdm_end = -1;
 	port->commit_end = -1;
@@ -697,19 +695,21 @@ err:
 	return ERR_PTR(rc);
 }
 
-static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
+static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
			       resource_size_t component_reg_phys)
 {
-	if (component_reg_phys == CXL_RESOURCE_NONE)
-		return 0;
-
 	*map = (struct cxl_register_map) {
-		.dev = dev,
-		.reg_type = CXL_REGLOC_RBI_COMPONENT,
+		.host = host,
+		.reg_type = CXL_REGLOC_RBI_EMPTY,
 		.resource = component_reg_phys,
-		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
 	};
 
+	if (component_reg_phys == CXL_RESOURCE_NONE)
+		return 0;
+
+	map->reg_type = CXL_REGLOC_RBI_COMPONENT;
+	map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
+
 	return cxl_setup_regs(map);
 }
 
@@ -718,17 +718,27 @@ static int cxl_port_setup_regs(struct cxl_port *port,
 {
 	if (dev_is_platform(port->uport_dev))
 		return 0;
-	return cxl_setup_comp_regs(&port->dev, &port->comp_map,
+	return cxl_setup_comp_regs(&port->dev, &port->reg_map,
				   component_reg_phys);
 }
 
-static int cxl_dport_setup_regs(struct cxl_dport *dport,
+static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
				resource_size_t component_reg_phys)
 {
+	int rc;
+
 	if (dev_is_platform(dport->dport_dev))
 		return 0;
-	return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
-				   component_reg_phys);
+
+	/*
+	 * use @dport->dport_dev for the context for error messages during
+	 * register probing, and fixup @host after the fact, since @host may be
+	 * NULL.
+	 */
+	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
+				 component_reg_phys);
+	dport->reg_map.host = host;
+	return rc;
 }
 
 static struct cxl_port *__devm_cxl_add_port(struct device *host,
@@ -740,21 +750,36 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
 	struct device *dev;
 	int rc;
 
-	port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
+	port = cxl_port_alloc(uport_dev, parent_dport);
 	if (IS_ERR(port))
 		return port;
 
 	dev = &port->dev;
-	if (is_cxl_memdev(uport_dev))
+	if (is_cxl_memdev(uport_dev)) {
+		struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
+		struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
 		rc = dev_set_name(dev, "endpoint%d", port->id);
-	else if (parent_dport)
+		if (rc)
+			goto err;
+
+		/*
+		 * The endpoint driver already enumerated the component and RAS
+		 * registers. Reuse that enumeration while prepping them
+		 * to be mapped by the cxl_port driver.
+		 */
+		port->reg_map = cxlds->reg_map;
+		port->reg_map.host = &port->dev;
+	} else if (parent_dport) {
 		rc = dev_set_name(dev, "port%d", port->id);
-	else
-		rc = dev_set_name(dev, "root%d", port->id);
-	if (rc)
-		goto err;
+		if (rc)
+			goto err;
 
-	rc = cxl_port_setup_regs(port, component_reg_phys);
+		rc = cxl_port_setup_regs(port, component_reg_phys);
+		if (rc)
+			goto err;
+	} else
+		rc = dev_set_name(dev, "root%d", port->id);
 	if (rc)
 		goto err;
 
@@ -989,7 +1014,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
 	if (!dport)
 		return ERR_PTR(-ENOMEM);
 
-	if (rcrb != CXL_RESOURCE_NONE) {
+	dport->dport_dev = dport_dev;
+	dport->port_id = port_id;
+	dport->port = port;
+
+	if (rcrb == CXL_RESOURCE_NONE) {
+		rc = cxl_dport_setup_regs(&port->dev, dport,
+					  component_reg_phys);
+		if (rc)
+			return ERR_PTR(rc);
+	} else {
 		dport->rcrb.base = rcrb;
 		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
							 CXL_RCRB_DOWNSTREAM);
@@ -998,6 +1032,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
 			return ERR_PTR(-ENXIO);
 		}
 
+		/*
+		 * RCH @dport is not ready to map until associated with its
+		 * memdev
+		 */
+		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
+		if (rc)
+			return ERR_PTR(rc);
+
 		dport->rch = true;
 	}
 
@@ -1005,14 +1047,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
 		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
			&component_reg_phys);
 
-	dport->dport_dev = dport_dev;
-	dport->port_id = port_id;
-	dport->port = port;
-
-	rc = cxl_dport_setup_regs(dport, component_reg_phys);
-	if (rc)
-		return ERR_PTR(rc);
-
 	cond_cxl_root_lock(port);
 	rc = add_dport(port, dport);
 	cond_cxl_root_unlock(port);
@@ -1223,35 +1257,39 @@ static struct device *grandparent(struct device *dev)
 	return NULL;
 }
 
+static struct device *endpoint_host(struct cxl_port *endpoint)
+{
+	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
+
+	if (is_cxl_root(port))
+		return port->uport_dev;
+	return &port->dev;
+}
+
 static void delete_endpoint(void *data)
 {
 	struct cxl_memdev *cxlmd = data;
 	struct cxl_port *endpoint = cxlmd->endpoint;
-	struct cxl_port *parent_port;
-	struct device *parent;
+	struct device *host = endpoint_host(endpoint);
 
-	parent_port = cxl_mem_find_port(cxlmd, NULL);
-	if (!parent_port)
-		goto out;
-	parent = &parent_port->dev;
-
-	device_lock(parent);
-	if (parent->driver && !endpoint->dead) {
-		devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
-		devm_release_action(parent, cxl_unlink_uport, endpoint);
-		devm_release_action(parent, unregister_port, endpoint);
+	device_lock(host);
+	if (host->driver && !endpoint->dead) {
+		devm_release_action(host, cxl_unlink_parent_dport, endpoint);
+		devm_release_action(host, cxl_unlink_uport, endpoint);
+		devm_release_action(host, unregister_port, endpoint);
 	}
 	cxlmd->endpoint = NULL;
-	device_unlock(parent);
-	put_device(parent);
-out:
+	device_unlock(host);
 	put_device(&endpoint->dev);
+	put_device(host);
 }
 
 int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
 {
+	struct device *host = endpoint_host(endpoint);
 	struct device *dev = &cxlmd->dev;
 
+	get_device(host);
 	get_device(&endpoint->dev);
 	cxlmd->endpoint = endpoint;
 	cxlmd->depth = endpoint->depth;
@@ -2068,3 +2106,4 @@ static void cxl_core_exit(void)
 subsys_initcall(cxl_core_init);
 module_exit(cxl_core_exit);
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CXL);
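A subtlety in the port.c changes: devm register mappings are tied to @host, but for an RCH dport no suitable host exists at enumeration time, so __devm_cxl_add_dport() probes with a NULL host and cxl_setup_parent_dport() fixes it up once the memdev attaches. A hypothetical condensation of the two phases (rch_dport_lifecycle() is illustrative only; cxl_dport_setup_regs() is static to port.c and shown just to make the ordering explicit):

	static int rch_dport_lifecycle(struct cxl_dport *dport,
				       resource_size_t component_reg_phys,
				       struct device *endpoint_host_dev)
	{
		int rc;

		/*
		 * Phase 1, enumeration: no devm host yet, probe with NULL;
		 * dport->dport_dev supplies the logging context.
		 */
		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
		if (rc)
			return rc;

		/*
		 * Phase 2, memdev attach: the endpoint's host becomes the
		 * devm context; this assigns dport->reg_map.host and maps
		 * the dport RAS and RCRB AER registers.
		 */
		cxl_setup_parent_dport(endpoint_host_dev, dport);
		return 0;
	}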
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 6281127b3e9d..372786f80955 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
			   struct cxl_component_regs *regs,
			   unsigned long map_mask)
 {
-	struct device *dev = map->dev;
+	struct device *host = map->host;
 	struct mapinfo {
 		const struct cxl_reg_map *rmap;
 		void __iomem **addr;
@@ -216,16 +216,16 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
 
 	for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
 		struct mapinfo *mi = &mapinfo[i];
-		resource_size_t phys_addr;
+		resource_size_t addr;
 		resource_size_t length;
 
 		if (!mi->rmap->valid)
 			continue;
 		if (!test_bit(mi->rmap->id, &map_mask))
 			continue;
-		phys_addr = map->resource + mi->rmap->offset;
+		addr = map->resource + mi->rmap->offset;
 		length = mi->rmap->size;
-		*(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
+		*(mi->addr) = devm_cxl_iomap_block(host, addr, length);
 		if (!*(mi->addr))
 			return -ENOMEM;
 	}
@@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
 int cxl_map_device_regs(const struct cxl_register_map *map,
			struct cxl_device_regs *regs)
 {
-	struct device *dev = map->dev;
+	struct device *host = map->host;
 	resource_size_t phys_addr = map->resource;
 	struct mapinfo {
 		const struct cxl_reg_map *rmap;
@@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
 
 		addr = phys_addr + mi->rmap->offset;
 		length = mi->rmap->size;
-		*(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
+		*(mi->addr) = devm_cxl_iomap_block(host, addr, length);
 		if (!*(mi->addr))
 			return -ENOMEM;
 	}
@@ -309,7 +309,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
 	int regloc, i;
 
 	*map = (struct cxl_register_map) {
-		.dev = &pdev->dev,
+		.host = &pdev->dev,
 		.resource = CXL_RESOURCE_NONE,
 	};
 
@@ -386,10 +386,9 @@ int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
 
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
-		     struct cxl_register_map *map)
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
 {
-	struct device *dev = &pdev->dev;
+	struct device *dev = map->host;
 	resource_size_t phys_addr;
 
 	phys_addr = map->resource;
@@ -403,15 +402,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
 
 static int cxl_map_regblock(struct cxl_register_map *map)
 {
-	struct device *dev = map->dev;
+	struct device *host = map->host;
 
 	map->base = ioremap(map->resource, map->max_size);
 	if (!map->base) {
-		dev_err(dev, "failed to map registers\n");
+		dev_err(host, "failed to map registers\n");
 		return -ENOMEM;
 	}
 
-	dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+	dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
 	return 0;
 }
 
@@ -425,28 +424,28 @@ static int cxl_probe_regs(struct cxl_register_map *map)
 {
 	struct cxl_component_reg_map *comp_map;
 	struct cxl_device_reg_map *dev_map;
-	struct device *dev = map->dev;
+	struct device *host = map->host;
 	void __iomem *base = map->base;
 
 	switch (map->reg_type) {
 	case CXL_REGLOC_RBI_COMPONENT:
 		comp_map = &map->component_map;
-		cxl_probe_component_regs(dev, base, comp_map);
-		dev_dbg(dev, "Set up component registers\n");
+		cxl_probe_component_regs(host, base, comp_map);
+		dev_dbg(host, "Set up component registers\n");
 		break;
 	case CXL_REGLOC_RBI_MEMDEV:
 		dev_map = &map->device_map;
-		cxl_probe_device_regs(dev, base, dev_map);
+		cxl_probe_device_regs(host, base, dev_map);
 		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
-			dev_err(dev, "registers not found: %s%s%s\n",
+			dev_err(host, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
 		}
 
-		dev_dbg(dev, "Probing device registers...\n");
+		dev_dbg(host, "Probing device registers...\n");
 		break;
 	default:
 		break;
@@ -470,6 +469,42 @@ int cxl_setup_regs(struct cxl_register_map *map)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
 
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
+{
+	void __iomem *addr;
+	u16 offset = 0;
+	u32 cap_hdr;
+
+	if (WARN_ON_ONCE(rcrb == CXL_RESOURCE_NONE))
+		return 0;
+
+	if (!request_mem_region(rcrb, SZ_4K, dev_name(dev)))
+		return 0;
+
+	addr = ioremap(rcrb, SZ_4K);
+	if (!addr)
+		goto out;
+
+	cap_hdr = readl(addr + offset);
+	while (PCI_EXT_CAP_ID(cap_hdr) != PCI_EXT_CAP_ID_ERR) {
+		offset = PCI_EXT_CAP_NEXT(cap_hdr);
+
+		/* Offset 0 terminates capability list. */
+		if (!offset)
+			break;
+		cap_hdr = readl(addr + offset);
+	}
+
+	if (offset)
+		dev_dbg(dev, "found AER extended capability (0x%x)\n", offset);
+
+	iounmap(addr);
+out:
+	release_mem_region(rcrb, SZ_4K);
+
+	return offset;
+}
+
 resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
				    enum cxl_rcrb which)
 {
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 76d92561af29..378fc96ff7ff 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -221,6 +221,14 @@ struct cxl_regs {
 	struct_group_tagged(cxl_pmu_regs, pmu_regs,
		void __iomem *pmu;
 	);
+
+	/*
+	 * RCH downstream port specific RAS register
+	 * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB
+	 */
+	struct_group_tagged(cxl_rch_regs, rch_regs,
+		void __iomem *dport_aer;
+	);
 };
 
 struct cxl_reg_map {
@@ -247,7 +255,7 @@ struct cxl_pmu_reg_map {
 
 /**
  * struct cxl_register_map - DVSEC harvested register block mapping parameters
- * @dev: device for devm operations and logging
+ * @host: device for devm operations and logging
  * @base: virtual base of the register-block-BAR + @block_offset
  * @resource: physical resource base of the register block
  * @max_size: maximum mapping size to perform register search
@@ -257,7 +265,7 @@ struct cxl_pmu_reg_map {
  * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
  */
 struct cxl_register_map {
-	struct device *dev;
+	struct device *host;
 	void __iomem *base;
 	resource_size_t resource;
 	resource_size_t max_size;
@@ -278,8 +286,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
			   unsigned long map_mask);
 int cxl_map_device_regs(const struct cxl_register_map *map,
			struct cxl_device_regs *regs);
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
-		     struct cxl_register_map *map);
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
 
 enum cxl_regloc_type;
 int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
@@ -572,11 +579,10 @@ struct cxl_dax_region {
 * @regions: cxl_region_ref instances, regions mapped by this port
 * @parent_dport: dport that points to this port in the parent
 * @decoder_ida: allocator for decoder ids
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
 * @nr_dports: number of entries in @dports
 * @hdm_end: track last allocated HDM decoder instance for allocation ordering
 * @commit_end: cursor to track highest committed decoder for commit ordering
- * @component_reg_phys: component register capability base address (optional)
 * @dead: last ep has been removed, force port re-creation
 * @depth: How deep this port is relative to the root. depth 0 is the root.
 * @cdat: Cached CDAT data
@@ -592,11 +598,10 @@ struct cxl_port {
 	struct xarray regions;
 	struct cxl_dport *parent_dport;
 	struct ida decoder_ida;
-	struct cxl_register_map comp_map;
+	struct cxl_register_map reg_map;
 	int nr_dports;
 	int hdm_end;
 	int commit_end;
-	resource_size_t component_reg_phys;
 	bool dead;
 	unsigned int depth;
 	struct cxl_cdat {
@@ -620,19 +625,21 @@ struct cxl_rcrb_info {
 /**
 * struct cxl_dport - CXL downstream port
 * @dport_dev: PCI bridge or firmware device representing the downstream link
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
 * @port_id: unique hardware identifier for dport in decoder target list
 * @rcrb: Data about the Root Complex Register Block layout
 * @rch: Indicate whether this dport was enumerated in RCH or VH mode
 * @port: reference to cxl_port that contains this downstream port
+ * @regs: Dport parsed register blocks
 */
 struct cxl_dport {
 	struct device *dport_dev;
-	struct cxl_register_map comp_map;
+	struct cxl_register_map reg_map;
 	int port_id;
 	struct cxl_rcrb_info rcrb;
 	bool rch;
 	struct cxl_port *port;
+	struct cxl_regs regs;
 };
 
 /**
@@ -706,6 +713,13 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb);
 
+#ifdef CONFIG_PCIEAER_CXL
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
+#else
+static inline void cxl_setup_parent_dport(struct device *host,
					  struct cxl_dport *dport) { }
+#endif
+
 struct cxl_decoder *to_cxl_decoder(struct device *dev);
 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
 struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 6933bc20e76b..a2fcbca253f3 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -400,6 +400,7 @@ enum cxl_devtype {
 *
 * @dev: The device associated with this CXL state
 * @cxlmd: The device representing the CXL.mem capabilities of @dev
+ * @reg_map: component and ras register mapping parameters
 * @regs: Parsed register blocks
 * @cxl_dvsec: Offset to the PCIe device DVSEC
 * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
@@ -407,13 +408,13 @@ enum cxl_devtype {
 * @dpa_res: Overall DPA resource tree for the device
 * @pmem_res: Active Persistent memory capacity configuration
 * @ram_res: Active Volatile memory capacity configuration
- * @component_reg_phys: register base of component registers
 * @serial: PCIe Device Serial Number
 * @type: Generic Memory Class device or Vendor Specific Memory device
 */
 struct cxl_dev_state {
 	struct device *dev;
 	struct cxl_memdev *cxlmd;
+	struct cxl_register_map reg_map;
 	struct cxl_regs regs;
 	int cxl_dvsec;
 	bool rcd;
@@ -421,7 +422,6 @@ struct cxl_dev_state {
 	struct resource dpa_res;
 	struct resource pmem_res;
 	struct resource ram_res;
-	resource_size_t component_reg_phys;
 	u64 serial;
 	enum cxl_devtype type;
 };
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 317c7548e4e9..e087febf9af0 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -49,7 +49,6 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
				 struct cxl_dport *parent_dport)
 {
 	struct cxl_port *parent_port = parent_dport->port;
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct cxl_port *endpoint, *iter, *down;
 	int rc;
 
@@ -65,8 +64,8 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
 		ep->next = down;
 	}
 
-	endpoint = devm_cxl_add_port(host, &cxlmd->dev,
-				     cxlds->component_reg_phys,
+	/* Note: endpoint port component registers are derived from @cxlds */
+	endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
				     parent_dport);
 	if (IS_ERR(endpoint))
 		return PTR_ERR(endpoint);
@@ -158,6 +157,8 @@ static int cxl_mem_probe(struct device *dev)
 	else
 		endpoint_parent = &parent_port->dev;
 
+	cxl_setup_parent_dport(dev, dport);
+
 	device_lock(endpoint_parent);
 	if (!endpoint_parent->driver) {
 		dev_err(dev, "CXL port topology %s not enabled\n",
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 06fafe59c054..4067afca6389 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -474,7 +474,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
 	resource_size_t component_reg_phys;
 
 	*map = (struct cxl_register_map) {
-		.dev = &pdev->dev,
+		.host = &pdev->dev,
 		.resource = CXL_RESOURCE_NONE,
 	};
 
@@ -824,16 +824,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
-	cxlds->component_reg_phys = CXL_RESOURCE_NONE;
-	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
+				&cxlds->reg_map);
 	if (rc)
 		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
-	else if (!map.component_map.ras.valid)
+	else if (!cxlds->reg_map.component_map.ras.valid)
 		dev_dbg(&pdev->dev, "RAS registers not found\n");
 
-	cxlds->component_reg_phys = map.resource;
-
-	rc = cxl_map_component_regs(&map, &cxlds->regs.component,
+	rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
				    BIT(CXL_CM_CAP_CAP_ID_RAS));
 	if (rc)
 		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
@@ -894,7 +892,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		break;
 	}
 
-	rc = cxl_map_pmu_regs(pdev, &pmu_regs, &map);
+	rc = cxl_map_pmu_regs(&map, &pmu_regs);
 	if (rc) {
 		dev_dbg(&pdev->dev, "Could not map PMU regs\n");
 		break;
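For context on how the reworked entry points are consumed: cxl_cor_error_detected() and cxl_error_detected() are plugged into the PCI error-recovery machinery through struct pci_error_handlers, as the cxl_pci driver does; cxl_slot_reset() and cxl_error_resume() are pre-existing helpers that this merge does not touch:

	static const struct pci_error_handlers cxl_error_handlers = {
		.error_detected		= cxl_error_detected,
		.slot_reset		= cxl_slot_reset,
		.resume			= cxl_error_resume,
		.cor_error_detected	= cxl_cor_error_detected,
	};

With that wiring, an AER event on an RCD first drains the RCH downstream port's RCRB AER and RAS status via cxl_handle_rdport_errors(), then falls through to the endpoint's own RAS handling.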