Diffstat (limited to 'drivers/cxl/core/pci.c')
-rw-r--r-- | drivers/cxl/core/pci.c | 362
1 file changed, 243 insertions, 119 deletions
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 0df09bd79408..b50551601c2e 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -101,7 +101,7 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
 		return ctx.error;
 	return ctx.count;
 }
-EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, "CXL");
 
 static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
 {
@@ -209,38 +209,7 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
 
 	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL);
-
-static int wait_for_valid(struct pci_dev *pdev, int d)
-{
-	u32 val;
-	int rc;
-
-	/*
-	 * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high
-	 * and Size Low registers are valid. Must be set within 1 second of
-	 * deassertion of reset to CXL device. Likely it is already set by the
-	 * time this runs, but otherwise give a 1.5 second timeout in case of
-	 * clock skew.
-	 */
-	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
-	if (rc)
-		return rc;
-
-	if (val & CXL_DVSEC_MEM_INFO_VALID)
-		return 0;
-
-	msleep(1500);
-
-	rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val);
-	if (rc)
-		return rc;
-
-	if (val & CXL_DVSEC_MEM_INFO_VALID)
-		return 0;
-
-	return -ETIMEDOUT;
-}
+EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, "CXL");
 
 static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
 {
@@ -283,9 +252,9 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
 }
 
 /* require dvsec ranges to be covered by a locked platform window */
-static int dvsec_range_allowed(struct device *dev, void *arg)
+static int dvsec_range_allowed(struct device *dev, const void *arg)
 {
-	struct range *dev_range = arg;
+	const struct range *dev_range = arg;
 	struct cxl_decoder *cxld;
 
 	if (!is_root_decoder(dev))
@@ -322,11 +291,13 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
 	return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
 }
 
-int cxl_dvsec_rr_decode(struct device *dev, int d,
+int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
 			struct cxl_endpoint_dvsec_info *info)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+	struct device *dev = cxlds->dev;
 	int hdm_count, rc, i, ranges = 0;
+	int d = cxlds->cxl_dvsec;
 	u16 cap, ctrl;
 
 	if (!d) {
@@ -338,10 +309,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
 	if (rc)
 		return rc;
 
-	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
-	if (rc)
-		return rc;
-
 	if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
 		dev_dbg(dev, "Not MEM Capable\n");
 		return -ENXIO;
@@ -357,17 +324,15 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
 	if (!hdm_count || hdm_count > 2)
 		return -EINVAL;
 
-	rc = wait_for_valid(pdev, d);
-	if (rc) {
-		dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc);
-		return rc;
-	}
-
 	/*
 	 * The current DVSEC values are moot if the memory capability is
 	 * disabled, and they will remain moot after the HDM Decoder
 	 * capability is enabled.
 	 */
+	rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
+	if (rc)
+		return rc;
+
 	info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
 	if (!info->mem_enabled)
 		return 0;
@@ -376,6 +341,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
 		u64 base, size;
 		u32 temp;
 
+		rc = cxl_dvsec_mem_range_valid(cxlds, i);
+		if (rc)
+			return rc;
+
 		rc = pci_read_config_dword(
 			pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
 		if (rc)
@@ -390,10 +359,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
 
 		size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
 		if (!size) {
-			info->dvsec_range[i] = (struct range) {
-				.start = 0,
-				.end = CXL_RESOURCE_NONE,
-			};
 			continue;
 		}
 
@@ -411,19 +376,17 @@ int cxl_dvsec_rr_decode(struct device *dev, int d,
 
 		base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;
 
-		info->dvsec_range[i] = (struct range) {
+		info->dvsec_range[ranges++] = (struct range) {
 			.start = base,
 			.end = base + size - 1
 		};
-
-		ranges++;
 	}
 
 	info->ranges = ranges;
 
 	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, "CXL");
 
 /**
  * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
@@ -452,9 +415,40 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
 	 */
 	if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
 		return devm_cxl_enable_mem(&port->dev, cxlds);
-	else if (!hdm)
+
+	/*
+	 * If the HDM Decoder Capability does not exist and DVSEC was
+	 * not setup, the DVSEC based emulation cannot be used.
+	 */
+	if (!hdm)
 		return -ENODEV;
 
+	/* The HDM Decoder Capability exists but is globally disabled. */
+
+	/*
+	 * If the DVSEC CXL Range registers are not enabled, just
+	 * enable and use the HDM Decoder Capability registers.
+	 */
+	if (!info->mem_enabled) {
+		rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+		if (rc)
+			return rc;
+
+		return devm_cxl_enable_mem(&port->dev, cxlds);
+	}
+
+	/*
+	 * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
+	 * [High,Low] when HDM operation is enabled the range register values
+	 * are ignored by the device, but the spec also recommends matching the
+	 * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
+	 * are expected even though Linux does not require or maintain that
	 * match. Check if at least one DVSEC range is enabled and allowed by
+	 * the platform. That is, the DVSEC range must be covered by a locked
+	 * platform window (CFMWS). Fail otherwise as the endpoint's decoders
+	 * cannot be used.
+	 */
+
 	root = to_cxl_port(port->dev.parent);
 	while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
 		root = to_cxl_port(root->dev.parent);
@@ -463,7 +457,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
 		return -ENODEV;
 	}
 
-	for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) {
+	for (i = 0, allowed = 0; i < info->ranges; i++) {
 		struct device *cxld_dev;
 
 		cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
@@ -477,30 +471,14 @@
 		allowed++;
 	}
 
-	if (!allowed && info->mem_enabled) {
+	if (!allowed) {
 		dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
 		return -ENXIO;
 	}
 
-	/*
-	 * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
-	 * [High,Low] when HDM operation is enabled the range register values
-	 * are ignored by the device, but the spec also recommends matching the
-	 * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
-	 * are expected even though Linux does not require or maintain that
-	 * match. If at least one DVSEC range is enabled and allowed, skip HDM
-	 * Decoder Capability Enable.
-	 */
-	if (info->mem_enabled)
-		return 0;
-
-	rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
-	if (rc)
-		return rc;
-
-	return devm_cxl_enable_mem(&port->dev, cxlds);
+	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");
 
 #define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
 #define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
@@ -525,7 +503,7 @@ static int cxl_cdat_get_length(struct device *dev,
 	__le32 response[2];
 	int rc;
 
-	rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
+	rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
 		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
 		     &request, sizeof(request),
 		     &response, sizeof(response));
@@ -555,7 +533,7 @@ static int cxl_cdat_read_table(struct device *dev,
 		__le32 request = CDAT_DOE_REQ(entry_handle);
 		int rc;
 
-		rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
+		rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
 			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
 			     &request, sizeof(request),
 			     rsp, sizeof(*rsp) + remaining);
@@ -640,7 +618,7 @@ void read_cdat_data(struct cxl_port *port)
 	if (!pdev)
 		return;
 
-	doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+	doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL,
 				      CXL_DOE_PROTOCOL_TABLE_ACCESS);
 	if (!doe_mb) {
 		dev_dbg(dev, "No CDAT mailbox\n");
@@ -684,7 +662,7 @@ err:
 	devm_kfree(dev, buf);
 	dev_err(dev, "Failed to read/validate CDAT.\n");
 }
-EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
+EXPORT_SYMBOL_NS_GPL(read_cdat_data, "CXL");
 
 static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
 				 void __iomem *ras_base)
@@ -772,22 +750,20 @@ static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
 
 static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
 {
-	struct cxl_rcrb_info *ri = &dport->rcrb;
-	void __iomem *dport_aer = NULL;
 	resource_size_t aer_phys;
 	struct device *host;
+	u16 aer_cap;
 
-	if (dport->rch && ri->aer_cap) {
+	aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base);
+	if (aer_cap) {
 		host = dport->reg_map.host;
-		aer_phys = ri->aer_cap + ri->base;
-		dport_aer = devm_cxl_iomap_block(host, aer_phys,
-				sizeof(struct aer_capability_regs));
+		aer_phys = aer_cap + dport->rcrb.base;
+		dport->regs.dport_aer = devm_cxl_iomap_block(host, aer_phys,
+				sizeof(struct aer_capability_regs));
 	}
-
-	dport->regs.dport_aer = dport_aer;
 }
 
-static void cxl_dport_map_regs(struct cxl_dport *dport)
+static void cxl_dport_map_ras(struct cxl_dport *dport)
 {
 	struct cxl_register_map *map = &dport->reg_map;
 	struct device *dev = dport->dport_dev;
@@ -797,22 +773,16 @@ static void cxl_dport_map_regs(struct cxl_dport *dport)
 	else if (cxl_map_component_regs(map, &dport->regs.component,
 					BIT(CXL_CM_CAP_CAP_ID_RAS)))
 		dev_dbg(dev, "Failed to map RAS capability.\n");
-
-	if (dport->rch)
-		cxl_dport_map_rch_aer(dport);
 }
 
 static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
 {
 	void __iomem *aer_base = dport->regs.dport_aer;
-	struct pci_host_bridge *bridge;
 	u32 aer_cmd_mask, aer_cmd;
 
 	if (!aer_base)
 		return;
 
-	bridge = to_pci_host_bridge(dport->dport_dev);
-
 	/*
 	 * Disable RCH root port command interrupts.
 	 * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
@@ -821,32 +791,35 @@ static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
 	 * the root cmd register's interrupts is required. But, PCI spec
 	 * shows these are disabled by default on reset.
 	 */
-	if (bridge->native_aer) {
-		aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
-				PCI_ERR_ROOT_CMD_NONFATAL_EN |
-				PCI_ERR_ROOT_CMD_FATAL_EN);
-		aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
-		aer_cmd &= ~aer_cmd_mask;
-		writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
-	}
+	aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+			PCI_ERR_ROOT_CMD_NONFATAL_EN |
+			PCI_ERR_ROOT_CMD_FATAL_EN);
+	aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+	aer_cmd &= ~aer_cmd_mask;
+	writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
 }
 
-void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+/**
+ * cxl_dport_init_ras_reporting - Setup CXL RAS report on this dport
+ * @dport: the cxl_dport that needs to be initialized
+ * @host: host device for devm operations
+ */
+void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
 {
-	struct device *dport_dev = dport->dport_dev;
-	struct pci_host_bridge *host_bridge;
+	dport->reg_map.host = host;
+	cxl_dport_map_ras(dport);
 
-	host_bridge = to_pci_host_bridge(dport_dev);
-	if (host_bridge->native_aer)
-		dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+	if (dport->rch) {
+		struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport->dport_dev);
 
-	dport->reg_map.host = host;
-	cxl_dport_map_regs(dport);
+		if (!host_bridge->native_aer)
+			return;
 
-	if (dport->rch)
+		cxl_dport_map_rch_aer(dport);
 		cxl_disable_rch_root_ints(dport);
+	}
 }
-EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, "CXL");
 
 static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
 				      struct cxl_dport *dport)
@@ -913,15 +886,13 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
 	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
 	struct aer_capability_regs aer_regs;
 	struct cxl_dport *dport;
-	struct cxl_port *port;
 	int severity;
 
-	port = cxl_pci_find_port(pdev, &dport);
+	struct cxl_port *port __free(put_cxl_port) =
+		cxl_pci_find_port(pdev, &dport);
 	if (!port)
 		return;
 
-	put_device(&port->dev);
-
 	if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
 		return;
 
@@ -959,7 +930,7 @@
 		cxl_handle_endpoint_cor_ras(cxlds);
 	}
 }
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, "CXL");
 
 pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
 				    pci_channel_state_t state)
@@ -1009,7 +980,7 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
 	}
 	return PCI_ERS_RESULT_NEED_RESET;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_error_detected, "CXL");
 
 static int cxl_flit_size(struct pci_dev *pdev)
 {
@@ -1045,3 +1016,156 @@ long cxl_pci_get_latency(struct pci_dev *pdev)
 
 	return cxl_flit_size(pdev) * MEGA / bw;
 }
+
+static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
+{
+	struct cxl_port *port = data;
+	struct cxl_decoder *cxld;
+	struct cxl_hdm *cxlhdm;
+	void __iomem *hdm;
+	u32 ctrl;
+
+	if (!is_endpoint_decoder(dev))
+		return 0;
+
+	cxld = to_cxl_decoder(dev);
+	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+		return 0;
+
+	cxlhdm = dev_get_drvdata(&port->dev);
+	hdm = cxlhdm->regs.hdm_decoder;
+	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+
+	return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
+}
+
+bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
+{
+	return device_for_each_child(&port->dev, port,
+				     __cxl_endpoint_decoder_reset_detected);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, "CXL");
+
+int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
+{
+	int speed, bw;
+	u16 lnksta;
+	u32 width;
+
+	speed = pcie_link_speed_mbps(pdev);
+	if (speed < 0)
+		return speed;
+	speed /= BITS_PER_BYTE;
+
+	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
+	bw = speed * width;
+
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		c[i].read_bandwidth = bw;
+		c[i].write_bandwidth = bw;
+	}
+
+	return 0;
+}
+
+/*
+ * Set max timeout such that platforms will optimize GPF flow to avoid
+ * the implied worst-case scenario delays. On a sane platform, all
+ * devices should always complete GPF within the energy budget of
+ * the GPF flow. The kernel does not have enough information to pick
+ * anything better than "maximize timeouts and hope it works".
+ *
+ * A misbehaving device could block forward progress of GPF for all
+ * the other devices, exhausting the energy budget of the platform.
+ * However, the spec seems to assume that moving on from slow to respond
+ * devices is a virtue. It is not possible to know that, in actuality,
+ * the slow to respond device is *the* most critical device in the
+ * system to wait.
+ */
+#define GPF_TIMEOUT_BASE_MAX 2
+#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */
+
+u16 cxl_gpf_get_dvsec(struct device *dev)
+{
+	struct pci_dev *pdev;
+	bool is_port = true;
+	u16 dvsec;
+
+	if (!dev_is_pci(dev))
+		return 0;
+
+	pdev = to_pci_dev(dev);
+	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT)
+		is_port = false;
+
+	dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
+			is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF);
+	if (!dvsec)
+		dev_warn(dev, "%s GPF DVSEC not present\n",
+			 is_port ? "Port" : "Device");
+	return dvsec;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL");
+
+static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase)
+{
+	u64 base, scale;
+	int rc, offset;
+	u16 ctrl;
+
+	switch (phase) {
+	case 1:
+		offset = CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET;
+		base = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK;
+		scale = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK;
+		break;
+	case 2:
+		offset = CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET;
+		base = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK;
+		scale = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = pci_read_config_word(pdev, dvsec + offset, &ctrl);
+	if (rc)
+		return rc;
+
+	if (FIELD_GET(base, ctrl) == GPF_TIMEOUT_BASE_MAX &&
+	    FIELD_GET(scale, ctrl) == GPF_TIMEOUT_SCALE_MAX)
+		return 0;
+
+	ctrl = FIELD_PREP(base, GPF_TIMEOUT_BASE_MAX);
+	ctrl |= FIELD_PREP(scale, GPF_TIMEOUT_SCALE_MAX);
+
+	rc = pci_write_config_word(pdev, dvsec + offset, ctrl);
+	if (!rc)
+		pci_dbg(pdev, "Port GPF phase %d timeout: %d0 secs\n",
+			phase, GPF_TIMEOUT_BASE_MAX);
+
+	return rc;
+}
+
+int cxl_gpf_port_setup(struct cxl_dport *dport)
+{
+	if (!dport)
+		return -EINVAL;
+
+	if (!dport->gpf_dvsec) {
+		struct pci_dev *pdev;
+		int dvsec;
+
+		dvsec = cxl_gpf_get_dvsec(dport->dport_dev);
+		if (!dvsec)
+			return -EINVAL;
+
+		dport->gpf_dvsec = dvsec;
+		pdev = to_pci_dev(dport->dport_dev);
+		update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1);
+		update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2);
+	}
+
+	return 0;
+}
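Note on the dport RAS setup rename visible above: cxl_setup_parent_dport(host, dport) becomes cxl_dport_init_ras_reporting(dport, host), so the dport now comes first and the devm host second, and the RCH AER window is only mapped when the host bridge owns AER natively. The sketch below is not part of this diff; the probe function and variable names are hypothetical and only illustrate the new calling convention under the assumption of an ordinary endpoint probe path.

/* Hypothetical caller, for illustration only; assumes normal kernel/CXL core context. */
static int example_endpoint_probe(struct device *endpoint_host,
				  struct cxl_dport *parent_dport)
{
	/* Old call was: cxl_setup_parent_dport(endpoint_host, parent_dport); */
	cxl_dport_init_ras_reporting(parent_dport, endpoint_host);

	/* Also added in this diff: program worst-case GPF timeouts on the parent port. */
	return cxl_gpf_port_setup(parent_dport);
}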