author	Mauro Carvalho Chehab <mchehab@kernel.org>	2023-06-09 10:12:41 +0100
committer	Mauro Carvalho Chehab <mchehab@kernel.org>	2023-06-09 10:12:41 +0100
commit	be9aac187433af6abba5fcc2e73d91d0794ba360 (patch)
tree	1bb929c3f885241617a0bba2086466daab6d6b50 /drivers/cxl/core/pci.c
parent	aafeeaf3d2a8a91a5407c774c578abec79dcff00 (diff)
parent	9561de3a55bed6bdd44a12820ba81ec416e705a7 (diff)
Merge tag 'v6.4-rc5' into media_stage
Linux 6.4-rc5

* tag 'v6.4-rc5': (919 commits)
  Linux 6.4-rc5
  leds: qcom-lpg: Fix PWM period limits
  selftests/ftrace: Choose target function for filter test from samples
  KVM: selftests: Add test for race in kvm_recalculate_apic_map()
  KVM: x86: Bail from kvm_recalculate_phys_map() if x2APIC ID is out-of-bounds
  KVM: x86: Account fastpath-only VM-Exits in vCPU stats
  KVM: SVM: vNMI pending bit is V_NMI_PENDING_MASK not V_NMI_BLOCKING_MASK
  KVM: x86/mmu: Grab memslot for correct address space in NX recovery worker
  tpm, tpm_tis: correct tpm_tis_flags enumeration values
  Revert "ext4: remove ac->ac_found > sbi->s_mb_min_to_scan dead check in ext4_mb_check_limits"
  media: uvcvideo: Don't expose unsupported formats to userspace
  media: v4l2-subdev: Fix missing kerneldoc for client_caps
  media: staging: media: imx: initialize hs_settle to avoid warning
  media: v4l2-mc: Drop subdev check in v4l2_create_fwnode_links_to_pad()
  riscv: Implement missing huge_ptep_get
  riscv: Fix huge_ptep_set_wrprotect when PTE is a NAPOT
  module/decompress: Fix error checking on zstd decompression
  fork, vhost: Use CLONE_THREAD to fix freezer/ps regression
  dt-bindings: serial: 8250_omap: add rs485-rts-active-high
  selinux: don't use make's grouped targets feature yet
  ...
Diffstat (limited to 'drivers/cxl/core/pci.c')
-rw-r--r-- drivers/cxl/core/pci.c | 112
1 file changed, 99 insertions(+), 13 deletions(-)
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index f332fe7af92b..67f4ab6daa34 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -101,23 +101,57 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
-/*
- * Wait up to @media_ready_timeout for the device to report memory
- * active.
- */
-int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec;
+ bool valid = false;
+ int rc, i;
+ u32 temp;
+
+ if (id > CXL_DVSEC_RANGE_MAX)
+ return -EINVAL;
+
+ /* Check MEM INFO VALID bit first, give up after 1s */
+ i = 1;
+ do {
+ rc = pci_read_config_dword(pdev,
+ d + CXL_DVSEC_RANGE_SIZE_LOW(id),
+ &temp);
+ if (rc)
+ return rc;
+
+ valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
+ if (valid)
+ break;
+ msleep(1000);
+ } while (i--);
+
+ if (!valid) {
+ dev_err(&pdev->dev,
+ "Timeout awaiting memory range %d valid after 1s.\n",
+ id);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
struct pci_dev *pdev = to_pci_dev(cxlds->dev);
int d = cxlds->cxl_dvsec;
bool active = false;
- u64 md_status;
int rc, i;
+ u32 temp;
- for (i = media_ready_timeout; i; i--) {
- u32 temp;
+ if (id > CXL_DVSEC_RANGE_MAX)
+ return -EINVAL;
+ /* Check MEM ACTIVE bit, up to 60s timeout by default */
+ for (i = media_ready_timeout; i; i--) {
rc = pci_read_config_dword(
- pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
+ pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
if (rc)
return rc;
 		active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
 		if (active)
 			break;
 		msleep(1000);
 	}
@@ -134,6 +168,39 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
return -ETIMEDOUT;
}
+ return 0;
+}
+
+/*
+ * Wait up to @media_ready_timeout for the device to report memory
+ * active.
+ */
+int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ int d = cxlds->cxl_dvsec;
+ int rc, i, hdm_count;
+ u64 md_status;
+ u16 cap;
+
+ rc = pci_read_config_word(pdev,
+ d + CXL_DVSEC_CAP_OFFSET, &cap);
+ if (rc)
+ return rc;
+
+ hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
+ for (i = 0; i < hdm_count; i++) {
+ rc = cxl_dvsec_mem_range_valid(cxlds, i);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < hdm_count; i++) {
+ rc = cxl_dvsec_mem_range_active(cxlds, i);
+ if (rc)
+ return rc;
+ }
+
md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
if (!CXLMDEV_READY(md_status))
return -EIO;
@@ -241,17 +308,36 @@ static void disable_hdm(void *_cxlhdm)
 	writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}
-static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
+int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
{
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ void __iomem *hdm;
u32 global_ctrl;
+ /*
+ * If the hdm capability was not mapped there is nothing to enable and
+ * the caller is responsible for what happens next. For example,
+ * emulate a passthrough decoder.
+ */
+ if (IS_ERR(cxlhdm))
+ return 0;
+
+ hdm = cxlhdm->regs.hdm_decoder;
global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+
+ /*
+ * If the HDM decoder capability was enabled on entry, skip
+ * registering disable_hdm() since this decode capability may be
+ * owned by platform firmware.
+ */
+ if (global_ctrl & CXL_HDM_DECODER_ENABLE)
+ return 0;
+
writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
hdm + CXL_HDM_DECODER_CTRL_OFFSET);
- return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
+ return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm);
}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL);
int cxl_dvsec_rr_decode(struct device *dev, int d,
struct cxl_endpoint_dvsec_info *info)
@@ -425,7 +511,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
if (info->mem_enabled)
return 0;
- rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+ rc = devm_cxl_enable_hdm(port, cxlhdm);
if (rc)
return rc;
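
The two helpers this patch adds, cxl_dvsec_mem_range_valid() and cxl_dvsec_mem_range_active(), share one idiom: read a DVSEC range register once per second and test a status bit until it is set or a timeout expires. Below is a minimal standalone sketch of that idiom; cxl_poll_config_bit and its parameters are hypothetical illustration names, not part of this patch or the CXL core.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>

/*
 * Hypothetical helper: poll one status bit in a config dword, sleeping
 * 1s between reads, for roughly @timeout_s seconds.  It mirrors the
 * loops in cxl_dvsec_mem_range_valid() and cxl_dvsec_mem_range_active()
 * above.  A plain (val & bit) test stands in for FIELD_GET(), which
 * requires a compile-time-constant mask.
 */
static int cxl_poll_config_bit(struct pci_dev *pdev, int reg, u32 bit,
			       int timeout_s)
{
	u32 val;
	int rc;

	do {
		rc = pci_read_config_dword(pdev, reg, &val);
		if (rc)
			return rc;
		if (val & bit)
			return 0;
		if (timeout_s)
			msleep(1000);
	} while (timeout_s--);

	return -ETIMEDOUT;
}

In these terms, the valid check polls CXL_DVSEC_MEM_INFO_VALID in RANGE_SIZE_LOW for about 1s, and the active check polls CXL_DVSEC_MEM_ACTIVE for up to media_ready_timeout seconds, once per HDM range reported by the DVSEC capability word.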