Diffstat (limited to 'drivers/cxl/core')
-rw-r--r-- | drivers/cxl/core/Makefile | 5
-rw-r--r-- | drivers/cxl/core/acpi.c | 11
-rw-r--r-- | drivers/cxl/core/cdat.c | 613
-rw-r--r-- | drivers/cxl/core/core.h | 54
-rw-r--r-- | drivers/cxl/core/edac.c | 2110
-rw-r--r-- | drivers/cxl/core/features.c | 703
-rw-r--r-- | drivers/cxl/core/hdm.c | 482
-rw-r--r-- | drivers/cxl/core/mbox.c | 450
-rw-r--r-- | drivers/cxl/core/mce.c | 65
-rw-r--r-- | drivers/cxl/core/mce.h | 20
-rw-r--r-- | drivers/cxl/core/memdev.c | 215
-rw-r--r-- | drivers/cxl/core/pci.c | 362
-rw-r--r-- | drivers/cxl/core/pmem.c | 43
-rw-r--r-- | drivers/cxl/core/pmu.c | 2
-rw-r--r-- | drivers/cxl/core/port.c | 388
-rw-r--r-- | drivers/cxl/core/ras.c | 126
-rw-r--r-- | drivers/cxl/core/region.c | 1056
-rw-r--r-- | drivers/cxl/core/regs.c | 140
-rw-r--r-- | drivers/cxl/core/suspend.c | 4
-rw-r--r-- | drivers/cxl/core/trace.c | 91
-rw-r--r-- | drivers/cxl/core/trace.h | 425 |
21 files changed, 6023 insertions, 1342 deletions
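The largest conceptual change in the cdat.c hunks below is that the fixed ram/pmem performance slots are replaced by a per-partition lookup: each DSMAS DPA range is matched against cxlds->part[i].res and the matching partition's perf entry is updated, with a debug message when no partition contains the range. The following is a minimal, self-contained userspace C sketch of that matching step only; the struct layouts, partition data, and range_contains() helper here are simplified stand-ins for the kernel definitions, not the kernel code itself.

/*
 * Standalone sketch (userspace C) of the DPA-range to partition matching
 * pattern used by cxl_memdev_set_qos_class() in the cdat.c hunk below.
 * Types and values are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;
};

/* True if @sub lies entirely within @outer (both ends inclusive). */
static bool range_contains(const struct range *outer, const struct range *sub)
{
	return outer->start <= sub->start && outer->end >= sub->end;
}

struct part {
	struct range res;	/* partition DPA span */
	const char *name;
};

int main(void)
{
	/* Two example partitions: volatile (ram) followed by persistent (pmem). */
	struct part parts[] = {
		{ { 0x00000000, 0x0fffffff }, "ram"  },
		{ { 0x10000000, 0x1fffffff }, "pmem" },
	};
	struct range dsmas = { 0x10000000, 0x17ffffff };
	bool found = false;

	for (size_t i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (range_contains(&parts[i].res, &dsmas)) {
			printf("DSMAS %#llx-%#llx -> partition %s\n",
			       (unsigned long long)dsmas.start,
			       (unsigned long long)dsmas.end, parts[i].name);
			found = true;
			break;
		}
	}

	if (!found)
		printf("no partition for DSMAS range\n");

	return 0;
}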
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index 9259bcc6773c..79e2ef81fde8 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -14,5 +14,10 @@ cxl_core-y += pci.o cxl_core-y += hdm.o cxl_core-y += pmu.o cxl_core-y += cdat.o +cxl_core-y += ras.o +cxl_core-y += acpi.o cxl_core-$(CONFIG_TRACING) += trace.o cxl_core-$(CONFIG_CXL_REGION) += region.o +cxl_core-$(CONFIG_CXL_MCE) += mce.o +cxl_core-$(CONFIG_CXL_FEATURES) += features.o +cxl_core-$(CONFIG_CXL_EDAC_MEM_FEATURES) += edac.o diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c new file mode 100644 index 000000000000..f13b4dae6ac5 --- /dev/null +++ b/drivers/cxl/core/acpi.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation. All rights reserved. */ +#include <linux/acpi.h> +#include "cxl.h" +#include "core.h" + +int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res, + int nid, resource_size_t *size) +{ + return hmat_get_extended_linear_cache_size(backing_res, nid, size); +} diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c index bb83867d9fec..0ccef2f2a26a 100644 --- a/drivers/cxl/core/cdat.c +++ b/drivers/cxl/core/cdat.c @@ -9,13 +9,12 @@ #include "cxlmem.h" #include "core.h" #include "cxl.h" -#include "core.h" struct dsmas_entry { struct range dpa_range; u8 handle; struct access_coordinate coord[ACCESS_COORDINATE_MAX]; - + struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX]; int entries; int qos_class; }; @@ -29,7 +28,7 @@ static u32 cdat_normalize(u16 entry, u64 base, u8 type) */ if (entry == 0xffff || !entry) return 0; - else if (base > (UINT_MAX / (entry))) + if (base > (UINT_MAX / (entry))) return 0; /* @@ -163,7 +162,7 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg, val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base), dslbis->data_type); - cxl_access_coordinate_set(dent->coord, dslbis->data_type, val); + cxl_access_coordinate_set(dent->cdat_coord, dslbis->data_type, val); return 0; } @@ -220,7 +219,7 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port, xa_for_each(dsmas_xa, index, dent) { int qos_class; - cxl_coordinates_combine(dent->coord, dent->coord, ep_c); + cxl_coordinates_combine(dent->coord, dent->cdat_coord, ep_c); dent->entries = 1; rc = cxl_root->ops->qos_class(cxl_root, &dent->coord[ACCESS_COORDINATE_CPU], @@ -241,13 +240,15 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port, static void update_perf_entry(struct device *dev, struct dsmas_entry *dent, struct cxl_dpa_perf *dpa_perf) { - for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { dpa_perf->coord[i] = dent->coord[i]; + dpa_perf->cdat_coord[i] = dent->cdat_coord[i]; + } dpa_perf->dpa_range = dent->dpa_range; dpa_perf->qos_class = dent->qos_class; dev_dbg(dev, - "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", - dent->dpa_range.start, dpa_perf->qos_class, + "DSMAS: dpa: %pra qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", + &dent->dpa_range, dpa_perf->qos_class, dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth, dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth, dent->coord[ACCESS_COORDINATE_CPU].read_latency, @@ -257,29 +258,31 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent, static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds, struct xarray *dsmas_xa) { - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 
struct device *dev = cxlds->dev; - struct range pmem_range = { - .start = cxlds->pmem_res.start, - .end = cxlds->pmem_res.end, - }; - struct range ram_range = { - .start = cxlds->ram_res.start, - .end = cxlds->ram_res.end, - }; struct dsmas_entry *dent; unsigned long index; xa_for_each(dsmas_xa, index, dent) { - if (resource_size(&cxlds->ram_res) && - range_contains(&ram_range, &dent->dpa_range)) - update_perf_entry(dev, dent, &mds->ram_perf); - else if (resource_size(&cxlds->pmem_res) && - range_contains(&pmem_range, &dent->dpa_range)) - update_perf_entry(dev, dent, &mds->pmem_perf); - else - dev_dbg(dev, "no partition for dsmas dpa: %#llx\n", - dent->dpa_range.start); + bool found = false; + + for (int i = 0; i < cxlds->nr_partitions; i++) { + struct resource *res = &cxlds->part[i].res; + struct range range = { + .start = res->start, + .end = res->end, + }; + + if (range_contains(&range, &dent->dpa_range)) { + update_perf_entry(dev, dent, + &cxlds->part[i].perf); + found = true; + break; + } + } + + if (!found) + dev_dbg(dev, "no partition for dsmas dpa: %pra\n", + &dent->dpa_range); } } @@ -342,36 +345,46 @@ static int match_cxlrd_hb(struct device *dev, void *data) return 0; } -static int cxl_qos_class_verify(struct cxl_memdev *cxlmd) +static void cxl_qos_class_verify(struct cxl_memdev *cxlmd) { struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); struct cxl_port *root_port; - int rc; struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(cxlmd->endpoint); + /* + * No need to reset_dpa_perf() here as find_cxl_root() is guaranteed to + * succeed when called in the cxl_endpoint_port_probe() path. + */ if (!cxl_root) - return -ENODEV; + return; root_port = &cxl_root->port; - /* Check that the QTG IDs are all sane between end device and root decoders */ - if (!cxl_qos_match(root_port, &mds->ram_perf)) - reset_dpa_perf(&mds->ram_perf); - if (!cxl_qos_match(root_port, &mds->pmem_perf)) - reset_dpa_perf(&mds->pmem_perf); - - /* Check to make sure that the device's host bridge is under a root decoder */ - rc = device_for_each_child(&root_port->dev, - cxlmd->endpoint->host_bridge, match_cxlrd_hb); - if (!rc) { - reset_dpa_perf(&mds->ram_perf); - reset_dpa_perf(&mds->pmem_perf); + /* + * Save userspace from needing to check if a qos class has any matches + * by hiding qos class info if the memdev is not mapped by a root + * decoder, or the partition class does not match any root decoder + * class. 
+ */ + if (!device_for_each_child(&root_port->dev, + cxlmd->endpoint->host_bridge, + match_cxlrd_hb)) { + for (int i = 0; i < cxlds->nr_partitions; i++) { + struct cxl_dpa_perf *perf = &cxlds->part[i].perf; + + reset_dpa_perf(perf); + } + return; } - return rc; + for (int i = 0; i < cxlds->nr_partitions; i++) { + struct cxl_dpa_perf *perf = &cxlds->part[i].perf; + + if (!cxl_qos_match(root_port, perf)) + reset_dpa_perf(perf); + } } static void discard_dsmas(struct xarray *xa) @@ -415,7 +428,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port) cxl_qos_class_verify(cxlmd); cxl_memdev_update_perf(cxlmd); } -EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, "CXL"); static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg, const unsigned long end) @@ -512,7 +525,7 @@ void cxl_switch_parse_cdat(struct cxl_port *port) if (rc) dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc); } -EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, "CXL"); static void __cxl_coordinates_combine(struct access_coordinate *out, struct access_coordinate *c1, @@ -544,34 +557,510 @@ void cxl_coordinates_combine(struct access_coordinate *out, __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]); } -MODULE_IMPORT_NS(CXL); +MODULE_IMPORT_NS("CXL"); -void cxl_region_perf_data_calculate(struct cxl_region *cxlr, - struct cxl_endpoint_decoder *cxled) +static void cxl_bandwidth_add(struct access_coordinate *coord, + struct access_coordinate *c1, + struct access_coordinate *c2) +{ + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + coord[i].read_bandwidth = c1[i].read_bandwidth + + c2[i].read_bandwidth; + coord[i].write_bandwidth = c1[i].write_bandwidth + + c2[i].write_bandwidth; + } +} + +static bool dpa_perf_contains(struct cxl_dpa_perf *perf, + struct resource *dpa_res) { - struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); struct range dpa = { - .start = cxled->dpa_res->start, - .end = cxled->dpa_res->end, + .start = dpa_res->start, + .end = dpa_res->end, }; + + return range_contains(&perf->dpa_range, &dpa); +} + +static struct cxl_dpa_perf *cxled_get_dpa_perf(struct cxl_endpoint_decoder *cxled) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_dpa_perf *perf; - switch (cxlr->mode) { - case CXL_DECODER_RAM: - perf = &mds->ram_perf; - break; - case CXL_DECODER_PMEM: - perf = &mds->pmem_perf; - break; - default: + if (cxled->part < 0) + return ERR_PTR(-EINVAL); + perf = &cxlds->part[cxled->part].perf; + + if (!perf) + return ERR_PTR(-EINVAL); + + if (!dpa_perf_contains(perf, cxled->dpa_res)) + return ERR_PTR(-EINVAL); + + return perf; +} + +/* + * Transient context for containing the current calculation of bandwidth when + * doing walking the port hierarchy to deal with shared upstream link. + */ +struct cxl_perf_ctx { + struct access_coordinate coord[ACCESS_COORDINATE_MAX]; + struct cxl_port *port; +}; + +/** + * cxl_endpoint_gather_bandwidth - collect all the endpoint bandwidth in an xarray + * @cxlr: CXL region for the bandwidth calculation + * @cxled: endpoint decoder to start on + * @usp_xa: (output) the xarray that collects all the bandwidth coordinates + * indexed by the upstream device with data of 'struct cxl_perf_ctx'. + * @gp_is_root: (output) bool of whether the grandparent is cxl root. 
+ * + * Return: 0 for success or -errno + * + * Collects aggregated endpoint bandwidth and store the bandwidth in + * an xarray indexed by the upstream device of the switch or the RP + * device. Each endpoint consists the minimum of the bandwidth from DSLBIS + * from the endpoint CDAT, the endpoint upstream link bandwidth, and the + * bandwidth from the SSLBIS of the switch CDAT for the switch upstream port to + * the downstream port that's associated with the endpoint. If the + * device is directly connected to a RP, then no SSLBIS is involved. + */ +static int cxl_endpoint_gather_bandwidth(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, + struct xarray *usp_xa, + bool *gp_is_root) +{ + struct cxl_port *endpoint = to_cxl_port(cxled->cxld.dev.parent); + struct cxl_port *parent_port = to_cxl_port(endpoint->dev.parent); + struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent); + struct access_coordinate pci_coord[ACCESS_COORDINATE_MAX]; + struct access_coordinate sw_coord[ACCESS_COORDINATE_MAX]; + struct access_coordinate ep_coord[ACCESS_COORDINATE_MAX]; + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + struct cxl_perf_ctx *perf_ctx; + struct cxl_dpa_perf *perf; + unsigned long index; + void *ptr; + int rc; + + if (!dev_is_pci(cxlds->dev)) + return -ENODEV; + + if (cxlds->rcd) + return -ENODEV; + + perf = cxled_get_dpa_perf(cxled); + if (IS_ERR(perf)) + return PTR_ERR(perf); + + *gp_is_root = is_cxl_root(gp_port); + + /* + * If the grandparent is cxl root, then index is the root port, + * otherwise it's the parent switch upstream device. + */ + if (*gp_is_root) + index = (unsigned long)endpoint->parent_dport->dport_dev; + else + index = (unsigned long)parent_port->uport_dev; + + perf_ctx = xa_load(usp_xa, index); + if (!perf_ctx) { + struct cxl_perf_ctx *c __free(kfree) = + kzalloc(sizeof(*perf_ctx), GFP_KERNEL); + + if (!c) + return -ENOMEM; + ptr = xa_store(usp_xa, index, c, GFP_KERNEL); + if (xa_is_err(ptr)) + return xa_err(ptr); + perf_ctx = no_free_ptr(c); + perf_ctx->port = parent_port; + } + + /* Direct upstream link from EP bandwidth */ + rc = cxl_pci_get_bandwidth(pdev, pci_coord); + if (rc < 0) + return rc; + + /* + * Min of upstream link bandwidth and Endpoint CDAT bandwidth from + * DSLBIS. + */ + cxl_coordinates_combine(ep_coord, pci_coord, perf->cdat_coord); + + /* + * If grandparent port is root, then there's no switch involved and + * the endpoint is connected to a root port. + */ + if (!*gp_is_root) { + /* + * Retrieve the switch SSLBIS for switch downstream port + * associated with the endpoint bandwidth. + */ + rc = cxl_port_get_switch_dport_bandwidth(endpoint, sw_coord); + if (rc) + return rc; + + /* + * Min of the earlier coordinates with the switch SSLBIS + * bandwidth + */ + cxl_coordinates_combine(ep_coord, ep_coord, sw_coord); + } + + /* + * Aggregate the computed bandwidth with the current aggregated bandwidth + * of the endpoints with the same switch upstream device or RP. 
+ */ + cxl_bandwidth_add(perf_ctx->coord, perf_ctx->coord, ep_coord); + + return 0; +} + +static void free_perf_xa(struct xarray *xa) +{ + struct cxl_perf_ctx *ctx; + unsigned long index; + + if (!xa) return; + + xa_for_each(xa, index, ctx) + kfree(ctx); + xa_destroy(xa); + kfree(xa); +} +DEFINE_FREE(free_perf_xa, struct xarray *, if (_T) free_perf_xa(_T)) + +/** + * cxl_switch_gather_bandwidth - collect all the bandwidth at switch level in an xarray + * @cxlr: The region being operated on + * @input_xa: xarray indexed by upstream device of a switch with data of 'struct + * cxl_perf_ctx' + * @gp_is_root: (output) bool of whether the grandparent is cxl root. + * + * Return: a xarray of resulting cxl_perf_ctx per parent switch or root port + * or ERR_PTR(-errno) + * + * Iterate through the xarray. Take the minimum of the downstream calculated + * bandwidth, the upstream link bandwidth, and the SSLBIS of the upstream + * switch if exists. Sum the resulting bandwidth under the switch upstream + * device or a RP device. The function can be iterated over multiple switches + * if the switches are present. + */ +static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr, + struct xarray *input_xa, + bool *gp_is_root) +{ + struct xarray *res_xa __free(free_perf_xa) = + kzalloc(sizeof(*res_xa), GFP_KERNEL); + struct access_coordinate coords[ACCESS_COORDINATE_MAX]; + struct cxl_perf_ctx *ctx, *us_ctx; + unsigned long index, us_index; + int dev_count = 0; + int gp_count = 0; + void *ptr; + int rc; + + if (!res_xa) + return ERR_PTR(-ENOMEM); + xa_init(res_xa); + + xa_for_each(input_xa, index, ctx) { + struct device *dev = (struct device *)index; + struct cxl_port *port = ctx->port; + struct cxl_port *parent_port = to_cxl_port(port->dev.parent); + struct cxl_port *gp_port = to_cxl_port(parent_port->dev.parent); + struct cxl_dport *dport = port->parent_dport; + bool is_root = false; + + dev_count++; + if (is_cxl_root(gp_port)) { + is_root = true; + gp_count++; + } + + /* + * If the grandparent is cxl root, then index is the root port, + * otherwise it's the parent switch upstream device. + */ + if (is_root) + us_index = (unsigned long)port->parent_dport->dport_dev; + else + us_index = (unsigned long)parent_port->uport_dev; + + us_ctx = xa_load(res_xa, us_index); + if (!us_ctx) { + struct cxl_perf_ctx *n __free(kfree) = + kzalloc(sizeof(*n), GFP_KERNEL); + + if (!n) + return ERR_PTR(-ENOMEM); + + ptr = xa_store(res_xa, us_index, n, GFP_KERNEL); + if (xa_is_err(ptr)) + return ERR_PTR(xa_err(ptr)); + us_ctx = no_free_ptr(n); + us_ctx->port = parent_port; + } + + /* + * If the device isn't an upstream PCIe port, there's something + * wrong with the topology. + */ + if (!dev_is_pci(dev)) + return ERR_PTR(-EINVAL); + + /* Retrieve the upstream link bandwidth */ + rc = cxl_pci_get_bandwidth(to_pci_dev(dev), coords); + if (rc) + return ERR_PTR(-ENXIO); + + /* + * Take the min of downstream bandwidth and the upstream link + * bandwidth. + */ + cxl_coordinates_combine(coords, coords, ctx->coord); + + /* + * Take the min of the calculated bandwdith and the upstream + * switch SSLBIS bandwidth if there's a parent switch + */ + if (!is_root) + cxl_coordinates_combine(coords, coords, dport->coord); + + /* + * Aggregate the calculated bandwidth common to an upstream + * switch. + */ + cxl_bandwidth_add(us_ctx->coord, us_ctx->coord, coords); + } + + /* Asymmetric topology detected. 
*/ + if (gp_count) { + if (gp_count != dev_count) { + dev_dbg(&cxlr->dev, + "Asymmetric hierarchy detected, bandwidth not updated\n"); + return ERR_PTR(-EOPNOTSUPP); + } + *gp_is_root = true; } + return no_free_ptr(res_xa); +} + +/** + * cxl_rp_gather_bandwidth - handle the root port level bandwidth collection + * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated + * below each root port device. + * + * Return: xarray that holds cxl_perf_ctx per host bridge or ERR_PTR(-errno) + */ +static struct xarray *cxl_rp_gather_bandwidth(struct xarray *xa) +{ + struct xarray *hb_xa __free(free_perf_xa) = + kzalloc(sizeof(*hb_xa), GFP_KERNEL); + struct cxl_perf_ctx *ctx; + unsigned long index; + + if (!hb_xa) + return ERR_PTR(-ENOMEM); + xa_init(hb_xa); + + xa_for_each(xa, index, ctx) { + struct cxl_port *port = ctx->port; + unsigned long hb_index = (unsigned long)port->uport_dev; + struct cxl_perf_ctx *hb_ctx; + void *ptr; + + hb_ctx = xa_load(hb_xa, hb_index); + if (!hb_ctx) { + struct cxl_perf_ctx *n __free(kfree) = + kzalloc(sizeof(*n), GFP_KERNEL); + + if (!n) + return ERR_PTR(-ENOMEM); + ptr = xa_store(hb_xa, hb_index, n, GFP_KERNEL); + if (xa_is_err(ptr)) + return ERR_PTR(xa_err(ptr)); + hb_ctx = no_free_ptr(n); + hb_ctx->port = port; + } + + cxl_bandwidth_add(hb_ctx->coord, hb_ctx->coord, ctx->coord); + } + + return no_free_ptr(hb_xa); +} + +/** + * cxl_hb_gather_bandwidth - handle the host bridge level bandwidth collection + * @xa: the xarray that holds the cxl_perf_ctx that has the bandwidth calculated + * below each host bridge. + * + * Return: xarray that holds cxl_perf_ctx per ACPI0017 device or ERR_PTR(-errno) + */ +static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa) +{ + struct xarray *mw_xa __free(free_perf_xa) = + kzalloc(sizeof(*mw_xa), GFP_KERNEL); + struct cxl_perf_ctx *ctx; + unsigned long index; + + if (!mw_xa) + return ERR_PTR(-ENOMEM); + xa_init(mw_xa); + + xa_for_each(xa, index, ctx) { + struct cxl_port *port = ctx->port; + struct cxl_port *parent_port; + struct cxl_perf_ctx *mw_ctx; + struct cxl_dport *dport; + unsigned long mw_index; + void *ptr; + + parent_port = to_cxl_port(port->dev.parent); + mw_index = (unsigned long)parent_port->uport_dev; + + mw_ctx = xa_load(mw_xa, mw_index); + if (!mw_ctx) { + struct cxl_perf_ctx *n __free(kfree) = + kzalloc(sizeof(*n), GFP_KERNEL); + + if (!n) + return ERR_PTR(-ENOMEM); + ptr = xa_store(mw_xa, mw_index, n, GFP_KERNEL); + if (xa_is_err(ptr)) + return ERR_PTR(xa_err(ptr)); + mw_ctx = no_free_ptr(n); + } + + dport = port->parent_dport; + cxl_coordinates_combine(ctx->coord, ctx->coord, dport->coord); + cxl_bandwidth_add(mw_ctx->coord, mw_ctx->coord, ctx->coord); + } + + return no_free_ptr(mw_xa); +} + +/** + * cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region + * @cxlr: The region being operated on + * @input_xa: xarray holds cxl_perf_ctx wht calculated bandwidth per ACPI0017 instance + */ +static void cxl_region_update_bandwidth(struct cxl_region *cxlr, + struct xarray *input_xa) +{ + struct access_coordinate coord[ACCESS_COORDINATE_MAX]; + struct cxl_perf_ctx *ctx; + unsigned long index; + + memset(coord, 0, sizeof(coord)); + xa_for_each(input_xa, index, ctx) + cxl_bandwidth_add(coord, coord, ctx->coord); + + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + cxlr->coord[i].read_bandwidth = coord[i].read_bandwidth; + cxlr->coord[i].write_bandwidth = coord[i].write_bandwidth; + } +} + +/** + * cxl_region_shared_upstream_bandwidth_update - Recalculate the 
bandwidth for + * the region + * @cxlr: the cxl region to recalculate + * + * The function walks the topology from bottom up and calculates the bandwidth. It + * starts at the endpoints, processes at the switches if any, processes at the rootport + * level, at the host bridge level, and finally aggregates at the region. + */ +void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr) +{ + struct xarray *working_xa; + int root_count = 0; + bool is_root; + int rc; + + lockdep_assert_held(&cxl_dpa_rwsem); + + struct xarray *usp_xa __free(free_perf_xa) = + kzalloc(sizeof(*usp_xa), GFP_KERNEL); + + if (!usp_xa) + return; + + xa_init(usp_xa); + + /* Collect bandwidth data from all the endpoints. */ + for (int i = 0; i < cxlr->params.nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = cxlr->params.targets[i]; + + is_root = false; + rc = cxl_endpoint_gather_bandwidth(cxlr, cxled, usp_xa, &is_root); + if (rc) + return; + root_count += is_root; + } + + /* Detect asymmetric hierarchy with some direct attached endpoints. */ + if (root_count && root_count != cxlr->params.nr_targets) { + dev_dbg(&cxlr->dev, + "Asymmetric hierarchy detected, bandwidth not updated\n"); + return; + } + + /* + * Walk up one or more switches to deal with the bandwidth of the + * switches if they exist. Endpoints directly attached to RPs skip + * over this part. + */ + if (!root_count) { + do { + working_xa = cxl_switch_gather_bandwidth(cxlr, usp_xa, + &is_root); + if (IS_ERR(working_xa)) + return; + free_perf_xa(usp_xa); + usp_xa = working_xa; + } while (!is_root); + } + + /* Handle the bandwidth at the root port of the hierarchy */ + working_xa = cxl_rp_gather_bandwidth(usp_xa); + if (IS_ERR(working_xa)) + return; + free_perf_xa(usp_xa); + usp_xa = working_xa; + + /* Handle the bandwidth at the host bridge of the hierarchy */ + working_xa = cxl_hb_gather_bandwidth(usp_xa); + if (IS_ERR(working_xa)) + return; + free_perf_xa(usp_xa); + usp_xa = working_xa; + + /* + * Aggregate all the bandwidth collected per CFMWS (ACPI0017) and + * update the region bandwidth with the final calculated values. 
+ */ + cxl_region_update_bandwidth(cxlr, usp_xa); +} + +void cxl_region_perf_data_calculate(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) +{ + struct cxl_dpa_perf *perf; + lockdep_assert_held(&cxl_dpa_rwsem); - if (!range_contains(&perf->dpa_range, &dpa)) + perf = cxled_get_dpa_perf(cxled); + if (IS_ERR(perf)) return; for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index bc5a95665aa0..29b61828a847 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -4,6 +4,8 @@ #ifndef __CXL_CORE_H__ #define __CXL_CORE_H__ +#include <cxl/mailbox.h> + extern const struct device_type cxl_nvdimm_bridge_type; extern const struct device_type cxl_nvdimm_type; extern const struct device_type cxl_pmu_type; @@ -27,7 +29,21 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled); int cxl_region_init(void); void cxl_region_exit(void); int cxl_get_poison_by_endpoint(struct cxl_port *port); +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa); +u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, + u64 dpa); + #else +static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, + const struct cxl_memdev *cxlmd, u64 dpa) +{ + return ULLONG_MAX; +} +static inline +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) +{ + return NULL; +} static inline int cxl_get_poison_by_endpoint(struct cxl_port *port) { return 0; @@ -51,16 +67,16 @@ static inline void cxl_region_exit(void) struct cxl_send_command; struct cxl_mem_query_commands; -int cxl_query_cmd(struct cxl_memdev *cxlmd, +int cxl_query_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_mem_query_commands __user *q); -int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s); +int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s); void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, resource_size_t length); struct dentry *cxl_debugfs_create_dir(const char *dir); -int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, - enum cxl_decoder_mode mode); -int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size); +int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled, + enum cxl_partition_mode mode); +int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size); int cxl_dpa_free(struct cxl_endpoint_decoder *cxled); resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled); resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled); @@ -75,6 +91,11 @@ resource_size_t __rcrb_to_component(struct device *dev, enum cxl_rcrb which); u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb); +#define PCI_RCRB_CAP_LIST_ID_MASK GENMASK(7, 0) +#define PCI_RCRB_CAP_HDR_ID_MASK GENMASK(7, 0) +#define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8) +#define PCI_CAP_EXP_SIZEOF 0x3c + extern struct rw_semaphore cxl_dpa_rwsem; extern struct rw_semaphore cxl_region_rwsem; @@ -89,9 +110,30 @@ enum cxl_poison_trace_type { }; long cxl_pci_get_latency(struct pci_dev *pdev); - +int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c); int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr, enum access_coordinate_class access); bool cxl_need_node_perf_attrs_update(int nid); +int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, + struct access_coordinate *c); + +int cxl_ras_init(void); +void cxl_ras_exit(void); +int cxl_gpf_port_setup(struct cxl_dport *dport); 
+int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res, + int nid, resource_size_t *size); + +#ifdef CONFIG_CXL_FEATURES +struct cxl_feat_entry * +cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid); +size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid, + enum cxl_get_feat_selection selection, + void *feat_out, size_t feat_out_size, u16 offset, + u16 *return_code); +int cxl_set_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid, + u8 feat_version, const void *feat_data, + size_t feat_data_size, u32 feat_flag, u16 offset, + u16 *return_code); +#endif #endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c new file mode 100644 index 000000000000..623aaa4439c4 --- /dev/null +++ b/drivers/cxl/core/edac.c @@ -0,0 +1,2110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CXL EDAC memory feature driver. + * + * Copyright (c) 2024-2025 HiSilicon Limited. + * + * - Supports functions to configure EDAC features of the + * CXL memory devices. + * - Registers with the EDAC device subsystem driver to expose + * the features sysfs attributes to the user for configuring + * CXL memory RAS feature. + */ + +#include <linux/cleanup.h> +#include <linux/edac.h> +#include <linux/limits.h> +#include <linux/unaligned.h> +#include <linux/xarray.h> +#include <cxl/features.h> +#include <cxl.h> +#include <cxlmem.h> +#include "core.h" +#include "trace.h" + +#define CXL_NR_EDAC_DEV_FEATURES 7 + +#define CXL_SCRUB_NO_REGION -1 + +struct cxl_patrol_scrub_context { + u8 instance; + u16 get_feat_size; + u16 set_feat_size; + u8 get_version; + u8 set_version; + u16 effects; + struct cxl_memdev *cxlmd; + struct cxl_region *cxlr; +}; + +/* + * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-222 Device Patrol Scrub Control + * Feature Readable Attributes. + */ +struct cxl_scrub_rd_attrbs { + u8 scrub_cycle_cap; + __le16 scrub_cycle_hours; + u8 scrub_flags; +} __packed; + +/* + * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-223 Device Patrol Scrub Control + * Feature Writable Attributes. 
+ */ +struct cxl_scrub_wr_attrbs { + u8 scrub_cycle_hours; + u8 scrub_flags; +} __packed; + +#define CXL_SCRUB_CONTROL_CHANGEABLE BIT(0) +#define CXL_SCRUB_CONTROL_REALTIME BIT(1) +#define CXL_SCRUB_CONTROL_CYCLE_MASK GENMASK(7, 0) +#define CXL_SCRUB_CONTROL_MIN_CYCLE_MASK GENMASK(15, 8) +#define CXL_SCRUB_CONTROL_ENABLE BIT(0) + +#define CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap) \ + FIELD_GET(CXL_SCRUB_CONTROL_CHANGEABLE, cap) +#define CXL_GET_SCRUB_CYCLE(cycle) \ + FIELD_GET(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle) +#define CXL_GET_SCRUB_MIN_CYCLE(cycle) \ + FIELD_GET(CXL_SCRUB_CONTROL_MIN_CYCLE_MASK, cycle) +#define CXL_GET_SCRUB_EN_STS(flags) FIELD_GET(CXL_SCRUB_CONTROL_ENABLE, flags) + +#define CXL_SET_SCRUB_CYCLE(cycle) \ + FIELD_PREP(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle) +#define CXL_SET_SCRUB_EN(en) FIELD_PREP(CXL_SCRUB_CONTROL_ENABLE, en) + +static int cxl_mem_scrub_get_attrbs(struct cxl_mailbox *cxl_mbox, u8 *cap, + u16 *cycle, u8 *flags, u8 *min_cycle) +{ + size_t rd_data_size = sizeof(struct cxl_scrub_rd_attrbs); + size_t data_size; + struct cxl_scrub_rd_attrbs *rd_attrbs __free(kfree) = + kzalloc(rd_data_size, GFP_KERNEL); + if (!rd_attrbs) + return -ENOMEM; + + data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID, + CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs, + rd_data_size, 0, NULL); + if (!data_size) + return -EIO; + + *cap = rd_attrbs->scrub_cycle_cap; + *cycle = le16_to_cpu(rd_attrbs->scrub_cycle_hours); + *flags = rd_attrbs->scrub_flags; + if (min_cycle) + *min_cycle = CXL_GET_SCRUB_MIN_CYCLE(*cycle); + + return 0; +} + +static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx, + u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle) +{ + struct cxl_mailbox *cxl_mbox; + struct cxl_region_params *p; + struct cxl_memdev *cxlmd; + struct cxl_region *cxlr; + u8 min_scrub_cycle = 0; + int i, ret; + + if (!cxl_ps_ctx->cxlr) { + cxl_mbox = &cxl_ps_ctx->cxlmd->cxlds->cxl_mbox; + return cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, + flags, min_cycle); + } + + struct rw_semaphore *region_lock __free(rwsem_read_release) = + rwsem_read_intr_acquire(&cxl_region_rwsem); + if (!region_lock) + return -EINTR; + + cxlr = cxl_ps_ctx->cxlr; + p = &cxlr->params; + + for (i = 0; i < p->nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = p->targets[i]; + + cxlmd = cxled_to_memdev(cxled); + cxl_mbox = &cxlmd->cxlds->cxl_mbox; + ret = cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, flags, + min_cycle); + if (ret) + return ret; + + /* + * The min_scrub_cycle of a region is the max of minimum scrub + * cycles supported by memdevs that back the region. 
+ */ + if (min_cycle) + min_scrub_cycle = max(*min_cycle, min_scrub_cycle); + } + + if (min_cycle) + *min_cycle = min_scrub_cycle; + + return 0; +} + +static int cxl_scrub_set_attrbs_region(struct device *dev, + struct cxl_patrol_scrub_context *cxl_ps_ctx, + u8 cycle, u8 flags) +{ + struct cxl_scrub_wr_attrbs wr_attrbs; + struct cxl_mailbox *cxl_mbox; + struct cxl_region_params *p; + struct cxl_memdev *cxlmd; + struct cxl_region *cxlr; + int ret, i; + + struct rw_semaphore *region_lock __free(rwsem_read_release) = + rwsem_read_intr_acquire(&cxl_region_rwsem); + if (!region_lock) + return -EINTR; + + cxlr = cxl_ps_ctx->cxlr; + p = &cxlr->params; + wr_attrbs.scrub_cycle_hours = cycle; + wr_attrbs.scrub_flags = flags; + + for (i = 0; i < p->nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = p->targets[i]; + + cxlmd = cxled_to_memdev(cxled); + cxl_mbox = &cxlmd->cxlds->cxl_mbox; + ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID, + cxl_ps_ctx->set_version, &wr_attrbs, + sizeof(wr_attrbs), + CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, + 0, NULL); + if (ret) + return ret; + + if (cycle != cxlmd->scrub_cycle) { + if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION) + dev_info(dev, + "Device scrub rate(%d hours) set by region%d rate overwritten by region%d scrub rate(%d hours)\n", + cxlmd->scrub_cycle, + cxlmd->scrub_region_id, cxlr->id, + cycle); + + cxlmd->scrub_cycle = cycle; + cxlmd->scrub_region_id = cxlr->id; + } + } + + return 0; +} + +static int cxl_scrub_set_attrbs_device(struct device *dev, + struct cxl_patrol_scrub_context *cxl_ps_ctx, + u8 cycle, u8 flags) +{ + struct cxl_scrub_wr_attrbs wr_attrbs; + struct cxl_mailbox *cxl_mbox; + struct cxl_memdev *cxlmd; + int ret; + + wr_attrbs.scrub_cycle_hours = cycle; + wr_attrbs.scrub_flags = flags; + + cxlmd = cxl_ps_ctx->cxlmd; + cxl_mbox = &cxlmd->cxlds->cxl_mbox; + ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID, + cxl_ps_ctx->set_version, &wr_attrbs, + sizeof(wr_attrbs), + CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, 0, + NULL); + if (ret) + return ret; + + if (cycle != cxlmd->scrub_cycle) { + if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION) + dev_info(dev, + "Device scrub rate(%d hours) set by region%d rate overwritten with device local scrub rate(%d hours)\n", + cxlmd->scrub_cycle, cxlmd->scrub_region_id, + cycle); + + cxlmd->scrub_cycle = cycle; + cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION; + } + + return 0; +} + +static int cxl_scrub_set_attrbs(struct device *dev, + struct cxl_patrol_scrub_context *cxl_ps_ctx, + u8 cycle, u8 flags) +{ + if (cxl_ps_ctx->cxlr) + return cxl_scrub_set_attrbs_region(dev, cxl_ps_ctx, cycle, flags); + + return cxl_scrub_set_attrbs_device(dev, cxl_ps_ctx, cycle, flags); +} + +static int cxl_patrol_scrub_get_enabled_bg(struct device *dev, void *drv_data, + bool *enabled) +{ + struct cxl_patrol_scrub_context *ctx = drv_data; + u8 cap, flags; + u16 cycle; + int ret; + + ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL); + if (ret) + return ret; + + *enabled = CXL_GET_SCRUB_EN_STS(flags); + + return 0; +} + +static int cxl_patrol_scrub_set_enabled_bg(struct device *dev, void *drv_data, + bool enable) +{ + struct cxl_patrol_scrub_context *ctx = drv_data; + u8 cap, flags, wr_cycle; + u16 rd_cycle; + int ret; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, NULL); + if (ret) + return ret; + + wr_cycle = CXL_GET_SCRUB_CYCLE(rd_cycle); + flags = CXL_SET_SCRUB_EN(enable); + + return cxl_scrub_set_attrbs(dev, ctx, 
wr_cycle, flags); +} + +static int cxl_patrol_scrub_get_min_scrub_cycle(struct device *dev, + void *drv_data, u32 *min) +{ + struct cxl_patrol_scrub_context *ctx = drv_data; + u8 cap, flags, min_cycle; + u16 cycle; + int ret; + + ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, &min_cycle); + if (ret) + return ret; + + *min = min_cycle * 3600; + + return 0; +} + +static int cxl_patrol_scrub_get_max_scrub_cycle(struct device *dev, + void *drv_data, u32 *max) +{ + *max = U8_MAX * 3600; /* Max set by register size */ + + return 0; +} + +static int cxl_patrol_scrub_get_scrub_cycle(struct device *dev, void *drv_data, + u32 *scrub_cycle_secs) +{ + struct cxl_patrol_scrub_context *ctx = drv_data; + u8 cap, flags; + u16 cycle; + int ret; + + ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL); + if (ret) + return ret; + + *scrub_cycle_secs = CXL_GET_SCRUB_CYCLE(cycle) * 3600; + + return 0; +} + +static int cxl_patrol_scrub_set_scrub_cycle(struct device *dev, void *drv_data, + u32 scrub_cycle_secs) +{ + struct cxl_patrol_scrub_context *ctx = drv_data; + u8 scrub_cycle_hours = scrub_cycle_secs / 3600; + u8 cap, wr_cycle, flags, min_cycle; + u16 rd_cycle; + int ret; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, &min_cycle); + if (ret) + return ret; + + if (!CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap)) + return -EOPNOTSUPP; + + if (scrub_cycle_hours < min_cycle) { + dev_dbg(dev, "Invalid CXL patrol scrub cycle(%d) to set\n", + scrub_cycle_hours); + dev_dbg(dev, + "Minimum supported CXL patrol scrub cycle in hour %d\n", + min_cycle); + return -EINVAL; + } + wr_cycle = CXL_SET_SCRUB_CYCLE(scrub_cycle_hours); + + return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags); +} + +static const struct edac_scrub_ops cxl_ps_scrub_ops = { + .get_enabled_bg = cxl_patrol_scrub_get_enabled_bg, + .set_enabled_bg = cxl_patrol_scrub_set_enabled_bg, + .get_min_cycle = cxl_patrol_scrub_get_min_scrub_cycle, + .get_max_cycle = cxl_patrol_scrub_get_max_scrub_cycle, + .get_cycle_duration = cxl_patrol_scrub_get_scrub_cycle, + .set_cycle_duration = cxl_patrol_scrub_set_scrub_cycle, +}; + +static int cxl_memdev_scrub_init(struct cxl_memdev *cxlmd, + struct edac_dev_feature *ras_feature, + u8 scrub_inst) +{ + struct cxl_patrol_scrub_context *cxl_ps_ctx; + struct cxl_feat_entry *feat_entry; + u8 cap, flags; + u16 cycle; + int rc; + + feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds), + &CXL_FEAT_PATROL_SCRUB_UUID); + if (IS_ERR(feat_entry)) + return -EOPNOTSUPP; + + if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE)) + return -EOPNOTSUPP; + + cxl_ps_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL); + if (!cxl_ps_ctx) + return -ENOMEM; + + *cxl_ps_ctx = (struct cxl_patrol_scrub_context){ + .get_feat_size = le16_to_cpu(feat_entry->get_feat_size), + .set_feat_size = le16_to_cpu(feat_entry->set_feat_size), + .get_version = feat_entry->get_feat_ver, + .set_version = feat_entry->set_feat_ver, + .effects = le16_to_cpu(feat_entry->effects), + .instance = scrub_inst, + .cxlmd = cxlmd, + }; + + rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, &cycle, + &flags, NULL); + if (rc) + return rc; + + cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle); + cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION; + + ras_feature->ft_type = RAS_FEAT_SCRUB; + ras_feature->instance = cxl_ps_ctx->instance; + ras_feature->scrub_ops = &cxl_ps_scrub_ops; + ras_feature->ctx = cxl_ps_ctx; + + return 0; +} + +static int cxl_region_scrub_init(struct cxl_region 
*cxlr, + struct edac_dev_feature *ras_feature, + u8 scrub_inst) +{ + struct cxl_patrol_scrub_context *cxl_ps_ctx; + struct cxl_region_params *p = &cxlr->params; + struct cxl_feat_entry *feat_entry = NULL; + struct cxl_memdev *cxlmd; + u8 cap, flags; + u16 cycle; + int i, rc; + + /* + * The cxl_region_rwsem must be held if the code below is used in a context + * other than when the region is in the probe state, as shown here. + */ + for (i = 0; i < p->nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = p->targets[i]; + + cxlmd = cxled_to_memdev(cxled); + feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds), + &CXL_FEAT_PATROL_SCRUB_UUID); + if (IS_ERR(feat_entry)) + return -EOPNOTSUPP; + + if (!(le32_to_cpu(feat_entry->flags) & + CXL_FEATURE_F_CHANGEABLE)) + return -EOPNOTSUPP; + + rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, + &cycle, &flags, NULL); + if (rc) + return rc; + + cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle); + cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION; + } + + cxl_ps_ctx = devm_kzalloc(&cxlr->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL); + if (!cxl_ps_ctx) + return -ENOMEM; + + *cxl_ps_ctx = (struct cxl_patrol_scrub_context){ + .get_feat_size = le16_to_cpu(feat_entry->get_feat_size), + .set_feat_size = le16_to_cpu(feat_entry->set_feat_size), + .get_version = feat_entry->get_feat_ver, + .set_version = feat_entry->set_feat_ver, + .effects = le16_to_cpu(feat_entry->effects), + .instance = scrub_inst, + .cxlr = cxlr, + }; + + ras_feature->ft_type = RAS_FEAT_SCRUB; + ras_feature->instance = cxl_ps_ctx->instance; + ras_feature->scrub_ops = &cxl_ps_scrub_ops; + ras_feature->ctx = cxl_ps_ctx; + + return 0; +} + +struct cxl_ecs_context { + u16 num_media_frus; + u16 get_feat_size; + u16 set_feat_size; + u8 get_version; + u8 set_version; + u16 effects; + struct cxl_memdev *cxlmd; +}; + +/* + * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-225 DDR5 ECS Control Feature + * Readable Attributes. + */ +struct cxl_ecs_fru_rd_attrbs { + u8 ecs_cap; + __le16 ecs_config; + u8 ecs_flags; +} __packed; + +struct cxl_ecs_rd_attrbs { + u8 ecs_log_cap; + struct cxl_ecs_fru_rd_attrbs fru_attrbs[]; +} __packed; + +/* + * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-226 DDR5 ECS Control Feature + * Writable Attributes. 
+ */ +struct cxl_ecs_fru_wr_attrbs { + __le16 ecs_config; +} __packed; + +struct cxl_ecs_wr_attrbs { + u8 ecs_log_cap; + struct cxl_ecs_fru_wr_attrbs fru_attrbs[]; +} __packed; + +#define CXL_ECS_LOG_ENTRY_TYPE_MASK GENMASK(1, 0) +#define CXL_ECS_REALTIME_REPORT_CAP_MASK BIT(0) +#define CXL_ECS_THRESHOLD_COUNT_MASK GENMASK(2, 0) +#define CXL_ECS_COUNT_MODE_MASK BIT(3) +#define CXL_ECS_RESET_COUNTER_MASK BIT(4) +#define CXL_ECS_RESET_COUNTER 1 + +enum { + ECS_THRESHOLD_256 = 256, + ECS_THRESHOLD_1024 = 1024, + ECS_THRESHOLD_4096 = 4096, +}; + +enum { + ECS_THRESHOLD_IDX_256 = 3, + ECS_THRESHOLD_IDX_1024 = 4, + ECS_THRESHOLD_IDX_4096 = 5, +}; + +static const u16 ecs_supp_threshold[] = { + [ECS_THRESHOLD_IDX_256] = 256, + [ECS_THRESHOLD_IDX_1024] = 1024, + [ECS_THRESHOLD_IDX_4096] = 4096, +}; + +enum { + ECS_LOG_ENTRY_TYPE_DRAM = 0x0, + ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU = 0x1, +}; + +enum cxl_ecs_count_mode { + ECS_MODE_COUNTS_ROWS = 0, + ECS_MODE_COUNTS_CODEWORDS = 1, +}; + +static int cxl_mem_ecs_get_attrbs(struct device *dev, + struct cxl_ecs_context *cxl_ecs_ctx, + int fru_id, u8 *log_cap, u16 *config) +{ + struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd; + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; + struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs; + size_t rd_data_size; + size_t data_size; + + rd_data_size = cxl_ecs_ctx->get_feat_size; + + struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) = + kvzalloc(rd_data_size, GFP_KERNEL); + if (!rd_attrbs) + return -ENOMEM; + + data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID, + CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs, + rd_data_size, 0, NULL); + if (!data_size) + return -EIO; + + fru_rd_attrbs = rd_attrbs->fru_attrbs; + *log_cap = rd_attrbs->ecs_log_cap; + *config = le16_to_cpu(fru_rd_attrbs[fru_id].ecs_config); + + return 0; +} + +static int cxl_mem_ecs_set_attrbs(struct device *dev, + struct cxl_ecs_context *cxl_ecs_ctx, + int fru_id, u8 log_cap, u16 config) +{ + struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd; + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; + struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs; + struct cxl_ecs_fru_wr_attrbs *fru_wr_attrbs; + size_t rd_data_size, wr_data_size; + u16 num_media_frus, count; + size_t data_size; + + num_media_frus = cxl_ecs_ctx->num_media_frus; + rd_data_size = cxl_ecs_ctx->get_feat_size; + wr_data_size = cxl_ecs_ctx->set_feat_size; + struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) = + kvzalloc(rd_data_size, GFP_KERNEL); + if (!rd_attrbs) + return -ENOMEM; + + data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID, + CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs, + rd_data_size, 0, NULL); + if (!data_size) + return -EIO; + + struct cxl_ecs_wr_attrbs *wr_attrbs __free(kvfree) = + kvzalloc(wr_data_size, GFP_KERNEL); + if (!wr_attrbs) + return -ENOMEM; + + /* + * Fill writable attributes from the current attributes read + * for all the media FRUs. 
+ */ + fru_rd_attrbs = rd_attrbs->fru_attrbs; + fru_wr_attrbs = wr_attrbs->fru_attrbs; + wr_attrbs->ecs_log_cap = log_cap; + for (count = 0; count < num_media_frus; count++) + fru_wr_attrbs[count].ecs_config = + fru_rd_attrbs[count].ecs_config; + + fru_wr_attrbs[fru_id].ecs_config = cpu_to_le16(config); + + return cxl_set_feature(cxl_mbox, &CXL_FEAT_ECS_UUID, + cxl_ecs_ctx->set_version, wr_attrbs, + wr_data_size, + CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, + 0, NULL); +} + +static u8 cxl_get_ecs_log_entry_type(u8 log_cap, u16 config) +{ + return FIELD_GET(CXL_ECS_LOG_ENTRY_TYPE_MASK, log_cap); +} + +static u16 cxl_get_ecs_threshold(u8 log_cap, u16 config) +{ + u8 index = FIELD_GET(CXL_ECS_THRESHOLD_COUNT_MASK, config); + + return ecs_supp_threshold[index]; +} + +static u8 cxl_get_ecs_count_mode(u8 log_cap, u16 config) +{ + return FIELD_GET(CXL_ECS_COUNT_MODE_MASK, config); +} + +#define CXL_ECS_GET_ATTR(attrb) \ + static int cxl_ecs_get_##attrb(struct device *dev, void *drv_data, \ + int fru_id, u32 *val) \ + { \ + struct cxl_ecs_context *ctx = drv_data; \ + u8 log_cap; \ + u16 config; \ + int ret; \ + \ + ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \ + &config); \ + if (ret) \ + return ret; \ + \ + *val = cxl_get_ecs_##attrb(log_cap, config); \ + \ + return 0; \ + } + +CXL_ECS_GET_ATTR(log_entry_type) +CXL_ECS_GET_ATTR(count_mode) +CXL_ECS_GET_ATTR(threshold) + +static int cxl_set_ecs_log_entry_type(struct device *dev, u8 *log_cap, + u16 *config, u32 val) +{ + if (val != ECS_LOG_ENTRY_TYPE_DRAM && + val != ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU) + return -EINVAL; + + *log_cap = FIELD_PREP(CXL_ECS_LOG_ENTRY_TYPE_MASK, val); + + return 0; +} + +static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config, + u32 val) +{ + *config &= ~CXL_ECS_THRESHOLD_COUNT_MASK; + + switch (val) { + case ECS_THRESHOLD_256: + *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK, + ECS_THRESHOLD_IDX_256); + break; + case ECS_THRESHOLD_1024: + *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK, + ECS_THRESHOLD_IDX_1024); + break; + case ECS_THRESHOLD_4096: + *config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK, + ECS_THRESHOLD_IDX_4096); + break; + default: + dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n", + val); + dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n", + ECS_THRESHOLD_256, ECS_THRESHOLD_1024, + ECS_THRESHOLD_4096); + return -EINVAL; + } + + return 0; +} + +static int cxl_set_ecs_count_mode(struct device *dev, u8 *log_cap, u16 *config, + u32 val) +{ + if (val != ECS_MODE_COUNTS_ROWS && val != ECS_MODE_COUNTS_CODEWORDS) { + dev_dbg(dev, "Invalid CXL ECS scrub mode(%d) to set\n", val); + dev_dbg(dev, + "Supported ECS Modes: 0: ECS counts rows with errors," + " 1: ECS counts codewords with errors\n"); + return -EINVAL; + } + + *config &= ~CXL_ECS_COUNT_MODE_MASK; + *config |= FIELD_PREP(CXL_ECS_COUNT_MODE_MASK, val); + + return 0; +} + +static int cxl_set_ecs_reset_counter(struct device *dev, u8 *log_cap, + u16 *config, u32 val) +{ + if (val != CXL_ECS_RESET_COUNTER) + return -EINVAL; + + *config &= ~CXL_ECS_RESET_COUNTER_MASK; + *config |= FIELD_PREP(CXL_ECS_RESET_COUNTER_MASK, val); + + return 0; +} + +#define CXL_ECS_SET_ATTR(attrb) \ + static int cxl_ecs_set_##attrb(struct device *dev, void *drv_data, \ + int fru_id, u32 val) \ + { \ + struct cxl_ecs_context *ctx = drv_data; \ + u8 log_cap; \ + u16 config; \ + int ret; \ + \ + if (!capable(CAP_SYS_RAWIO)) \ + return -EPERM; \ + \ + ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap, \ + &config); \ 
+ if (ret) \ + return ret; \ + \ + ret = cxl_set_ecs_##attrb(dev, &log_cap, &config, val); \ + if (ret) \ + return ret; \ + \ + return cxl_mem_ecs_set_attrbs(dev, ctx, fru_id, log_cap, \ + config); \ + } +CXL_ECS_SET_ATTR(log_entry_type) +CXL_ECS_SET_ATTR(count_mode) +CXL_ECS_SET_ATTR(reset_counter) +CXL_ECS_SET_ATTR(threshold) + +static const struct edac_ecs_ops cxl_ecs_ops = { + .get_log_entry_type = cxl_ecs_get_log_entry_type, + .set_log_entry_type = cxl_ecs_set_log_entry_type, + .get_mode = cxl_ecs_get_count_mode, + .set_mode = cxl_ecs_set_count_mode, + .reset = cxl_ecs_set_reset_counter, + .get_threshold = cxl_ecs_get_threshold, + .set_threshold = cxl_ecs_set_threshold, +}; + +static int cxl_memdev_ecs_init(struct cxl_memdev *cxlmd, + struct edac_dev_feature *ras_feature) +{ + struct cxl_ecs_context *cxl_ecs_ctx; + struct cxl_feat_entry *feat_entry; + int num_media_frus; + + feat_entry = + cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_ECS_UUID); + if (IS_ERR(feat_entry)) + return -EOPNOTSUPP; + + if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE)) + return -EOPNOTSUPP; + + num_media_frus = (le16_to_cpu(feat_entry->get_feat_size) - + sizeof(struct cxl_ecs_rd_attrbs)) / + sizeof(struct cxl_ecs_fru_rd_attrbs); + if (!num_media_frus) + return -EOPNOTSUPP; + + cxl_ecs_ctx = + devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ecs_ctx), GFP_KERNEL); + if (!cxl_ecs_ctx) + return -ENOMEM; + + *cxl_ecs_ctx = (struct cxl_ecs_context){ + .get_feat_size = le16_to_cpu(feat_entry->get_feat_size), + .set_feat_size = le16_to_cpu(feat_entry->set_feat_size), + .get_version = feat_entry->get_feat_ver, + .set_version = feat_entry->set_feat_ver, + .effects = le16_to_cpu(feat_entry->effects), + .num_media_frus = num_media_frus, + .cxlmd = cxlmd, + }; + + ras_feature->ft_type = RAS_FEAT_ECS; + ras_feature->ecs_ops = &cxl_ecs_ops; + ras_feature->ctx = cxl_ecs_ctx; + ras_feature->ecs_info.num_media_frus = num_media_frus; + + return 0; +} + +/* + * Perform Maintenance CXL 3.2 Spec 8.2.10.7.1 + */ + +/* + * Perform Maintenance input payload + * CXL rev 3.2 section 8.2.10.7.1 Table 8-117 + */ +struct cxl_mbox_maintenance_hdr { + u8 op_class; + u8 op_subclass; +} __packed; + +static int cxl_perform_maintenance(struct cxl_mailbox *cxl_mbox, u8 class, + u8 subclass, void *data_in, + size_t data_in_size) +{ + struct cxl_memdev_maintenance_pi { + struct cxl_mbox_maintenance_hdr hdr; + u8 data[]; + } __packed; + struct cxl_mbox_cmd mbox_cmd; + size_t hdr_size; + + struct cxl_memdev_maintenance_pi *pi __free(kvfree) = + kvzalloc(cxl_mbox->payload_size, GFP_KERNEL); + if (!pi) + return -ENOMEM; + + pi->hdr.op_class = class; + pi->hdr.op_subclass = subclass; + hdr_size = sizeof(pi->hdr); + /* + * Check minimum mbox payload size is available for + * the maintenance data transfer. + */ + if (hdr_size + data_in_size > cxl_mbox->payload_size) + return -ENOMEM; + + memcpy(pi->data, data_in, data_in_size); + mbox_cmd = (struct cxl_mbox_cmd){ + .opcode = CXL_MBOX_OP_DO_MAINTENANCE, + .size_in = hdr_size + data_in_size, + .payload_in = pi, + }; + + return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); +} + +/* + * Support for finding a memory operation attributes + * are from the current boot or not. 
+ */ + +struct cxl_mem_err_rec { + struct xarray rec_gen_media; + struct xarray rec_dram; +}; + +enum cxl_mem_repair_type { + CXL_PPR, + CXL_CACHELINE_SPARING, + CXL_ROW_SPARING, + CXL_BANK_SPARING, + CXL_RANK_SPARING, + CXL_REPAIR_MAX, +}; + +/** + * struct cxl_mem_repair_attrbs - CXL memory repair attributes + * @dpa: DPA of memory to repair + * @nibble_mask: nibble mask, identifies one or more nibbles on the memory bus + * @row: row of memory to repair + * @column: column of memory to repair + * @channel: channel of memory to repair + * @sub_channel: sub channel of memory to repair + * @rank: rank of memory to repair + * @bank_group: bank group of memory to repair + * @bank: bank of memory to repair + * @repair_type: repair type. For eg. PPR, memory sparing etc. + */ +struct cxl_mem_repair_attrbs { + u64 dpa; + u32 nibble_mask; + u32 row; + u16 column; + u8 channel; + u8 sub_channel; + u8 rank; + u8 bank_group; + u8 bank; + enum cxl_mem_repair_type repair_type; +}; + +static struct cxl_event_gen_media * +cxl_find_rec_gen_media(struct cxl_memdev *cxlmd, + struct cxl_mem_repair_attrbs *attrbs) +{ + struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array; + struct cxl_event_gen_media *rec; + + if (!array_rec) + return NULL; + + rec = xa_load(&array_rec->rec_gen_media, attrbs->dpa); + if (!rec) + return NULL; + + if (attrbs->repair_type == CXL_PPR) + return rec; + + return NULL; +} + +static struct cxl_event_dram * +cxl_find_rec_dram(struct cxl_memdev *cxlmd, + struct cxl_mem_repair_attrbs *attrbs) +{ + struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array; + struct cxl_event_dram *rec; + u16 validity_flags; + + if (!array_rec) + return NULL; + + rec = xa_load(&array_rec->rec_dram, attrbs->dpa); + if (!rec) + return NULL; + + validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags); + if (!(validity_flags & CXL_DER_VALID_CHANNEL) || + !(validity_flags & CXL_DER_VALID_RANK)) + return NULL; + + switch (attrbs->repair_type) { + case CXL_PPR: + if (!(validity_flags & CXL_DER_VALID_NIBBLE) || + get_unaligned_le24(rec->nibble_mask) == attrbs->nibble_mask) + return rec; + break; + case CXL_CACHELINE_SPARING: + if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) || + !(validity_flags & CXL_DER_VALID_BANK) || + !(validity_flags & CXL_DER_VALID_ROW) || + !(validity_flags & CXL_DER_VALID_COLUMN)) + return NULL; + + if (rec->media_hdr.channel == attrbs->channel && + rec->media_hdr.rank == attrbs->rank && + rec->bank_group == attrbs->bank_group && + rec->bank == attrbs->bank && + get_unaligned_le24(rec->row) == attrbs->row && + get_unaligned_le16(rec->column) == attrbs->column && + (!(validity_flags & CXL_DER_VALID_NIBBLE) || + get_unaligned_le24(rec->nibble_mask) == + attrbs->nibble_mask) && + (!(validity_flags & CXL_DER_VALID_SUB_CHANNEL) || + rec->sub_channel == attrbs->sub_channel)) + return rec; + break; + case CXL_ROW_SPARING: + if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) || + !(validity_flags & CXL_DER_VALID_BANK) || + !(validity_flags & CXL_DER_VALID_ROW)) + return NULL; + + if (rec->media_hdr.channel == attrbs->channel && + rec->media_hdr.rank == attrbs->rank && + rec->bank_group == attrbs->bank_group && + rec->bank == attrbs->bank && + get_unaligned_le24(rec->row) == attrbs->row && + (!(validity_flags & CXL_DER_VALID_NIBBLE) || + get_unaligned_le24(rec->nibble_mask) == + attrbs->nibble_mask)) + return rec; + break; + case CXL_BANK_SPARING: + if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) || + !(validity_flags & CXL_DER_VALID_BANK)) + return NULL; + + if 
(rec->media_hdr.channel == attrbs->channel && + rec->media_hdr.rank == attrbs->rank && + rec->bank_group == attrbs->bank_group && + rec->bank == attrbs->bank && + (!(validity_flags & CXL_DER_VALID_NIBBLE) || + get_unaligned_le24(rec->nibble_mask) == + attrbs->nibble_mask)) + return rec; + break; + case CXL_RANK_SPARING: + if (rec->media_hdr.channel == attrbs->channel && + rec->media_hdr.rank == attrbs->rank && + (!(validity_flags & CXL_DER_VALID_NIBBLE) || + get_unaligned_le24(rec->nibble_mask) == + attrbs->nibble_mask)) + return rec; + break; + default: + return NULL; + } + + return NULL; +} + +#define CXL_MAX_STORAGE_DAYS 10 +#define CXL_MAX_STORAGE_TIME_SECS (CXL_MAX_STORAGE_DAYS * 24 * 60 * 60) + +static void cxl_del_expired_gmedia_recs(struct xarray *rec_xarray, + struct cxl_event_gen_media *cur_rec) +{ + u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp); + struct cxl_event_gen_media *rec; + unsigned long index; + u64 delta_ts_secs; + + xa_for_each(rec_xarray, index, rec) { + delta_ts_secs = (cur_ts - + le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL; + if (delta_ts_secs >= CXL_MAX_STORAGE_TIME_SECS) { + xa_erase(rec_xarray, index); + kfree(rec); + } + } +} + +static void cxl_del_expired_dram_recs(struct xarray *rec_xarray, + struct cxl_event_dram *cur_rec) +{ + u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp); + struct cxl_event_dram *rec; + unsigned long index; + u64 delta_secs; + + xa_for_each(rec_xarray, index, rec) { + delta_secs = (cur_ts - + le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL; + if (delta_secs >= CXL_MAX_STORAGE_TIME_SECS) { + xa_erase(rec_xarray, index); + kfree(rec); + } + } +} + +#define CXL_MAX_REC_STORAGE_COUNT 200 + +static void cxl_del_overflow_old_recs(struct xarray *rec_xarray) +{ + void *err_rec; + unsigned long index, count = 0; + + xa_for_each(rec_xarray, index, err_rec) + count++; + + if (count <= CXL_MAX_REC_STORAGE_COUNT) + return; + + count -= CXL_MAX_REC_STORAGE_COUNT; + xa_for_each(rec_xarray, index, err_rec) { + xa_erase(rec_xarray, index); + kfree(err_rec); + count--; + if (!count) + break; + } +} + +int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt) +{ + struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array; + struct cxl_event_gen_media *rec; + void *old_rec; + + if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec) + return 0; + + rec = kmemdup(&evt->gen_media, sizeof(*rec), GFP_KERNEL); + if (!rec) + return -ENOMEM; + + old_rec = xa_store(&array_rec->rec_gen_media, + le64_to_cpu(rec->media_hdr.phys_addr), rec, + GFP_KERNEL); + if (xa_is_err(old_rec)) { + kfree(rec); + return xa_err(old_rec); + } + + kfree(old_rec); + + cxl_del_expired_gmedia_recs(&array_rec->rec_gen_media, rec); + cxl_del_overflow_old_recs(&array_rec->rec_gen_media); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_store_rec_gen_media, "CXL"); + +int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt) +{ + struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array; + struct cxl_event_dram *rec; + void *old_rec; + + if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec) + return 0; + + rec = kmemdup(&evt->dram, sizeof(*rec), GFP_KERNEL); + if (!rec) + return -ENOMEM; + + old_rec = xa_store(&array_rec->rec_dram, + le64_to_cpu(rec->media_hdr.phys_addr), rec, + GFP_KERNEL); + if (xa_is_err(old_rec)) { + kfree(rec); + return xa_err(old_rec); + } + + kfree(old_rec); + + cxl_del_expired_dram_recs(&array_rec->rec_dram, rec); + cxl_del_overflow_old_recs(&array_rec->rec_dram); + + return 0; +} 
+EXPORT_SYMBOL_NS_GPL(cxl_store_rec_dram, "CXL"); + +static bool cxl_is_memdev_memory_online(const struct cxl_memdev *cxlmd) +{ + struct cxl_port *port = cxlmd->endpoint; + + if (port && cxl_num_decoders_committed(port)) + return true; + + return false; +} + +/* + * CXL memory sparing control + */ +enum cxl_mem_sparing_granularity { + CXL_MEM_SPARING_CACHELINE, + CXL_MEM_SPARING_ROW, + CXL_MEM_SPARING_BANK, + CXL_MEM_SPARING_RANK, + CXL_MEM_SPARING_MAX +}; + +struct cxl_mem_sparing_context { + struct cxl_memdev *cxlmd; + uuid_t repair_uuid; + u16 get_feat_size; + u16 set_feat_size; + u16 effects; + u8 instance; + u8 get_version; + u8 set_version; + u8 op_class; + u8 op_subclass; + bool cap_safe_when_in_use; + bool cap_hard_sparing; + bool cap_soft_sparing; + u8 channel; + u8 rank; + u8 bank_group; + u32 nibble_mask; + u64 dpa; + u32 row; + u16 column; + u8 bank; + u8 sub_channel; + enum edac_mem_repair_type repair_type; + bool persist_mode; +}; + +#define CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK BIT(0) +#define CXL_SPARING_RD_CAP_HARD_SPARING_MASK BIT(1) +#define CXL_SPARING_RD_CAP_SOFT_SPARING_MASK BIT(2) + +#define CXL_SPARING_WR_DEVICE_INITIATED_MASK BIT(0) + +#define CXL_SPARING_QUERY_RESOURCE_FLAG BIT(0) +#define CXL_SET_HARD_SPARING_FLAG BIT(1) +#define CXL_SPARING_SUB_CHNL_VALID_FLAG BIT(2) +#define CXL_SPARING_NIB_MASK_VALID_FLAG BIT(3) + +#define CXL_GET_SPARING_SAFE_IN_USE(flags) \ + (FIELD_GET(CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK, \ + flags) ^ 1) +#define CXL_GET_CAP_HARD_SPARING(flags) \ + FIELD_GET(CXL_SPARING_RD_CAP_HARD_SPARING_MASK, \ + flags) +#define CXL_GET_CAP_SOFT_SPARING(flags) \ + FIELD_GET(CXL_SPARING_RD_CAP_SOFT_SPARING_MASK, \ + flags) + +#define CXL_SET_SPARING_QUERY_RESOURCE(val) \ + FIELD_PREP(CXL_SPARING_QUERY_RESOURCE_FLAG, val) +#define CXL_SET_HARD_SPARING(val) \ + FIELD_PREP(CXL_SET_HARD_SPARING_FLAG, val) +#define CXL_SET_SPARING_SUB_CHNL_VALID(val) \ + FIELD_PREP(CXL_SPARING_SUB_CHNL_VALID_FLAG, val) +#define CXL_SET_SPARING_NIB_MASK_VALID(val) \ + FIELD_PREP(CXL_SPARING_NIB_MASK_VALID_FLAG, val) + +/* + * See CXL spec rev 3.2 @8.2.10.7.2.3 Table 8-134 Memory Sparing Feature + * Readable Attributes. + */ +struct cxl_memdev_repair_rd_attrbs_hdr { + u8 max_op_latency; + __le16 op_cap; + __le16 op_mode; + u8 op_class; + u8 op_subclass; + u8 rsvd[9]; +} __packed; + +struct cxl_memdev_sparing_rd_attrbs { + struct cxl_memdev_repair_rd_attrbs_hdr hdr; + u8 rsvd; + __le16 restriction_flags; +} __packed; + +/* + * See CXL spec rev 3.2 @8.2.10.7.1.4 Table 8-120 Memory Sparing Input Payload. 
+ */ +struct cxl_memdev_sparing_in_payload { + u8 flags; + u8 channel; + u8 rank; + u8 nibble_mask[3]; + u8 bank_group; + u8 bank; + u8 row[3]; + __le16 column; + u8 sub_channel; +} __packed; + +static int +cxl_mem_sparing_get_attrbs(struct cxl_mem_sparing_context *cxl_sparing_ctx) +{ + size_t rd_data_size = sizeof(struct cxl_memdev_sparing_rd_attrbs); + struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd; + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; + u16 restriction_flags; + size_t data_size; + u16 return_code; + struct cxl_memdev_sparing_rd_attrbs *rd_attrbs __free(kfree) = + kzalloc(rd_data_size, GFP_KERNEL); + if (!rd_attrbs) + return -ENOMEM; + + data_size = cxl_get_feature(cxl_mbox, &cxl_sparing_ctx->repair_uuid, + CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs, + rd_data_size, 0, &return_code); + if (!data_size) + return -EIO; + + cxl_sparing_ctx->op_class = rd_attrbs->hdr.op_class; + cxl_sparing_ctx->op_subclass = rd_attrbs->hdr.op_subclass; + restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags); + cxl_sparing_ctx->cap_safe_when_in_use = + CXL_GET_SPARING_SAFE_IN_USE(restriction_flags); + cxl_sparing_ctx->cap_hard_sparing = + CXL_GET_CAP_HARD_SPARING(restriction_flags); + cxl_sparing_ctx->cap_soft_sparing = + CXL_GET_CAP_SOFT_SPARING(restriction_flags); + + return 0; +} + +static struct cxl_event_dram * +cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd, + struct cxl_mem_sparing_context *ctx) +{ + struct cxl_mem_repair_attrbs attrbs = { 0 }; + + attrbs.dpa = ctx->dpa; + attrbs.channel = ctx->channel; + attrbs.rank = ctx->rank; + attrbs.nibble_mask = ctx->nibble_mask; + switch (ctx->repair_type) { + case EDAC_REPAIR_CACHELINE_SPARING: + attrbs.repair_type = CXL_CACHELINE_SPARING; + attrbs.bank_group = ctx->bank_group; + attrbs.bank = ctx->bank; + attrbs.row = ctx->row; + attrbs.column = ctx->column; + attrbs.sub_channel = ctx->sub_channel; + break; + case EDAC_REPAIR_ROW_SPARING: + attrbs.repair_type = CXL_ROW_SPARING; + attrbs.bank_group = ctx->bank_group; + attrbs.bank = ctx->bank; + attrbs.row = ctx->row; + break; + case EDAC_REPAIR_BANK_SPARING: + attrbs.repair_type = CXL_BANK_SPARING; + attrbs.bank_group = ctx->bank_group; + attrbs.bank = ctx->bank; + break; + case EDAC_REPAIR_RANK_SPARING: + attrbs.repair_type = CXL_RANK_SPARING; + break; + default: + return NULL; + } + + return cxl_find_rec_dram(cxlmd, &attrbs); +} + +static int +cxl_mem_perform_sparing(struct device *dev, + struct cxl_mem_sparing_context *cxl_sparing_ctx) +{ + struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd; + struct cxl_memdev_sparing_in_payload sparing_pi; + struct cxl_event_dram *rec = NULL; + u16 validity_flags = 0; + + struct rw_semaphore *region_lock __free(rwsem_read_release) = + rwsem_read_intr_acquire(&cxl_region_rwsem); + if (!region_lock) + return -EINTR; + + struct rw_semaphore *dpa_lock __free(rwsem_read_release) = + rwsem_read_intr_acquire(&cxl_dpa_rwsem); + if (!dpa_lock) + return -EINTR; + + if (!cxl_sparing_ctx->cap_safe_when_in_use) { + /* Memory to repair must be offline */ + if (cxl_is_memdev_memory_online(cxlmd)) + return -EBUSY; + } else { + if (cxl_is_memdev_memory_online(cxlmd)) { + rec = cxl_mem_get_rec_dram(cxlmd, cxl_sparing_ctx); + if (!rec) + return -EINVAL; + + if (!get_unaligned_le16(rec->media_hdr.validity_flags)) + return -EINVAL; + } + } + + memset(&sparing_pi, 0, sizeof(sparing_pi)); + sparing_pi.flags = CXL_SET_SPARING_QUERY_RESOURCE(0); + if (cxl_sparing_ctx->persist_mode) + sparing_pi.flags |= CXL_SET_HARD_SPARING(1); + + if (rec) + validity_flags = 
get_unaligned_le16(rec->media_hdr.validity_flags); + + switch (cxl_sparing_ctx->repair_type) { + case EDAC_REPAIR_CACHELINE_SPARING: + sparing_pi.column = cpu_to_le16(cxl_sparing_ctx->column); + if (!rec || (validity_flags & CXL_DER_VALID_SUB_CHANNEL)) { + sparing_pi.flags |= CXL_SET_SPARING_SUB_CHNL_VALID(1); + sparing_pi.sub_channel = cxl_sparing_ctx->sub_channel; + } + fallthrough; + case EDAC_REPAIR_ROW_SPARING: + put_unaligned_le24(cxl_sparing_ctx->row, sparing_pi.row); + fallthrough; + case EDAC_REPAIR_BANK_SPARING: + sparing_pi.bank_group = cxl_sparing_ctx->bank_group; + sparing_pi.bank = cxl_sparing_ctx->bank; + fallthrough; + case EDAC_REPAIR_RANK_SPARING: + sparing_pi.rank = cxl_sparing_ctx->rank; + fallthrough; + default: + sparing_pi.channel = cxl_sparing_ctx->channel; + if ((rec && (validity_flags & CXL_DER_VALID_NIBBLE)) || + (!rec && (!cxl_sparing_ctx->nibble_mask || + (cxl_sparing_ctx->nibble_mask & 0xFFFFFF)))) { + sparing_pi.flags |= CXL_SET_SPARING_NIB_MASK_VALID(1); + put_unaligned_le24(cxl_sparing_ctx->nibble_mask, + sparing_pi.nibble_mask); + } + break; + } + + return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox, + cxl_sparing_ctx->op_class, + cxl_sparing_ctx->op_subclass, + &sparing_pi, sizeof(sparing_pi)); +} + +static int cxl_mem_sparing_get_repair_type(struct device *dev, void *drv_data, + const char **repair_type) +{ + struct cxl_mem_sparing_context *ctx = drv_data; + + switch (ctx->repair_type) { + case EDAC_REPAIR_CACHELINE_SPARING: + case EDAC_REPAIR_ROW_SPARING: + case EDAC_REPAIR_BANK_SPARING: + case EDAC_REPAIR_RANK_SPARING: + *repair_type = edac_repair_type[ctx->repair_type]; + break; + default: + return -EINVAL; + } + + return 0; +} + +#define CXL_SPARING_GET_ATTR(attrb, data_type) \ + static int cxl_mem_sparing_get_##attrb( \ + struct device *dev, void *drv_data, data_type *val) \ + { \ + struct cxl_mem_sparing_context *ctx = drv_data; \ + \ + *val = ctx->attrb; \ + \ + return 0; \ + } +CXL_SPARING_GET_ATTR(persist_mode, bool) +CXL_SPARING_GET_ATTR(dpa, u64) +CXL_SPARING_GET_ATTR(nibble_mask, u32) +CXL_SPARING_GET_ATTR(bank_group, u32) +CXL_SPARING_GET_ATTR(bank, u32) +CXL_SPARING_GET_ATTR(rank, u32) +CXL_SPARING_GET_ATTR(row, u32) +CXL_SPARING_GET_ATTR(column, u32) +CXL_SPARING_GET_ATTR(channel, u32) +CXL_SPARING_GET_ATTR(sub_channel, u32) + +#define CXL_SPARING_SET_ATTR(attrb, data_type) \ + static int cxl_mem_sparing_set_##attrb(struct device *dev, \ + void *drv_data, data_type val) \ + { \ + struct cxl_mem_sparing_context *ctx = drv_data; \ + \ + ctx->attrb = val; \ + \ + return 0; \ + } +CXL_SPARING_SET_ATTR(nibble_mask, u32) +CXL_SPARING_SET_ATTR(bank_group, u32) +CXL_SPARING_SET_ATTR(bank, u32) +CXL_SPARING_SET_ATTR(rank, u32) +CXL_SPARING_SET_ATTR(row, u32) +CXL_SPARING_SET_ATTR(column, u32) +CXL_SPARING_SET_ATTR(channel, u32) +CXL_SPARING_SET_ATTR(sub_channel, u32) + +static int cxl_mem_sparing_set_persist_mode(struct device *dev, void *drv_data, + bool persist_mode) +{ + struct cxl_mem_sparing_context *ctx = drv_data; + + if ((persist_mode && ctx->cap_hard_sparing) || + (!persist_mode && ctx->cap_soft_sparing)) + ctx->persist_mode = persist_mode; + else + return -EOPNOTSUPP; + + return 0; +} + +static int cxl_get_mem_sparing_safe_when_in_use(struct device *dev, + void *drv_data, bool *safe) +{ + struct cxl_mem_sparing_context *ctx = drv_data; + + *safe = ctx->cap_safe_when_in_use; + + return 0; +} + +static int cxl_mem_sparing_get_min_dpa(struct device *dev, void *drv_data, + u64 *min_dpa) +{ + struct cxl_mem_sparing_context *ctx = drv_data; 
+ struct cxl_memdev *cxlmd = ctx->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + *min_dpa = cxlds->dpa_res.start; + + return 0; +} + +static int cxl_mem_sparing_get_max_dpa(struct device *dev, void *drv_data, + u64 *max_dpa) +{ + struct cxl_mem_sparing_context *ctx = drv_data; + struct cxl_memdev *cxlmd = ctx->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + *max_dpa = cxlds->dpa_res.end; + + return 0; +} + +static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa) +{ + struct cxl_mem_sparing_context *ctx = drv_data; + struct cxl_memdev *cxlmd = ctx->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) + return -EINVAL; + + ctx->dpa = dpa; + + return 0; +} + +static int cxl_do_mem_sparing(struct device *dev, void *drv_data, u32 val) +{ + struct cxl_mem_sparing_context *ctx = drv_data; + + if (val != EDAC_DO_MEM_REPAIR) + return -EINVAL; + + return cxl_mem_perform_sparing(dev, ctx); +} + +#define RANK_OPS \ + .get_repair_type = cxl_mem_sparing_get_repair_type, \ + .get_persist_mode = cxl_mem_sparing_get_persist_mode, \ + .set_persist_mode = cxl_mem_sparing_set_persist_mode, \ + .get_repair_safe_when_in_use = cxl_get_mem_sparing_safe_when_in_use, \ + .get_min_dpa = cxl_mem_sparing_get_min_dpa, \ + .get_max_dpa = cxl_mem_sparing_get_max_dpa, \ + .get_dpa = cxl_mem_sparing_get_dpa, \ + .set_dpa = cxl_mem_sparing_set_dpa, \ + .get_nibble_mask = cxl_mem_sparing_get_nibble_mask, \ + .set_nibble_mask = cxl_mem_sparing_set_nibble_mask, \ + .get_rank = cxl_mem_sparing_get_rank, \ + .set_rank = cxl_mem_sparing_set_rank, \ + .get_channel = cxl_mem_sparing_get_channel, \ + .set_channel = cxl_mem_sparing_set_channel, \ + .do_repair = cxl_do_mem_sparing + +#define BANK_OPS \ + RANK_OPS, .get_bank_group = cxl_mem_sparing_get_bank_group, \ + .set_bank_group = cxl_mem_sparing_set_bank_group, \ + .get_bank = cxl_mem_sparing_get_bank, \ + .set_bank = cxl_mem_sparing_set_bank + +#define ROW_OPS \ + BANK_OPS, .get_row = cxl_mem_sparing_get_row, \ + .set_row = cxl_mem_sparing_set_row + +#define CACHELINE_OPS \ + ROW_OPS, .get_column = cxl_mem_sparing_get_column, \ + .set_column = cxl_mem_sparing_set_column, \ + .get_sub_channel = cxl_mem_sparing_get_sub_channel, \ + .set_sub_channel = cxl_mem_sparing_set_sub_channel + +static const struct edac_mem_repair_ops cxl_rank_sparing_ops = { + RANK_OPS, +}; + +static const struct edac_mem_repair_ops cxl_bank_sparing_ops = { + BANK_OPS, +}; + +static const struct edac_mem_repair_ops cxl_row_sparing_ops = { + ROW_OPS, +}; + +static const struct edac_mem_repair_ops cxl_cacheline_sparing_ops = { + CACHELINE_OPS, +}; + +struct cxl_mem_sparing_desc { + const uuid_t repair_uuid; + enum edac_mem_repair_type repair_type; + const struct edac_mem_repair_ops *repair_ops; +}; + +static const struct cxl_mem_sparing_desc mem_sparing_desc[] = { + { + .repair_uuid = CXL_FEAT_CACHELINE_SPARING_UUID, + .repair_type = EDAC_REPAIR_CACHELINE_SPARING, + .repair_ops = &cxl_cacheline_sparing_ops, + }, + { + .repair_uuid = CXL_FEAT_ROW_SPARING_UUID, + .repair_type = EDAC_REPAIR_ROW_SPARING, + .repair_ops = &cxl_row_sparing_ops, + }, + { + .repair_uuid = CXL_FEAT_BANK_SPARING_UUID, + .repair_type = EDAC_REPAIR_BANK_SPARING, + .repair_ops = &cxl_bank_sparing_ops, + }, + { + .repair_uuid = CXL_FEAT_RANK_SPARING_UUID, + .repair_type = EDAC_REPAIR_RANK_SPARING, + .repair_ops = &cxl_rank_sparing_ops, + }, +}; + +static int cxl_memdev_sparing_init(struct cxl_memdev *cxlmd, + struct edac_dev_feature 
*ras_feature, + const struct cxl_mem_sparing_desc *desc, + u8 repair_inst) +{ + struct cxl_mem_sparing_context *cxl_sparing_ctx; + struct cxl_feat_entry *feat_entry; + int ret; + + feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds), + &desc->repair_uuid); + if (IS_ERR(feat_entry)) + return -EOPNOTSUPP; + + if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE)) + return -EOPNOTSUPP; + + cxl_sparing_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sparing_ctx), + GFP_KERNEL); + if (!cxl_sparing_ctx) + return -ENOMEM; + + *cxl_sparing_ctx = (struct cxl_mem_sparing_context){ + .get_feat_size = le16_to_cpu(feat_entry->get_feat_size), + .set_feat_size = le16_to_cpu(feat_entry->set_feat_size), + .get_version = feat_entry->get_feat_ver, + .set_version = feat_entry->set_feat_ver, + .effects = le16_to_cpu(feat_entry->effects), + .cxlmd = cxlmd, + .repair_type = desc->repair_type, + .instance = repair_inst++, + }; + uuid_copy(&cxl_sparing_ctx->repair_uuid, &desc->repair_uuid); + + ret = cxl_mem_sparing_get_attrbs(cxl_sparing_ctx); + if (ret) + return ret; + + if ((cxl_sparing_ctx->cap_soft_sparing && + cxl_sparing_ctx->cap_hard_sparing) || + cxl_sparing_ctx->cap_soft_sparing) + cxl_sparing_ctx->persist_mode = 0; + else if (cxl_sparing_ctx->cap_hard_sparing) + cxl_sparing_ctx->persist_mode = 1; + else + return -EOPNOTSUPP; + + ras_feature->ft_type = RAS_FEAT_MEM_REPAIR; + ras_feature->instance = cxl_sparing_ctx->instance; + ras_feature->mem_repair_ops = desc->repair_ops; + ras_feature->ctx = cxl_sparing_ctx; + + return 0; +} + +/* + * CXL memory soft PPR & hard PPR control + */ +struct cxl_ppr_context { + uuid_t repair_uuid; + u8 instance; + u16 get_feat_size; + u16 set_feat_size; + u8 get_version; + u8 set_version; + u16 effects; + u8 op_class; + u8 op_subclass; + bool cap_dpa; + bool cap_nib_mask; + bool media_accessible; + bool data_retained; + struct cxl_memdev *cxlmd; + enum edac_mem_repair_type repair_type; + bool persist_mode; + u64 dpa; + u32 nibble_mask; +}; + +/* + * See CXL rev 3.2 @8.2.10.7.2.1 Table 8-128 sPPR Feature Readable Attributes + * + * See CXL rev 3.2 @8.2.10.7.2.2 Table 8-131 hPPR Feature Readable Attributes + */ + +#define CXL_PPR_OP_CAP_DEVICE_INITIATED BIT(0) +#define CXL_PPR_OP_MODE_DEV_INITIATED BIT(0) + +#define CXL_PPR_FLAG_DPA_SUPPORT_MASK BIT(0) +#define CXL_PPR_FLAG_NIB_SUPPORT_MASK BIT(1) +#define CXL_PPR_FLAG_MEM_SPARING_EV_REC_SUPPORT_MASK BIT(2) +#define CXL_PPR_FLAG_DEV_INITED_PPR_AT_BOOT_CAP_MASK BIT(3) + +#define CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK BIT(0) +#define CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK BIT(2) + +#define CXL_PPR_SPARING_EV_REC_EN_MASK BIT(0) +#define CXL_PPR_DEV_INITED_PPR_AT_BOOT_EN_MASK BIT(1) + +#define CXL_PPR_GET_CAP_DPA(flags) \ + FIELD_GET(CXL_PPR_FLAG_DPA_SUPPORT_MASK, flags) +#define CXL_PPR_GET_CAP_NIB_MASK(flags) \ + FIELD_GET(CXL_PPR_FLAG_NIB_SUPPORT_MASK, flags) +#define CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags) \ + (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK, \ + restriction_flags) ^ 1) +#define CXL_PPR_GET_DATA_RETAINED(restriction_flags) \ + (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK, \ + restriction_flags) ^ 1) + +struct cxl_memdev_ppr_rd_attrbs { + struct cxl_memdev_repair_rd_attrbs_hdr hdr; + u8 ppr_flags; + __le16 restriction_flags; + u8 ppr_op_mode; +} __packed; + +/* + * See CXL rev 3.2 @8.2.10.7.1.2 Table 8-118 sPPR Maintenance Input Payload + * + * See CXL rev 3.2 @8.2.10.7.1.3 Table 8-119 hPPR Maintenance Input Payload + */ +struct cxl_memdev_ppr_maintenance_attrbs 
{ + u8 flags; + __le64 dpa; + u8 nibble_mask[3]; +} __packed; + +static int cxl_mem_ppr_get_attrbs(struct cxl_ppr_context *cxl_ppr_ctx) +{ + size_t rd_data_size = sizeof(struct cxl_memdev_ppr_rd_attrbs); + struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; + u16 restriction_flags; + size_t data_size; + u16 return_code; + + struct cxl_memdev_ppr_rd_attrbs *rd_attrbs __free(kfree) = + kmalloc(rd_data_size, GFP_KERNEL); + if (!rd_attrbs) + return -ENOMEM; + + data_size = cxl_get_feature(cxl_mbox, &cxl_ppr_ctx->repair_uuid, + CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs, + rd_data_size, 0, &return_code); + if (!data_size) + return -EIO; + + cxl_ppr_ctx->op_class = rd_attrbs->hdr.op_class; + cxl_ppr_ctx->op_subclass = rd_attrbs->hdr.op_subclass; + cxl_ppr_ctx->cap_dpa = CXL_PPR_GET_CAP_DPA(rd_attrbs->ppr_flags); + cxl_ppr_ctx->cap_nib_mask = + CXL_PPR_GET_CAP_NIB_MASK(rd_attrbs->ppr_flags); + + restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags); + cxl_ppr_ctx->media_accessible = + CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags); + cxl_ppr_ctx->data_retained = + CXL_PPR_GET_DATA_RETAINED(restriction_flags); + + return 0; +} + +static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx) +{ + struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs; + struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; + struct cxl_mem_repair_attrbs attrbs = { 0 }; + + struct rw_semaphore *region_lock __free(rwsem_read_release) = + rwsem_read_intr_acquire(&cxl_region_rwsem); + if (!region_lock) + return -EINTR; + + struct rw_semaphore *dpa_lock __free(rwsem_read_release) = + rwsem_read_intr_acquire(&cxl_dpa_rwsem); + if (!dpa_lock) + return -EINTR; + + if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) { + /* Memory to repair must be offline */ + if (cxl_is_memdev_memory_online(cxlmd)) + return -EBUSY; + } else { + if (cxl_is_memdev_memory_online(cxlmd)) { + /* Check memory to repair is from the current boot */ + attrbs.repair_type = CXL_PPR; + attrbs.dpa = cxl_ppr_ctx->dpa; + attrbs.nibble_mask = cxl_ppr_ctx->nibble_mask; + if (!cxl_find_rec_dram(cxlmd, &attrbs) && + !cxl_find_rec_gen_media(cxlmd, &attrbs)) + return -EINVAL; + } + } + + memset(&maintenance_attrbs, 0, sizeof(maintenance_attrbs)); + maintenance_attrbs.flags = 0; + maintenance_attrbs.dpa = cpu_to_le64(cxl_ppr_ctx->dpa); + put_unaligned_le24(cxl_ppr_ctx->nibble_mask, + maintenance_attrbs.nibble_mask); + + return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox, + cxl_ppr_ctx->op_class, + cxl_ppr_ctx->op_subclass, + &maintenance_attrbs, + sizeof(maintenance_attrbs)); +} + +static int cxl_ppr_get_repair_type(struct device *dev, void *drv_data, + const char **repair_type) +{ + *repair_type = edac_repair_type[EDAC_REPAIR_PPR]; + + return 0; +} + +static int cxl_ppr_get_persist_mode(struct device *dev, void *drv_data, + bool *persist_mode) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + + *persist_mode = cxl_ppr_ctx->persist_mode; + + return 0; +} + +static int cxl_get_ppr_safe_when_in_use(struct device *dev, void *drv_data, + bool *safe) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + + *safe = cxl_ppr_ctx->media_accessible & cxl_ppr_ctx->data_retained; + + return 0; +} + +static int cxl_ppr_get_min_dpa(struct device *dev, void *drv_data, u64 *min_dpa) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + *min_dpa = cxlds->dpa_res.start; + + return 0; +} + 
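The restriction-flag helpers above invert the sense of the spec encoding: a set bit means the restriction applies (media is not accessible, data is not retained), so the extracted bit is XORed with 1 to yield a positive capability flag, and the nibble mask travels as a 3-byte little-endian field written with put_unaligned_le24(). A minimal user-space sketch of those two conversions follows; ppr_media_accessible(), ppr_data_retained() and put_le24() are local stand-ins for the kernel macros and helpers, illustrative only and not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define PPR_MEDIA_ACCESSIBLE_MASK	(1u << 0)	/* set => media NOT accessible during repair */
#define PPR_DATA_RETAINED_MASK		(1u << 2)	/* set => data NOT retained across repair */

/* mirrors CXL_PPR_GET_MEDIA_ACCESSIBLE(): extract the bit, then invert it */
static unsigned int ppr_media_accessible(uint16_t restriction_flags)
{
	return ((restriction_flags & PPR_MEDIA_ACCESSIBLE_MASK) >> 0) ^ 1;
}

/* mirrors CXL_PPR_GET_DATA_RETAINED() */
static unsigned int ppr_data_retained(uint16_t restriction_flags)
{
	return ((restriction_flags & PPR_DATA_RETAINED_MASK) >> 2) ^ 1;
}

/* stand-in for put_unaligned_le24(): low 24 bits, least-significant byte first */
static void put_le24(uint32_t val, uint8_t dst[3])
{
	dst[0] = val & 0xff;
	dst[1] = (val >> 8) & 0xff;
	dst[2] = (val >> 16) & 0xff;
}

int main(void)
{
	uint8_t nib[3];

	/* bit 2 set: data not retained; bit 0 clear: media stays accessible */
	printf("media_accessible=%u data_retained=%u\n",
	       ppr_media_accessible(0x4), ppr_data_retained(0x4));	/* 1 0 */

	put_le24(0x00abcdef, nib);
	printf("nibble_mask bytes: %02x %02x %02x\n", nib[0], nib[1], nib[2]);	/* ef cd ab */
	return 0;
}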
+static int cxl_ppr_get_max_dpa(struct device *dev, void *drv_data, u64 *max_dpa) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + *max_dpa = cxlds->dpa_res.end; + + return 0; +} + +static int cxl_ppr_get_dpa(struct device *dev, void *drv_data, u64 *dpa) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + + *dpa = cxl_ppr_ctx->dpa; + + return 0; +} + +static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) + return -EINVAL; + + cxl_ppr_ctx->dpa = dpa; + + return 0; +} + +static int cxl_ppr_get_nibble_mask(struct device *dev, void *drv_data, + u32 *nibble_mask) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + + *nibble_mask = cxl_ppr_ctx->nibble_mask; + + return 0; +} + +static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data, + u32 nibble_mask) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + + cxl_ppr_ctx->nibble_mask = nibble_mask; + + return 0; +} + +static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val) +{ + struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + + if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR) + return -EINVAL; + + return cxl_mem_perform_ppr(cxl_ppr_ctx); +} + +static const struct edac_mem_repair_ops cxl_sppr_ops = { + .get_repair_type = cxl_ppr_get_repair_type, + .get_persist_mode = cxl_ppr_get_persist_mode, + .get_repair_safe_when_in_use = cxl_get_ppr_safe_when_in_use, + .get_min_dpa = cxl_ppr_get_min_dpa, + .get_max_dpa = cxl_ppr_get_max_dpa, + .get_dpa = cxl_ppr_get_dpa, + .set_dpa = cxl_ppr_set_dpa, + .get_nibble_mask = cxl_ppr_get_nibble_mask, + .set_nibble_mask = cxl_ppr_set_nibble_mask, + .do_repair = cxl_do_ppr, +}; + +static int cxl_memdev_soft_ppr_init(struct cxl_memdev *cxlmd, + struct edac_dev_feature *ras_feature, + u8 repair_inst) +{ + struct cxl_ppr_context *cxl_sppr_ctx; + struct cxl_feat_entry *feat_entry; + int ret; + + feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds), + &CXL_FEAT_SPPR_UUID); + if (IS_ERR(feat_entry)) + return -EOPNOTSUPP; + + if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE)) + return -EOPNOTSUPP; + + cxl_sppr_ctx = + devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sppr_ctx), GFP_KERNEL); + if (!cxl_sppr_ctx) + return -ENOMEM; + + *cxl_sppr_ctx = (struct cxl_ppr_context){ + .get_feat_size = le16_to_cpu(feat_entry->get_feat_size), + .set_feat_size = le16_to_cpu(feat_entry->set_feat_size), + .get_version = feat_entry->get_feat_ver, + .set_version = feat_entry->set_feat_ver, + .effects = le16_to_cpu(feat_entry->effects), + .cxlmd = cxlmd, + .repair_type = EDAC_REPAIR_PPR, + .persist_mode = 0, + .instance = repair_inst, + }; + uuid_copy(&cxl_sppr_ctx->repair_uuid, &CXL_FEAT_SPPR_UUID); + + ret = cxl_mem_ppr_get_attrbs(cxl_sppr_ctx); + if (ret) + return ret; + + ras_feature->ft_type = RAS_FEAT_MEM_REPAIR; + ras_feature->instance = cxl_sppr_ctx->instance; + ras_feature->mem_repair_ops = &cxl_sppr_ops; + ras_feature->ctx = cxl_sppr_ctx; + + return 0; +} + +int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd) +{ + struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES]; + int num_ras_features = 0; + u8 repair_inst = 0; + int rc; + + if (IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) { + rc = cxl_memdev_scrub_init(cxlmd, &ras_features[num_ras_features], 0); + if (rc 
< 0 && rc != -EOPNOTSUPP) + return rc; + + if (rc != -EOPNOTSUPP) + num_ras_features++; + } + + if (IS_ENABLED(CONFIG_CXL_EDAC_ECS)) { + rc = cxl_memdev_ecs_init(cxlmd, &ras_features[num_ras_features]); + if (rc < 0 && rc != -EOPNOTSUPP) + return rc; + + if (rc != -EOPNOTSUPP) + num_ras_features++; + } + + if (IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR)) { + for (int i = 0; i < CXL_MEM_SPARING_MAX; i++) { + rc = cxl_memdev_sparing_init(cxlmd, + &ras_features[num_ras_features], + &mem_sparing_desc[i], repair_inst); + if (rc == -EOPNOTSUPP) + continue; + if (rc < 0) + return rc; + + repair_inst++; + num_ras_features++; + } + + rc = cxl_memdev_soft_ppr_init(cxlmd, &ras_features[num_ras_features], + repair_inst); + if (rc < 0 && rc != -EOPNOTSUPP) + return rc; + + if (rc != -EOPNOTSUPP) { + repair_inst++; + num_ras_features++; + } + + if (repair_inst) { + struct cxl_mem_err_rec *array_rec = + devm_kzalloc(&cxlmd->dev, sizeof(*array_rec), + GFP_KERNEL); + if (!array_rec) + return -ENOMEM; + + xa_init(&array_rec->rec_gen_media); + xa_init(&array_rec->rec_dram); + cxlmd->err_rec_array = array_rec; + } + } + + if (!num_ras_features) + return -EINVAL; + + char *cxl_dev_name __free(kfree) = + kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlmd->dev)); + if (!cxl_dev_name) + return -ENOMEM; + + return edac_dev_register(&cxlmd->dev, cxl_dev_name, NULL, + num_ras_features, ras_features); +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_register, "CXL"); + +int devm_cxl_region_edac_register(struct cxl_region *cxlr) +{ + struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES]; + int num_ras_features = 0; + int rc; + + if (!IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) + return 0; + + rc = cxl_region_scrub_init(cxlr, &ras_features[num_ras_features], 0); + if (rc < 0) + return rc; + + num_ras_features++; + + char *cxl_dev_name __free(kfree) = + kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlr->dev)); + if (!cxl_dev_name) + return -ENOMEM; + + return edac_dev_register(&cxlr->dev, cxl_dev_name, NULL, + num_ras_features, ras_features); +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_region_edac_register, "CXL"); + +void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd) +{ + struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array; + struct cxl_event_gen_media *rec_gen_media; + struct cxl_event_dram *rec_dram; + unsigned long index; + + if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec) + return; + + xa_for_each(&array_rec->rec_dram, index, rec_dram) + kfree(rec_dram); + xa_destroy(&array_rec->rec_dram); + + xa_for_each(&array_rec->rec_gen_media, index, rec_gen_media) + kfree(rec_gen_media); + xa_destroy(&array_rec->rec_gen_media); +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_release, "CXL"); diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c new file mode 100644 index 000000000000..7c750599ea69 --- /dev/null +++ b/drivers/cxl/core/features.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */ +#include <linux/fwctl.h> +#include <linux/device.h> +#include <cxl/mailbox.h> +#include <cxl/features.h> +#include <uapi/fwctl/cxl.h> +#include "cxl.h" +#include "core.h" +#include "cxlmem.h" + +/** + * DOC: cxl features + * + * CXL Features: + * A CXL device that includes a mailbox supports commands that allows + * listing, getting, and setting of optionally defined features such + * as memory sparing or post package sparing. Vendors may define custom + * features for the device. 
+ */ + +/* All the features below are exclusive to the kernel */ +static const uuid_t cxl_exclusive_feats[] = { + CXL_FEAT_PATROL_SCRUB_UUID, + CXL_FEAT_ECS_UUID, + CXL_FEAT_SPPR_UUID, + CXL_FEAT_HPPR_UUID, + CXL_FEAT_CACHELINE_SPARING_UUID, + CXL_FEAT_ROW_SPARING_UUID, + CXL_FEAT_BANK_SPARING_UUID, + CXL_FEAT_RANK_SPARING_UUID, +}; + +static bool is_cxl_feature_exclusive_by_uuid(const uuid_t *uuid) +{ + for (int i = 0; i < ARRAY_SIZE(cxl_exclusive_feats); i++) { + if (uuid_equal(uuid, &cxl_exclusive_feats[i])) + return true; + } + + return false; +} + +static bool is_cxl_feature_exclusive(struct cxl_feat_entry *entry) +{ + return is_cxl_feature_exclusive_by_uuid(&entry->uuid); +} + +struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds) +{ + return cxlds->cxlfs; +} +EXPORT_SYMBOL_NS_GPL(to_cxlfs, "CXL"); + +static int cxl_get_supported_features_count(struct cxl_mailbox *cxl_mbox) +{ + struct cxl_mbox_get_sup_feats_out mbox_out; + struct cxl_mbox_get_sup_feats_in mbox_in; + struct cxl_mbox_cmd mbox_cmd; + int rc; + + memset(&mbox_in, 0, sizeof(mbox_in)); + mbox_in.count = cpu_to_le32(sizeof(mbox_out)); + memset(&mbox_out, 0, sizeof(mbox_out)); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES, + .size_in = sizeof(mbox_in), + .payload_in = &mbox_in, + .size_out = sizeof(mbox_out), + .payload_out = &mbox_out, + .min_out = sizeof(mbox_out), + }; + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); + if (rc < 0) + return rc; + + return le16_to_cpu(mbox_out.supported_feats); +} + +static struct cxl_feat_entries * +get_supported_features(struct cxl_features_state *cxlfs) +{ + int remain_feats, max_size, max_feats, start, rc, hdr_size; + struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox; + int feat_size = sizeof(struct cxl_feat_entry); + struct cxl_mbox_get_sup_feats_in mbox_in; + struct cxl_feat_entry *entry; + struct cxl_mbox_cmd mbox_cmd; + int user_feats = 0; + int count; + + count = cxl_get_supported_features_count(cxl_mbox); + if (count <= 0) + return NULL; + + struct cxl_feat_entries *entries __free(kvfree) = + kvmalloc(struct_size(entries, ent, count), GFP_KERNEL); + if (!entries) + return NULL; + + struct cxl_mbox_get_sup_feats_out *mbox_out __free(kvfree) = + kvmalloc(cxl_mbox->payload_size, GFP_KERNEL); + if (!mbox_out) + return NULL; + + hdr_size = struct_size(mbox_out, ents, 0); + max_size = cxl_mbox->payload_size - hdr_size; + /* max feat entries that can fit in mailbox max payload size */ + max_feats = max_size / feat_size; + entry = entries->ent; + + start = 0; + remain_feats = count; + do { + int retrieved, alloc_size, copy_feats; + int num_entries; + + if (remain_feats > max_feats) { + alloc_size = struct_size(mbox_out, ents, max_feats); + remain_feats = remain_feats - max_feats; + copy_feats = max_feats; + } else { + alloc_size = struct_size(mbox_out, ents, remain_feats); + copy_feats = remain_feats; + remain_feats = 0; + } + + memset(&mbox_in, 0, sizeof(mbox_in)); + mbox_in.count = cpu_to_le32(alloc_size); + mbox_in.start_idx = cpu_to_le16(start); + memset(mbox_out, 0, alloc_size); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_SUPPORTED_FEATURES, + .size_in = sizeof(mbox_in), + .payload_in = &mbox_in, + .size_out = alloc_size, + .payload_out = mbox_out, + .min_out = hdr_size, + }; + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); + if (rc < 0) + return NULL; + + if (mbox_cmd.size_out <= hdr_size) + return NULL; + + /* + * Make sure retrieved out buffer is multiple of feature + * entries. 
+ */ + retrieved = mbox_cmd.size_out - hdr_size; + if (retrieved % feat_size) + return NULL; + + num_entries = le16_to_cpu(mbox_out->num_entries); + /* + * If the reported output entries * defined entry size != + * retrieved output bytes, then the output package is incorrect. + */ + if (num_entries * feat_size != retrieved) + return NULL; + + memcpy(entry, mbox_out->ents, retrieved); + for (int i = 0; i < num_entries; i++) { + if (!is_cxl_feature_exclusive(entry + i)) + user_feats++; + } + entry += num_entries; + /* + * If the number of output entries is less than expected, add the + * remaining entries to the next batch. + */ + remain_feats += copy_feats - num_entries; + start += num_entries; + } while (remain_feats); + + entries->num_features = count; + entries->num_user_features = user_feats; + + return no_free_ptr(entries); +} + +static void free_cxlfs(void *_cxlfs) +{ + struct cxl_features_state *cxlfs = _cxlfs; + struct cxl_dev_state *cxlds = cxlfs->cxlds; + + cxlds->cxlfs = NULL; + kvfree(cxlfs->entries); + kfree(cxlfs); +} + +/** + * devm_cxl_setup_features() - Allocate and initialize features context + * @cxlds: CXL device context + * + * Return 0 on success or -errno on failure. + */ +int devm_cxl_setup_features(struct cxl_dev_state *cxlds) +{ + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; + + if (cxl_mbox->feat_cap < CXL_FEATURES_RO) + return -ENODEV; + + struct cxl_features_state *cxlfs __free(kfree) = + kzalloc(sizeof(*cxlfs), GFP_KERNEL); + if (!cxlfs) + return -ENOMEM; + + cxlfs->cxlds = cxlds; + + cxlfs->entries = get_supported_features(cxlfs); + if (!cxlfs->entries) + return -ENOMEM; + + cxlds->cxlfs = cxlfs; + + return devm_add_action_or_reset(cxlds->dev, free_cxlfs, no_free_ptr(cxlfs)); +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_features, "CXL"); + +size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid, + enum cxl_get_feat_selection selection, + void *feat_out, size_t feat_out_size, u16 offset, + u16 *return_code) +{ + size_t data_to_rd_size, size_out; + struct cxl_mbox_get_feat_in pi; + struct cxl_mbox_cmd mbox_cmd; + size_t data_rcvd_size = 0; + int rc; + + if (return_code) + *return_code = CXL_MBOX_CMD_RC_INPUT; + + if (!feat_out || !feat_out_size) + return 0; + + size_out = min(feat_out_size, cxl_mbox->payload_size); + uuid_copy(&pi.uuid, feat_uuid); + pi.selection = selection; + do { + data_to_rd_size = min(feat_out_size - data_rcvd_size, + cxl_mbox->payload_size); + pi.offset = cpu_to_le16(offset + data_rcvd_size); + pi.count = cpu_to_le16(data_to_rd_size); + + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_FEATURE, + .size_in = sizeof(pi), + .payload_in = &pi, + .size_out = size_out, + .payload_out = feat_out + data_rcvd_size, + .min_out = data_to_rd_size, + }; + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); + if (rc < 0 || !mbox_cmd.size_out) { + if (return_code) + *return_code = mbox_cmd.return_code; + return 0; + } + data_rcvd_size += mbox_cmd.size_out; + } while (data_rcvd_size < feat_out_size); + + if (return_code) + *return_code = CXL_MBOX_CMD_RC_SUCCESS; + + return data_rcvd_size; +} + +/* + * FEAT_DATA_MIN_PAYLOAD_SIZE - min extra number of bytes should be + * available in the mailbox for storing the actual feature data so that + * the feature data transfer would work as expected. 
+ */ +#define FEAT_DATA_MIN_PAYLOAD_SIZE 10 +int cxl_set_feature(struct cxl_mailbox *cxl_mbox, + const uuid_t *feat_uuid, u8 feat_version, + const void *feat_data, size_t feat_data_size, + u32 feat_flag, u16 offset, u16 *return_code) +{ + size_t data_in_size, data_sent_size = 0; + struct cxl_mbox_cmd mbox_cmd; + size_t hdr_size; + + if (return_code) + *return_code = CXL_MBOX_CMD_RC_INPUT; + + struct cxl_mbox_set_feat_in *pi __free(kfree) = + kzalloc(cxl_mbox->payload_size, GFP_KERNEL); + if (!pi) + return -ENOMEM; + + uuid_copy(&pi->uuid, feat_uuid); + pi->version = feat_version; + feat_flag &= ~CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK; + feat_flag |= CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET; + hdr_size = sizeof(pi->hdr); + /* + * Check minimum mbox payload size is available for + * the feature data transfer. + */ + if (hdr_size + FEAT_DATA_MIN_PAYLOAD_SIZE > cxl_mbox->payload_size) + return -ENOMEM; + + if (hdr_size + feat_data_size <= cxl_mbox->payload_size) { + pi->flags = cpu_to_le32(feat_flag | + CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER); + data_in_size = feat_data_size; + } else { + pi->flags = cpu_to_le32(feat_flag | + CXL_SET_FEAT_FLAG_INITIATE_DATA_TRANSFER); + data_in_size = cxl_mbox->payload_size - hdr_size; + } + + do { + int rc; + + pi->offset = cpu_to_le16(offset + data_sent_size); + memcpy(pi->feat_data, feat_data + data_sent_size, data_in_size); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_SET_FEATURE, + .size_in = hdr_size + data_in_size, + .payload_in = pi, + }; + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); + if (rc < 0) { + if (return_code) + *return_code = mbox_cmd.return_code; + return rc; + } + + data_sent_size += data_in_size; + if (data_sent_size >= feat_data_size) { + if (return_code) + *return_code = CXL_MBOX_CMD_RC_SUCCESS; + return 0; + } + + if ((feat_data_size - data_sent_size) <= (cxl_mbox->payload_size - hdr_size)) { + data_in_size = feat_data_size - data_sent_size; + pi->flags = cpu_to_le32(feat_flag | + CXL_SET_FEAT_FLAG_FINISH_DATA_TRANSFER); + } else { + pi->flags = cpu_to_le32(feat_flag | + CXL_SET_FEAT_FLAG_CONTINUE_DATA_TRANSFER); + } + } while (true); +} + +/* FWCTL support */ + +static inline struct cxl_memdev *fwctl_to_memdev(struct fwctl_device *fwctl_dev) +{ + return to_cxl_memdev(fwctl_dev->dev.parent); +} + +static int cxlctl_open_uctx(struct fwctl_uctx *uctx) +{ + return 0; +} + +static void cxlctl_close_uctx(struct fwctl_uctx *uctx) +{ +} + +struct cxl_feat_entry * +cxl_feature_info(struct cxl_features_state *cxlfs, + const uuid_t *uuid) +{ + struct cxl_feat_entry *feat; + + for (int i = 0; i < cxlfs->entries->num_features; i++) { + feat = &cxlfs->entries->ent[i]; + if (uuid_equal(uuid, &feat->uuid)) + return feat; + } + + return ERR_PTR(-EINVAL); +} + +static void *cxlctl_get_supported_features(struct cxl_features_state *cxlfs, + const struct fwctl_rpc_cxl *rpc_in, + size_t *out_len) +{ + const struct cxl_mbox_get_sup_feats_in *feat_in; + struct cxl_mbox_get_sup_feats_out *feat_out; + struct cxl_feat_entry *pos; + size_t out_size; + int requested; + u32 count; + u16 start; + int i; + + if (rpc_in->op_size != sizeof(*feat_in)) + return ERR_PTR(-EINVAL); + + feat_in = &rpc_in->get_sup_feats_in; + count = le32_to_cpu(feat_in->count); + start = le16_to_cpu(feat_in->start_idx); + requested = count / sizeof(*pos); + + /* + * Make sure that the total requested number of entries is not greater + * than the total number of supported features allowed for userspace. 
+ */ + if (start >= cxlfs->entries->num_features) + return ERR_PTR(-EINVAL); + + requested = min_t(int, requested, cxlfs->entries->num_features - start); + + out_size = sizeof(struct fwctl_rpc_cxl_out) + + struct_size(feat_out, ents, requested); + + struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) = + kvzalloc(out_size, GFP_KERNEL); + if (!rpc_out) + return ERR_PTR(-ENOMEM); + + rpc_out->size = struct_size(feat_out, ents, requested); + feat_out = &rpc_out->get_sup_feats_out; + + for (i = start, pos = &feat_out->ents[0]; + i < cxlfs->entries->num_features; i++, pos++) { + if (i - start == requested) + break; + + memcpy(pos, &cxlfs->entries->ent[i], sizeof(*pos)); + /* + * If the feature is exclusive, set the set_feat_size to 0 to + * indicate that the feature is not changeable. + */ + if (is_cxl_feature_exclusive(pos)) { + u32 flags; + + pos->set_feat_size = 0; + flags = le32_to_cpu(pos->flags); + flags &= ~CXL_FEATURE_F_CHANGEABLE; + pos->flags = cpu_to_le32(flags); + } + } + + feat_out->num_entries = cpu_to_le16(requested); + feat_out->supported_feats = cpu_to_le16(cxlfs->entries->num_features); + rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS; + *out_len = out_size; + + return no_free_ptr(rpc_out); +} + +static void *cxlctl_get_feature(struct cxl_features_state *cxlfs, + const struct fwctl_rpc_cxl *rpc_in, + size_t *out_len) +{ + struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox; + const struct cxl_mbox_get_feat_in *feat_in; + u16 offset, count, return_code; + size_t out_size = *out_len; + + if (rpc_in->op_size != sizeof(*feat_in)) + return ERR_PTR(-EINVAL); + + feat_in = &rpc_in->get_feat_in; + offset = le16_to_cpu(feat_in->offset); + count = le16_to_cpu(feat_in->count); + + if (!count) + return ERR_PTR(-EINVAL); + + struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) = + kvzalloc(out_size, GFP_KERNEL); + if (!rpc_out) + return ERR_PTR(-ENOMEM); + + out_size = cxl_get_feature(cxl_mbox, &feat_in->uuid, + feat_in->selection, rpc_out->payload, + count, offset, &return_code); + *out_len = sizeof(struct fwctl_rpc_cxl_out); + if (!out_size) { + rpc_out->size = 0; + rpc_out->retval = return_code; + return no_free_ptr(rpc_out); + } + + rpc_out->size = out_size; + rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS; + *out_len += out_size; + + return no_free_ptr(rpc_out); +} + +static void *cxlctl_set_feature(struct cxl_features_state *cxlfs, + const struct fwctl_rpc_cxl *rpc_in, + size_t *out_len) +{ + struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox; + const struct cxl_mbox_set_feat_in *feat_in; + size_t out_size, data_size; + u16 offset, return_code; + u32 flags; + int rc; + + if (rpc_in->op_size <= sizeof(feat_in->hdr)) + return ERR_PTR(-EINVAL); + + feat_in = &rpc_in->set_feat_in; + + if (is_cxl_feature_exclusive_by_uuid(&feat_in->uuid)) + return ERR_PTR(-EPERM); + + offset = le16_to_cpu(feat_in->offset); + flags = le32_to_cpu(feat_in->flags); + out_size = *out_len; + + struct fwctl_rpc_cxl_out *rpc_out __free(kvfree) = + kvzalloc(out_size, GFP_KERNEL); + if (!rpc_out) + return ERR_PTR(-ENOMEM); + + rpc_out->size = 0; + + data_size = rpc_in->op_size - sizeof(feat_in->hdr); + rc = cxl_set_feature(cxl_mbox, &feat_in->uuid, + feat_in->version, feat_in->feat_data, + data_size, flags, offset, &return_code); + *out_len = sizeof(*rpc_out); + if (rc) { + rpc_out->retval = return_code; + return no_free_ptr(rpc_out); + } + + rpc_out->retval = CXL_MBOX_CMD_RC_SUCCESS; + + return no_free_ptr(rpc_out); +} + +static bool cxlctl_validate_set_features(struct cxl_features_state *cxlfs, + const struct fwctl_rpc_cxl 
*rpc_in, + enum fwctl_rpc_scope scope) +{ + u16 effects, imm_mask, reset_mask; + struct cxl_feat_entry *feat; + u32 flags; + + if (rpc_in->op_size < sizeof(uuid_t)) + return false; + + feat = cxl_feature_info(cxlfs, &rpc_in->set_feat_in.uuid); + if (IS_ERR(feat)) + return false; + + /* Ensure that the attribute is changeable */ + flags = le32_to_cpu(feat->flags); + if (!(flags & CXL_FEATURE_F_CHANGEABLE)) + return false; + + effects = le16_to_cpu(feat->effects); + + /* + * Reserved bits are set, rejecting since the effects is not + * comprehended by the driver. + */ + if (effects & CXL_CMD_EFFECTS_RESERVED) { + dev_warn_once(cxlfs->cxlds->dev, + "Reserved bits set in the Feature effects field!\n"); + return false; + } + + /* Currently no user background command support */ + if (effects & CXL_CMD_BACKGROUND) + return false; + + /* Effects cause immediate change, highest security scope is needed */ + imm_mask = CXL_CMD_CONFIG_CHANGE_IMMEDIATE | + CXL_CMD_DATA_CHANGE_IMMEDIATE | + CXL_CMD_POLICY_CHANGE_IMMEDIATE | + CXL_CMD_LOG_CHANGE_IMMEDIATE; + + reset_mask = CXL_CMD_CONFIG_CHANGE_COLD_RESET | + CXL_CMD_CONFIG_CHANGE_CONV_RESET | + CXL_CMD_CONFIG_CHANGE_CXL_RESET; + + /* If no immediate or reset effect set, The hardware has a bug */ + if (!(effects & imm_mask) && !(effects & reset_mask)) + return false; + + /* + * If the Feature setting causes immediate configuration change + * then we need the full write permission policy. + */ + if (effects & imm_mask && scope >= FWCTL_RPC_DEBUG_WRITE_FULL) + return true; + + /* + * If the Feature setting only causes configuration change + * after a reset, then the lesser level of write permission + * policy is ok. + */ + if (!(effects & imm_mask) && scope >= FWCTL_RPC_DEBUG_WRITE) + return true; + + return false; +} + +static bool cxlctl_validate_hw_command(struct cxl_features_state *cxlfs, + const struct fwctl_rpc_cxl *rpc_in, + enum fwctl_rpc_scope scope, + u16 opcode) +{ + struct cxl_mailbox *cxl_mbox = &cxlfs->cxlds->cxl_mbox; + + switch (opcode) { + case CXL_MBOX_OP_GET_SUPPORTED_FEATURES: + case CXL_MBOX_OP_GET_FEATURE: + return cxl_mbox->feat_cap >= CXL_FEATURES_RO; + case CXL_MBOX_OP_SET_FEATURE: + if (cxl_mbox->feat_cap < CXL_FEATURES_RW) + return false; + return cxlctl_validate_set_features(cxlfs, rpc_in, scope); + default: + return false; + } +} + +static void *cxlctl_handle_commands(struct cxl_features_state *cxlfs, + const struct fwctl_rpc_cxl *rpc_in, + size_t *out_len, u16 opcode) +{ + switch (opcode) { + case CXL_MBOX_OP_GET_SUPPORTED_FEATURES: + return cxlctl_get_supported_features(cxlfs, rpc_in, out_len); + case CXL_MBOX_OP_GET_FEATURE: + return cxlctl_get_feature(cxlfs, rpc_in, out_len); + case CXL_MBOX_OP_SET_FEATURE: + return cxlctl_set_feature(cxlfs, rpc_in, out_len); + default: + return ERR_PTR(-EOPNOTSUPP); + } +} + +static void *cxlctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope, + void *in, size_t in_len, size_t *out_len) +{ + struct fwctl_device *fwctl_dev = uctx->fwctl; + struct cxl_memdev *cxlmd = fwctl_to_memdev(fwctl_dev); + struct cxl_features_state *cxlfs = to_cxlfs(cxlmd->cxlds); + const struct fwctl_rpc_cxl *rpc_in = in; + u16 opcode = rpc_in->opcode; + + if (!cxlctl_validate_hw_command(cxlfs, rpc_in, scope, opcode)) + return ERR_PTR(-EINVAL); + + return cxlctl_handle_commands(cxlfs, rpc_in, out_len, opcode); +} + +static const struct fwctl_ops cxlctl_ops = { + .device_type = FWCTL_DEVICE_TYPE_CXL, + .uctx_size = sizeof(struct fwctl_uctx), + .open_uctx = cxlctl_open_uctx, + .close_uctx = 
cxlctl_close_uctx, + .fw_rpc = cxlctl_fw_rpc, +}; + +DEFINE_FREE(free_fwctl_dev, struct fwctl_device *, if (_T) fwctl_put(_T)) + +static void free_memdev_fwctl(void *_fwctl_dev) +{ + struct fwctl_device *fwctl_dev = _fwctl_dev; + + fwctl_unregister(fwctl_dev); + fwctl_put(fwctl_dev); +} + +int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_features_state *cxlfs; + int rc; + + cxlfs = to_cxlfs(cxlds); + if (!cxlfs) + return -ENODEV; + + /* No need to setup FWCTL if there are no user allowed features found */ + if (!cxlfs->entries->num_user_features) + return -ENODEV; + + struct fwctl_device *fwctl_dev __free(free_fwctl_dev) = + _fwctl_alloc_device(&cxlmd->dev, &cxlctl_ops, sizeof(*fwctl_dev)); + if (!fwctl_dev) + return -ENOMEM; + + rc = fwctl_register(fwctl_dev); + if (rc) + return rc; + + return devm_add_action_or_reset(host, free_memdev_fwctl, + no_free_ptr(fwctl_dev)); +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fwctl, "CXL"); + +MODULE_IMPORT_NS("FWCTL"); diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 7d97790b893d..ab1007495f6b 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -34,7 +34,8 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, if (rc) return rc; - dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev)); + dev_dbg(port->uport_dev, "%s added to %s\n", + dev_name(&cxld->dev), dev_name(&port->dev)); return 0; } @@ -52,6 +53,14 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port) struct cxl_dport *dport = NULL; int single_port_map[1]; unsigned long index; + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + + /* + * Capability checks are moot for passthrough decoders, support + * any and all possibilities. 
+ */ + cxlhdm->interleave_mask = ~0U; + cxlhdm->iw_cap_mask = ~0UL; cxlsd = cxl_switch_decoder_alloc(port, 1); if (IS_ERR(cxlsd)) @@ -65,7 +74,7 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port) return add_hdm_decoder(port, &cxlsd->cxld, single_port_map); } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL"); static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm) { @@ -79,6 +88,11 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm) cxlhdm->interleave_mask |= GENMASK(11, 8); if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap)) cxlhdm->interleave_mask |= GENMASK(14, 12); + cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8); + if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap)) + cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12); + if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap)) + cxlhdm->iw_cap_mask |= BIT(16); } static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info) @@ -186,7 +200,7 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, return cxlhdm; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL"); static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth) { @@ -200,15 +214,45 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds) { struct resource *p1, *p2; - down_read(&cxl_dpa_rwsem); + guard(rwsem_read)(&cxl_dpa_rwsem); for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) { __cxl_dpa_debug(file, p1, 0); for (p2 = p1->child; p2; p2 = p2->sibling) __cxl_dpa_debug(file, p2, 1); } - up_read(&cxl_dpa_rwsem); } -EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL"); + +/* See request_skip() kernel-doc */ +static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds, + const resource_size_t skip_base, + const resource_size_t skip_len, + const char *requester) +{ + const resource_size_t skip_end = skip_base + skip_len - 1; + + for (int i = 0; i < cxlds->nr_partitions; i++) { + const struct resource *part_res = &cxlds->part[i].res; + resource_size_t adjust_start, adjust_end, size; + + adjust_start = max(skip_base, part_res->start); + adjust_end = min(skip_end, part_res->end); + + if (adjust_end < adjust_start) + continue; + + size = adjust_end - adjust_start + 1; + + if (!requester) + __release_region(&cxlds->dpa_res, adjust_start, size); + else if (!__request_region(&cxlds->dpa_res, adjust_start, size, + requester, 0)) + return adjust_start - skip_base; + } + + return skip_len; +} +#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL) /* * Must be called in a context that synchronizes against this decoder's @@ -228,7 +272,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled) skip_start = res->start - cxled->skip; __release_region(&cxlds->dpa_res, res->start, resource_size(res)); if (cxled->skip) - __release_region(&cxlds->dpa_res, skip_start, cxled->skip); + release_skip(cxlds, skip_start, cxled->skip); cxled->skip = 0; cxled->dpa_res = NULL; put_device(&cxled->cxld.dev); @@ -237,9 +281,8 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled) static void cxl_dpa_release(void *cxled) { - down_write(&cxl_dpa_rwsem); + guard(rwsem_write)(&cxl_dpa_rwsem); __cxl_dpa_release(cxled); - up_write(&cxl_dpa_rwsem); } /* @@ -255,6 +298,58 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled) __cxl_dpa_release(cxled); } +/** + * request_skip() - Track DPA 'skip' in 
@cxlds->dpa_res resource tree + * @cxlds: CXL.mem device context that parents @cxled + * @cxled: Endpoint decoder establishing new allocation that skips lower DPA + * @skip_base: DPA < start of new DPA allocation (DPAnew) + * @skip_len: @skip_base + @skip_len == DPAnew + * + * DPA 'skip' arises from out-of-sequence DPA allocation events relative + * to free capacity across multiple partitions. It is a wasteful event + * as usable DPA gets thrown away, but if a deployment has, for example, + * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM + * DPA, the free RAM DPA must be sacrificed to start allocating PMEM. + * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder + * Protection" for more details. + * + * A 'skip' always covers the last allocated DPA in a previous partition + * to the start of the current partition to allocate. Allocations never + * start in the middle of a partition, and allocations are always + * de-allocated in reverse order (see cxl_dpa_free(), or natural devm + * unwind order from forced in-order allocation). + * + * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip' + * would always be contained to a single partition. Given + * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip' + * might span "tail capacity of partition[0], all of partition[1], ..., + * all of partition[N-1]" to support allocating from partition[N]. That + * in turn interacts with the partition 'struct resource' boundaries + * within @cxlds->dpa_res whereby 'skip' requests need to be divided by + * partition. I.e. this is a quirk of using a 'struct resource' tree to + * detect range conflicts while also tracking partition boundaries in + * @cxlds->dpa_res. + */ +static int request_skip(struct cxl_dev_state *cxlds, + struct cxl_endpoint_decoder *cxled, + const resource_size_t skip_base, + const resource_size_t skip_len) +{ + resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len, + dev_name(&cxled->cxld.dev)); + + if (skipped == skip_len) + return 0; + + dev_dbg(cxlds->dev, + "%s: failed to reserve skipped space (%pa %pa %pa)\n", + dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped); + + release_skip(cxlds, skip_base, skipped); + + return -EBUSY; +} + static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, resource_size_t base, resource_size_t len, resource_size_t skipped) @@ -264,6 +359,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, struct cxl_dev_state *cxlds = cxlmd->cxlds; struct device *dev = &port->dev; struct resource *res; + int rc; lockdep_assert_held_write(&cxl_dpa_rwsem); @@ -292,14 +388,9 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, } if (skipped) { - res = __request_region(&cxlds->dpa_res, base - skipped, skipped, - dev_name(&cxled->cxld.dev), 0); - if (!res) { - dev_dbg(dev, - "decoder%d.%d: failed to reserve skipped space\n", - port->id, cxled->cxld.id); - return -EBUSY; - } + rc = request_skip(cxlds, cxled, base - skipped, skipped); + if (rc) + return rc; } res = __request_region(&cxlds->dpa_res, base, len, dev_name(&cxled->cxld.dev), 0); @@ -307,28 +398,117 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n", port->id, cxled->cxld.id); if (skipped) - __release_region(&cxlds->dpa_res, base - skipped, - skipped); + release_skip(cxlds, base - skipped, skipped); return -EBUSY; } cxled->dpa_res = res; cxled->skip = skipped; - if (resource_contains(&cxlds->pmem_res, res)) - 
cxled->mode = CXL_DECODER_PMEM;
-	else if (resource_contains(&cxlds->ram_res, res))
-		cxled->mode = CXL_DECODER_RAM;
-	else {
-		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
-			cxled->cxld.id, cxled->dpa_res);
-		cxled->mode = CXL_DECODER_MIXED;
-	}
+	/*
+	 * When allocating new capacity, ->part is already set, when
+	 * discovering decoder settings at initial enumeration, ->part
+	 * is not set.
+	 */
+	if (cxled->part < 0)
+		for (int i = 0; i < cxlds->nr_partitions; i++)
+			if (resource_contains(&cxlds->part[i].res, res)) {
+				cxled->part = i;
+				break;
+			}
+
+	if (cxled->part < 0)
+		dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
+			 port->id, cxled->cxld.id, res);
 	port->hdm_end++;
 	get_device(&cxled->cxld.dev);
 	return 0;
 }
+static int add_dpa_res(struct device *dev, struct resource *parent,
+		       struct resource *res, resource_size_t start,
+		       resource_size_t size, const char *type)
+{
+	int rc;
+
+	*res = (struct resource) {
+		.name = type,
+		.start = start,
+		.end = start + size - 1,
+		.flags = IORESOURCE_MEM,
+	};
+	if (resource_size(res) == 0) {
+		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
+		return 0;
+	}
+	rc = request_resource(parent, res);
+	if (rc) {
+		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+			res, rc);
+		return rc;
+	}
+
+	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+	return 0;
+}
+
+static const char *cxl_mode_name(enum cxl_partition_mode mode)
+{
+	switch (mode) {
+	case CXL_PARTMODE_RAM:
+		return "ram";
+	case CXL_PARTMODE_PMEM:
+		return "pmem";
+	default:
+		return "";
+	};
+}
+
+/* if this fails the caller must destroy @cxlds, there is no recovery */
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
+{
+	struct device *dev = cxlds->dev;
+
+	guard(rwsem_write)(&cxl_dpa_rwsem);
+
+	if (cxlds->nr_partitions)
+		return -EBUSY;
+
+	if (!info->size || !info->nr_partitions) {
+		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
+		cxlds->nr_partitions = 0;
+		return 0;
+	}
+
+	cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);
+
+	for (int i = 0; i < info->nr_partitions; i++) {
+		const struct cxl_dpa_part_info *part = &info->part[i];
+		int rc;
+
+		cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
+		cxlds->part[i].mode = part->mode;
+
+		/* Require ordered + contiguous partitions */
+		if (i) {
+			const struct cxl_dpa_part_info *prev = &info->part[i - 1];
+
+			if (prev->range.end + 1 != part->range.start)
+				return -EINVAL;
+		}
+		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
+				 part->range.start, range_len(&part->range),
+				 cxl_mode_name(part->mode));
+		if (rc)
+			return rc;
+		cxlds->nr_partitions++;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_dpa_setup);
+
 int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 			 resource_size_t base, resource_size_t len,
 			 resource_size_t skipped)
@@ -345,18 +525,15 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 
 	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
 }
-EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
 
 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
 {
-	resource_size_t size = 0;
-
-	down_read(&cxl_dpa_rwsem);
+	guard(rwsem_read)(&cxl_dpa_rwsem);
 	if (cxled->dpa_res)
-		size = resource_size(cxled->dpa_res);
-	up_read(&cxl_dpa_rwsem);
+		return resource_size(cxled->dpa_res);
 
-	return size;
+	return 0;
 }
 
 resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
@@ -374,161 +551,136 @@ int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
 {
 	struct cxl_port *port 
= cxled_to_port(cxled); struct device *dev = &cxled->cxld.dev; - int rc; - down_write(&cxl_dpa_rwsem); - if (!cxled->dpa_res) { - rc = 0; - goto out; - } + guard(rwsem_write)(&cxl_dpa_rwsem); + if (!cxled->dpa_res) + return 0; if (cxled->cxld.region) { dev_dbg(dev, "decoder assigned to: %s\n", dev_name(&cxled->cxld.region->dev)); - rc = -EBUSY; - goto out; + return -EBUSY; } if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { dev_dbg(dev, "decoder enabled\n"); - rc = -EBUSY; - goto out; + return -EBUSY; } if (cxled->cxld.id != port->hdm_end) { dev_dbg(dev, "expected decoder%d.%d\n", port->id, port->hdm_end); - rc = -EBUSY; - goto out; + return -EBUSY; } + devm_cxl_dpa_release(cxled); - rc = 0; -out: - up_write(&cxl_dpa_rwsem); - return rc; + return 0; } -int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, - enum cxl_decoder_mode mode) +int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled, + enum cxl_partition_mode mode) { struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); struct cxl_dev_state *cxlds = cxlmd->cxlds; struct device *dev = &cxled->cxld.dev; - int rc; + int part; - switch (mode) { - case CXL_DECODER_RAM: - case CXL_DECODER_PMEM: - break; - default: + guard(rwsem_write)(&cxl_dpa_rwsem); + if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) + return -EBUSY; + + for (part = 0; part < cxlds->nr_partitions; part++) + if (cxlds->part[part].mode == mode) + break; + + if (part >= cxlds->nr_partitions) { dev_dbg(dev, "unsupported mode: %d\n", mode); return -EINVAL; } - down_write(&cxl_dpa_rwsem); - if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { - rc = -EBUSY; - goto out; - } - - /* - * Only allow modes that are supported by the current partition - * configuration - */ - if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) { - dev_dbg(dev, "no available pmem capacity\n"); - rc = -ENXIO; - goto out; - } - if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) { - dev_dbg(dev, "no available ram capacity\n"); - rc = -ENXIO; - goto out; + if (!resource_size(&cxlds->part[part].res)) { + dev_dbg(dev, "no available capacity for mode: %d\n", mode); + return -ENXIO; } - cxled->mode = mode; - rc = 0; -out: - up_write(&cxl_dpa_rwsem); - - return rc; + cxled->part = part; + return 0; } -int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size) +static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size) { struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); - resource_size_t free_ram_start, free_pmem_start; - struct cxl_port *port = cxled_to_port(cxled); struct cxl_dev_state *cxlds = cxlmd->cxlds; struct device *dev = &cxled->cxld.dev; - resource_size_t start, avail, skip; + struct resource *res, *prev = NULL; + resource_size_t start, avail, skip, skip_start; struct resource *p, *last; - int rc; + int part; - down_write(&cxl_dpa_rwsem); + guard(rwsem_write)(&cxl_dpa_rwsem); if (cxled->cxld.region) { dev_dbg(dev, "decoder attached to %s\n", dev_name(&cxled->cxld.region->dev)); - rc = -EBUSY; - goto out; + return -EBUSY; } if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { dev_dbg(dev, "decoder enabled\n"); - rc = -EBUSY; - goto out; + return -EBUSY; } - for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling) - last = p; - if (last) - free_ram_start = last->end + 1; - else - free_ram_start = cxlds->ram_res.start; + part = cxled->part; + if (part < 0) { + dev_dbg(dev, "partition not set\n"); + return -EBUSY; + } - for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling) + res = &cxlds->part[part].res; + for (p = res->child, last = NULL; p; 
p = p->sibling) last = p; if (last) - free_pmem_start = last->end + 1; + start = last->end + 1; else - free_pmem_start = cxlds->pmem_res.start; - - if (cxled->mode == CXL_DECODER_RAM) { - start = free_ram_start; - avail = cxlds->ram_res.end - start + 1; - skip = 0; - } else if (cxled->mode == CXL_DECODER_PMEM) { - resource_size_t skip_start, skip_end; - - start = free_pmem_start; - avail = cxlds->pmem_res.end - start + 1; - skip_start = free_ram_start; + start = res->start; - /* - * If some pmem is already allocated, then that allocation - * already handled the skip. - */ - if (cxlds->pmem_res.child && - skip_start == cxlds->pmem_res.child->start) - skip_end = skip_start - 1; - else - skip_end = start - 1; - skip = skip_end - skip_start + 1; - } else { - dev_dbg(dev, "mode not set\n"); - rc = -EINVAL; - goto out; + /* + * To allocate at partition N, a skip needs to be calculated for all + * unallocated space at lower partitions indices. + * + * If a partition has any allocations, the search can end because a + * previous cxl_dpa_alloc() invocation is assumed to have accounted for + * all previous partitions. + */ + skip_start = CXL_RESOURCE_NONE; + for (int i = part; i; i--) { + prev = &cxlds->part[i - 1].res; + for (p = prev->child, last = NULL; p; p = p->sibling) + last = p; + if (last) { + skip_start = last->end + 1; + break; + } + skip_start = prev->start; } + avail = res->end - start + 1; + if (skip_start == CXL_RESOURCE_NONE) + skip = 0; + else + skip = res->start - skip_start; + if (size > avail) { - dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size, - cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem", - &avail); - rc = -ENOSPC; - goto out; + dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size, + res->name, (u64)avail); + return -ENOSPC; } - rc = __cxl_dpa_reserve(cxled, start, size, skip); -out: - up_write(&cxl_dpa_rwsem); + return __cxl_dpa_reserve(cxled, start, size, skip); +} + +int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size) +{ + struct cxl_port *port = cxled_to_port(cxled); + int rc; + rc = __cxl_dpa_alloc(cxled, size); if (rc) return rc; @@ -700,7 +852,44 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) return 0; } -static int cxl_decoder_reset(struct cxl_decoder *cxld) +static int commit_reap(struct device *dev, void *data) +{ + struct cxl_port *port = to_cxl_port(dev->parent); + struct cxl_decoder *cxld; + + if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + if (port->commit_end == cxld->id && + ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { + port->commit_end--; + dev_dbg(&port->dev, "reap: %s commit_end: %d\n", + dev_name(&cxld->dev), port->commit_end); + } + + return 0; +} + +void cxl_port_commit_reap(struct cxl_decoder *cxld) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + + lockdep_assert_held_write(&cxl_region_rwsem); + + /* + * Once the highest committed decoder is disabled, free any other + * decoders that were pinned allocated by out-of-order release. 
+ */ + port->commit_end--; + dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev), + port->commit_end); + device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL, + commit_reap); +} +EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL"); + +static void cxl_decoder_reset(struct cxl_decoder *cxld) { struct cxl_port *port = to_cxl_port(cxld->dev.parent); struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); @@ -709,14 +898,14 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) u32 ctrl; if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) - return 0; + return; - if (port->commit_end != id) { + if (port->commit_end == id) + cxl_port_commit_reap(cxld); + else dev_dbg(&port->dev, "%s: out of order reset, expected decoder%d.%d\n", dev_name(&cxld->dev), port->id, port->commit_end); - return -EBUSY; - } down_read(&cxl_dpa_rwsem); ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); @@ -729,7 +918,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); up_read(&cxl_dpa_rwsem); - port->commit_end--; cxld->flags &= ~CXL_DECODER_F_ENABLE; /* Userspace is now responsible for reconfiguring this decoder */ @@ -739,8 +927,6 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) cxled = to_cxl_endpoint_decoder(&cxld->dev); cxled->state = CXL_DECODER_STATE_MANUAL; } - - return 0; } static int cxl_setup_hdm_decoder_from_dvsec( @@ -888,8 +1074,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, } rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), &cxld->interleave_granularity); - if (rc) + if (rc) { + dev_warn(&port->dev, + "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n", + port->id, cxld->id, ctrl); return rc; + } dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, @@ -1023,4 +1213,4 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, return 0; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL"); diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index f0f54aeccc87..2689e6453c5a 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -4,13 +4,14 @@ #include <linux/debugfs.h> #include <linux/ktime.h> #include <linux/mutex.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <cxlpci.h> #include <cxlmem.h> #include <cxl.h> #include "core.h" #include "trace.h" +#include "mce.h" static bool cxl_raw_allow_all; @@ -56,6 +57,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0), + CXL_CMD(CLEAR_LOG, 0x10, 0, 0), + CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), @@ -222,7 +226,7 @@ static const char *cxl_mem_opcode_to_name(u16 opcode) /** * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command - * @mds: The driver data for the operation + * @cxl_mbox: CXL mailbox context * @mbox_cmd: initialized command to execute * * Context: Any context. @@ -238,19 +242,19 @@ static const char *cxl_mem_opcode_to_name(u16 opcode) * error. 
While this distinction can be useful for commands from userspace, the * kernel will only be able to use results when both are successful. */ -int cxl_internal_send_cmd(struct cxl_memdev_state *mds, +int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *mbox_cmd) { size_t out_size, min_out; int rc; - if (mbox_cmd->size_in > mds->payload_size || - mbox_cmd->size_out > mds->payload_size) + if (mbox_cmd->size_in > cxl_mbox->payload_size || + mbox_cmd->size_out > cxl_mbox->payload_size) return -E2BIG; out_size = mbox_cmd->size_out; min_out = mbox_cmd->min_out; - rc = mds->mbox_send(mds, mbox_cmd); + rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd); /* * EIO is reserved for a payload size mismatch and mbox_send() * may not return this error. @@ -278,7 +282,7 @@ int cxl_internal_send_cmd(struct cxl_memdev_state *mds, return -EIO; return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL"); static bool cxl_mem_raw_command_allowed(u16 opcode) { @@ -331,45 +335,54 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) return false; break; } + case CXL_MBOX_OP_CLEAR_LOG: { + const uuid_t *uuid = (uuid_t *)payload_in; + + /* + * Restrict the ‘Clear log’ action to only apply to + * Vendor debug logs. + */ + return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID); + } default: break; } return true; } -static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox, - struct cxl_memdev_state *mds, u16 opcode, +static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd, + struct cxl_mailbox *cxl_mbox, u16 opcode, size_t in_size, size_t out_size, u64 in_payload) { - *mbox = (struct cxl_mbox_cmd) { + *mbox_cmd = (struct cxl_mbox_cmd) { .opcode = opcode, .size_in = in_size, }; if (in_size) { - mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload), - in_size); - if (IS_ERR(mbox->payload_in)) - return PTR_ERR(mbox->payload_in); + mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload), + in_size); + if (IS_ERR(mbox_cmd->payload_in)) + return PTR_ERR(mbox_cmd->payload_in); - if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) { - dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n", + if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) { + dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n", cxl_mem_opcode_to_name(opcode)); - kvfree(mbox->payload_in); + kvfree(mbox_cmd->payload_in); return -EBUSY; } } /* Prepare to handle a full payload for variable sized output */ if (out_size == CXL_VARIABLE_PAYLOAD) - mbox->size_out = mds->payload_size; + mbox_cmd->size_out = cxl_mbox->payload_size; else - mbox->size_out = out_size; + mbox_cmd->size_out = out_size; - if (mbox->size_out) { - mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL); - if (!mbox->payload_out) { - kvfree(mbox->payload_in); + if (mbox_cmd->size_out) { + mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL); + if (!mbox_cmd->payload_out) { + kvfree(mbox_cmd->payload_in); return -ENOMEM; } } @@ -384,7 +397,7 @@ static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox) static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, const struct cxl_send_command *send_cmd, - struct cxl_memdev_state *mds) + struct cxl_mailbox *cxl_mbox) { if (send_cmd->raw.rsvd) return -EINVAL; @@ -394,13 +407,13 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, * gets passed along without further checking, so it must be * validated here. 
*/ - if (send_cmd->out.size > mds->payload_size) + if (send_cmd->out.size > cxl_mbox->payload_size) return -EINVAL; if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) return -EPERM; - dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n"); + dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n"); *mem_cmd = (struct cxl_mem_command) { .info = { @@ -416,7 +429,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, const struct cxl_send_command *send_cmd, - struct cxl_memdev_state *mds) + struct cxl_mailbox *cxl_mbox) { struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id]; const struct cxl_command_info *info = &c->info; @@ -431,11 +444,11 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, return -EINVAL; /* Check that the command is enabled for hardware */ - if (!test_bit(info->id, mds->enabled_cmds)) + if (!test_bit(info->id, cxl_mbox->enabled_cmds)) return -ENOTTY; /* Check that the command is not claimed for exclusive kernel use */ - if (test_bit(info->id, mds->exclusive_cmds)) + if (test_bit(info->id, cxl_mbox->exclusive_cmds)) return -EBUSY; /* Check the input buffer is the expected size */ @@ -464,7 +477,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, /** * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd. - * @mds: The driver data for the operation + * @cxl_mbox: CXL mailbox context * @send_cmd: &struct cxl_send_command copied in from userspace. * * Return: @@ -479,7 +492,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, * safe to send to the hardware. */ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd, - struct cxl_memdev_state *mds, + struct cxl_mailbox *cxl_mbox, const struct cxl_send_command *send_cmd) { struct cxl_mem_command mem_cmd; @@ -493,29 +506,28 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd, * supports, but output can be arbitrarily large (simply write out as * much data as the hardware provides). */ - if (send_cmd->in.size > mds->payload_size) + if (send_cmd->in.size > cxl_mbox->payload_size) return -EINVAL; /* Sanitize and construct a cxl_mem_command */ if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) - rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds); + rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox); else - rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds); + rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox); if (rc) return rc; /* Sanitize and construct a cxl_mbox_cmd */ - return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode, + return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode, mem_cmd.info.size_in, mem_cmd.info.size_out, send_cmd->in.payload); } -int cxl_query_cmd(struct cxl_memdev *cxlmd, +int cxl_query_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_mem_query_commands __user *q) { - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); - struct device *dev = &cxlmd->dev; + struct device *dev = cxl_mbox->host; struct cxl_mem_command *cmd; u32 n_commands; int j = 0; @@ -530,15 +542,15 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd, return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands); /* - * otherwise, return max(n_commands, total commands) cxl_command_info + * otherwise, return min(n_commands, total commands) cxl_command_info * structures. 
*/ cxl_for_each_cmd(cmd) { struct cxl_command_info info = cmd->info; - if (test_bit(info.id, mds->enabled_cmds)) + if (test_bit(info.id, cxl_mbox->enabled_cmds)) info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED; - if (test_bit(info.id, mds->exclusive_cmds)) + if (test_bit(info.id, cxl_mbox->exclusive_cmds)) info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE; if (copy_to_user(&q->commands[j++], &info, sizeof(info))) @@ -553,7 +565,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd, /** * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. - * @mds: The driver data for the operation + * @cxl_mbox: The mailbox context for the operation. * @mbox_cmd: The validated mailbox command. * @out_payload: Pointer to userspace's output payload. * @size_out: (Input) Max payload size to copy out. @@ -574,12 +586,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd, * * See cxl_send_cmd(). */ -static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds, +static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *mbox_cmd, u64 out_payload, s32 *size_out, u32 *retval) { - struct device *dev = mds->cxlds.dev; + struct device *dev = cxl_mbox->host; int rc; dev_dbg(dev, @@ -589,7 +601,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds, cxl_mem_opcode_to_name(mbox_cmd->opcode), mbox_cmd->opcode, mbox_cmd->size_in); - rc = mds->mbox_send(mds, mbox_cmd); + rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd); if (rc) goto out; @@ -616,10 +628,9 @@ out: return rc; } -int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) +int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s) { - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); - struct device *dev = &cxlmd->dev; + struct device *dev = cxl_mbox->host; struct cxl_send_command send; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -629,11 +640,11 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) if (copy_from_user(&send, s, sizeof(send))) return -EFAULT; - rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send); + rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send); if (rc) return rc; - rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload, + rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload, &send.out.size, &send.retval); if (rc) return rc; @@ -647,11 +658,12 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, u32 *size, u8 *out) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; u32 remaining = *size; u32 offset = 0; while (remaining) { - u32 xfer_size = min_t(u32, remaining, mds->payload_size); + u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size); struct cxl_mbox_cmd mbox_cmd; struct cxl_mbox_get_log log; int rc; @@ -670,7 +682,7 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, .payload_out = out, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); /* * The output payload length that indicates the number @@ -695,6 +707,35 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, return 0; } +static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds) +{ + switch (opcode) { + case CXL_MBOX_OP_GET_SUPPORTED_FEATURES: + case CXL_MBOX_OP_GET_FEATURE: + (*ro_cmds)++; + return 1; + case CXL_MBOX_OP_SET_FEATURE: + (*wr_cmds)++; + return 1; + default: + return 0; + } +} + +/* 
'Get Supported Features' and 'Get Feature' */ +#define MAX_FEATURES_READ_CMDS 2 +static void set_features_cap(struct cxl_mailbox *cxl_mbox, + int ro_cmds, int wr_cmds) +{ + /* Setting up Features capability while walking the CEL */ + if (ro_cmds == MAX_FEATURES_READ_CMDS) { + if (wr_cmds) + cxl_mbox->feat_cap = CXL_FEATURES_RW; + else + cxl_mbox->feat_cap = CXL_FEATURES_RO; + } +} + /** * cxl_walk_cel() - Walk through the Command Effects Log. * @mds: The driver data for the operation @@ -706,10 +747,11 @@ static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid, */ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_cel_entry *cel_entry; const int cel_entries = size / sizeof(*cel_entry); struct device *dev = mds->cxlds.dev; - int i; + int i, ro_cmds = 0, wr_cmds = 0; cel_entry = (struct cxl_cel_entry *) cel; @@ -719,10 +761,13 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) int enabled = 0; if (cmd) { - set_bit(cmd->info.id, mds->enabled_cmds); + set_bit(cmd->info.id, cxl_mbox->enabled_cmds); enabled++; } + enabled += check_features_opcodes(opcode, &ro_cmds, + &wr_cmds); + if (cxl_is_poison_command(opcode)) { cxl_set_poison_cmd_enabled(&mds->poison, opcode); enabled++; @@ -736,26 +781,29 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel) dev_dbg(dev, "Opcode 0x%04x %s\n", opcode, enabled ? "enabled" : "unsupported by driver"); } + + set_features_cap(cxl_mbox, ro_cmds, wr_cmds); } static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_supported_logs *ret; struct cxl_mbox_cmd mbox_cmd; int rc; - ret = kvmalloc(mds->payload_size, GFP_KERNEL); + ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL); if (!ret) return ERR_PTR(-ENOMEM); mbox_cmd = (struct cxl_mbox_cmd) { .opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS, - .size_out = mds->payload_size, + .size_out = cxl_mbox->payload_size, .payload_out = ret, /* At least the record number field must be valid */ .min_out = 2, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) { kvfree(ret); return ERR_PTR(rc); @@ -788,6 +836,7 @@ static const uuid_t log_uuid[] = { */ int cxl_enumerate_cmds(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_supported_logs *gsl; struct device *dev = mds->cxlds.dev; struct cxl_mem_command *cmd; @@ -826,7 +875,7 @@ int cxl_enumerate_cmds(struct cxl_memdev_state *mds) /* In case CEL was bogus, enable some default commands. 
*/ cxl_for_each_cmd(cmd) if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE) - set_bit(cmd->info.id, mds->enabled_cmds); + set_bit(cmd->info.id, cxl_mbox->enabled_cmds); /* Found the required CEL */ rc = 0; @@ -835,23 +884,60 @@ out: kvfree(gsl); return rc; } -EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL"); void cxl_event_trace_record(const struct cxl_memdev *cxlmd, enum cxl_event_log_type type, enum cxl_event_type event_type, const uuid_t *uuid, union cxl_event *evt) { - if (event_type == CXL_CPER_EVENT_GEN_MEDIA) - trace_cxl_general_media(cxlmd, type, &evt->gen_media); - else if (event_type == CXL_CPER_EVENT_DRAM) - trace_cxl_dram(cxlmd, type, &evt->dram); - else if (event_type == CXL_CPER_EVENT_MEM_MODULE) + if (event_type == CXL_CPER_EVENT_MEM_MODULE) { trace_cxl_memory_module(cxlmd, type, &evt->mem_module); - else + return; + } + if (event_type == CXL_CPER_EVENT_GENERIC) { trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); + return; + } + + if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { + u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX; + struct cxl_region *cxlr; + + /* + * These trace points are annotated with HPA and region + * translations. Take topology mutation locks and lookup + * { HPA, REGION } from { DPA, MEMDEV } in the event record. + */ + guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_dpa_rwsem); + + dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK; + cxlr = cxl_dpa_to_region(cxlmd, dpa); + if (cxlr) { + u64 cache_size = cxlr->params.cache_size; + + hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa); + if (cache_size) + hpa_alias = hpa - cache_size; + } + + if (event_type == CXL_CPER_EVENT_GEN_MEDIA) { + if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt)) + dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n"); + + trace_cxl_general_media(cxlmd, type, cxlr, hpa, + hpa_alias, &evt->gen_media); + } else if (event_type == CXL_CPER_EVENT_DRAM) { + if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt)) + dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n"); + + trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias, + &evt->dram); + } + } } -EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL"); static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd, enum cxl_event_log_type type, @@ -874,6 +960,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, enum cxl_event_log_type log, struct cxl_get_event_payload *get_pl) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_clear_event_payload *payload; u16 total = le16_to_cpu(get_pl->record_count); u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES; @@ -884,8 +971,8 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, int i; /* Payload size may limit the max handles */ - if (pl_size > mds->payload_size) { - max_handles = (mds->payload_size - sizeof(*payload)) / + if (pl_size > cxl_mbox->payload_size) { + max_handles = (cxl_mbox->payload_size - sizeof(*payload)) / sizeof(__le16); pl_size = struct_size(payload, handles, max_handles); } @@ -919,7 +1006,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, if (i == max_handles) { payload->nr_recs = i; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto free_pl; i = 0; @@ -930,7 +1017,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, if (i) { payload->nr_recs = i; mbox_cmd.size_in = struct_size(payload, 
handles, i); - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto free_pl; } @@ -943,30 +1030,28 @@ free_pl: static void cxl_mem_get_records_log(struct cxl_memdev_state *mds, enum cxl_event_log_type type) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_memdev *cxlmd = mds->cxlds.cxlmd; struct device *dev = mds->cxlds.dev; struct cxl_get_event_payload *payload; - struct cxl_mbox_cmd mbox_cmd; u8 log_type = type; u16 nr_rec; mutex_lock(&mds->event.log_lock); payload = mds->event.buf; - mbox_cmd = (struct cxl_mbox_cmd) { - .opcode = CXL_MBOX_OP_GET_EVENT_RECORD, - .payload_in = &log_type, - .size_in = sizeof(log_type), - .payload_out = payload, - .min_out = struct_size(payload, records, 0), - }; - do { int rc, i; + struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_EVENT_RECORD, + .payload_in = &log_type, + .size_in = sizeof(log_type), + .payload_out = payload, + .size_out = cxl_mbox->payload_size, + .min_out = struct_size(payload, records, 0), + }; - mbox_cmd.size_out = mds->payload_size; - - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) { dev_err_ratelimited(dev, "Event log '%d': Failed to query event records : %d", @@ -1021,7 +1106,7 @@ void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status) if (status & CXLDEV_EVENT_STATUS_INFO) cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO); } -EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL"); /** * cxl_mem_get_partition_info - Get partition info @@ -1037,6 +1122,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL); */ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_partition_info pi; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -1046,7 +1132,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) .size_out = sizeof(pi), .payload_out = &pi, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) return rc; @@ -1054,10 +1140,6 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER; mds->active_persistent_bytes = le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER; - mds->next_volatile_bytes = - le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; - mds->next_persistent_bytes = - le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER; return 0; } @@ -1073,6 +1155,7 @@ static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds) */ int cxl_dev_state_identify(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ struct cxl_mbox_identify id; struct cxl_mbox_cmd mbox_cmd; @@ -1087,7 +1170,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds) .size_out = sizeof(id), .payload_out = &id, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) return rc; @@ -1111,10 +1194,11 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds) return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL"); static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; int rc; u32 
sec_out = 0; struct cxl_get_security_output { @@ -1126,14 +1210,13 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) .size_out = sizeof(out), }; struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd }; - struct cxl_dev_state *cxlds = &mds->cxlds; if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE) return -EINVAL; - rc = cxl_internal_send_cmd(mds, &sec_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd); if (rc < 0) { - dev_err(cxlds->dev, "Failed to get security state : %d", rc); + dev_err(cxl_mbox->host, "Failed to get security state : %d", rc); return rc; } @@ -1150,9 +1233,9 @@ static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd) sec_out & CXL_PMEM_SEC_STATE_LOCKED) return -EINVAL; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) { - dev_err(cxlds->dev, "Failed to sanitize device : %d", rc); + dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc); return rc; } @@ -1178,75 +1261,54 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) { struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); struct cxl_port *endpoint; - int rc; /* synchronize with cxl_mem_probe() and decoder write operations */ - device_lock(&cxlmd->dev); + guard(device)(&cxlmd->dev); endpoint = cxlmd->endpoint; - down_read(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_region_rwsem); /* * Require an endpoint to be safe otherwise the driver can not * be sure that the device is unmapped. */ if (endpoint && cxl_num_decoders_committed(endpoint) == 0) - rc = __cxl_mem_sanitize(mds, cmd); - else - rc = -EBUSY; - up_read(&cxl_region_rwsem); - device_unlock(&cxlmd->dev); + return __cxl_mem_sanitize(mds, cmd); - return rc; + return -EBUSY; } -static int add_dpa_res(struct device *dev, struct resource *parent, - struct resource *res, resource_size_t start, - resource_size_t size, const char *type) +static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode) { - int rc; + int i = info->nr_partitions; - res->name = type; - res->start = start; - res->end = start + size - 1; - res->flags = IORESOURCE_MEM; - if (resource_size(res) == 0) { - dev_dbg(dev, "DPA(%s): no capacity\n", res->name); - return 0; - } - rc = request_resource(parent, res); - if (rc) { - dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name, - res, rc); - return rc; - } - - dev_dbg(dev, "DPA(%s): %pr\n", res->name, res); + if (size == 0) + return; - return 0; + info->part[i].range = (struct range) { + .start = start, + .end = start + size - 1, + }; + info->part[i].mode = mode; + info->nr_partitions++; } -int cxl_mem_create_range_info(struct cxl_memdev_state *mds) +int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info) { struct cxl_dev_state *cxlds = &mds->cxlds; struct device *dev = cxlds->dev; int rc; if (!cxlds->media_ready) { - cxlds->dpa_res = DEFINE_RES_MEM(0, 0); - cxlds->ram_res = DEFINE_RES_MEM(0, 0); - cxlds->pmem_res = DEFINE_RES_MEM(0, 0); + info->size = 0; return 0; } - cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes); + info->size = mds->total_bytes; if (mds->partition_align_bytes == 0) { - rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, - mds->volatile_only_bytes, "ram"); - if (rc) - return rc; - return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, - mds->volatile_only_bytes, - mds->persistent_only_bytes, "pmem"); + add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM); + add_part(info, mds->volatile_only_bytes, + 
mds->persistent_only_bytes, CXL_PARTMODE_PMEM); + return 0; } rc = cxl_mem_get_partition_info(mds); @@ -1255,18 +1317,56 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds) return rc; } - rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, - mds->active_volatile_bytes, "ram"); - if (rc) - return rc; - return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, - mds->active_volatile_bytes, - mds->active_persistent_bytes, "pmem"); + add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM); + add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes, + CXL_PARTMODE_PMEM); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL"); + +int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count) +{ + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + struct cxl_mbox_get_health_info_out hi; + struct cxl_mbox_cmd mbox_cmd; + int rc; + + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_HEALTH_INFO, + .size_out = sizeof(hi), + .payload_out = &hi, + }; + + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); + if (!rc) + *count = le32_to_cpu(hi.dirty_shutdown_cnt); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL"); + +int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds) +{ + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + struct cxl_mbox_cmd mbox_cmd; + struct cxl_mbox_set_shutdown_state_in in = { + .state = 1 + }; + + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE, + .size_in = sizeof(in), + .payload_in = &in, + }; + + return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); } -EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL"); int cxl_set_timestamp(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_cmd mbox_cmd; struct cxl_mbox_set_timestamp_in pi; int rc; @@ -1278,7 +1378,7 @@ int cxl_set_timestamp(struct cxl_memdev_state *mds) .payload_in = &pi, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); /* * Command is optional. Devices may have another way of providing * a timestamp, or may return all 0s in timestamp fields. 
@@ -1289,15 +1389,15 @@ int cxl_set_timestamp(struct cxl_memdev_state *mds) return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL"); int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, struct cxl_region *cxlr) { struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_poison_out *po; struct cxl_mbox_poison_in pi; - struct cxl_mbox_cmd mbox_cmd; int nr_records = 0; int rc; @@ -1309,17 +1409,17 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, pi.offset = cpu_to_le64(offset); pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT); - mbox_cmd = (struct cxl_mbox_cmd) { - .opcode = CXL_MBOX_OP_GET_POISON, - .size_in = sizeof(pi), - .payload_in = &pi, - .size_out = mds->payload_size, - .payload_out = po, - .min_out = struct_size(po, record, 0), - }; - do { - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){ + .opcode = CXL_MBOX_OP_GET_POISON, + .size_in = sizeof(pi), + .payload_in = &pi, + .size_out = cxl_mbox->payload_size, + .payload_out = po, + .min_out = struct_size(po, record, 0), + }; + + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) break; @@ -1340,7 +1440,7 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, mutex_unlock(&mds->poison.lock); return rc; } -EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL"); static void free_poison_buf(void *buf) { @@ -1350,7 +1450,9 @@ static void free_poison_buf(void *buf) /* Get Poison List output buffer is protected by mds->poison.lock */ static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds) { - mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL); + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + + mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL); if (!mds->poison.list_out) return -ENOMEM; @@ -1374,11 +1476,25 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds) mutex_init(&mds->poison.lock); return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL"); + +int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host) +{ + if (!cxl_mbox || !host) + return -EINVAL; + + cxl_mbox->host = host; + mutex_init(&cxl_mbox->mbox_mutex); + rcuwait_init(&cxl_mbox->mbox_wait); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL"); struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev) { struct cxl_memdev_state *mds; + int rc; mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL); if (!mds) { @@ -1386,18 +1502,22 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev) return ERR_PTR(-ENOMEM); } - mutex_init(&mds->mbox_mutex); mutex_init(&mds->event.log_lock); mds->cxlds.dev = dev; mds->cxlds.reg_map.host = dev; + mds->cxlds.cxl_mbox.host = dev; mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE; mds->cxlds.type = CXL_DEVTYPE_CLASSMEM; - mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID; - mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID; + + rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier); + if (rc == -EOPNOTSUPP) + dev_warn(dev, "CXL MCE unsupported\n"); + else if (rc) + return ERR_PTR(rc); return mds; } -EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL"); void __init cxl_mbox_init(void) { diff --git a/drivers/cxl/core/mce.c b/drivers/cxl/core/mce.c new file mode 
100644 index 000000000000..ff8d078c6ca1 --- /dev/null +++ b/drivers/cxl/core/mce.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation. All rights reserved. */ +#include <linux/mm.h> +#include <linux/notifier.h> +#include <linux/set_memory.h> +#include <asm/mce.h> +#include <cxlmem.h> +#include "mce.h" + +static int cxl_handle_mce(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct cxl_memdev_state *mds = container_of(nb, struct cxl_memdev_state, + mce_notifier); + struct cxl_memdev *cxlmd = mds->cxlds.cxlmd; + struct cxl_port *endpoint = cxlmd->endpoint; + struct mce *mce = data; + u64 spa, spa_alias; + unsigned long pfn; + + if (!mce || !mce_usable_address(mce)) + return NOTIFY_DONE; + + if (!endpoint) + return NOTIFY_DONE; + + spa = mce->addr & MCI_ADDR_PHYSADDR; + + pfn = spa >> PAGE_SHIFT; + if (!pfn_valid(pfn)) + return NOTIFY_DONE; + + spa_alias = cxl_port_get_spa_cache_alias(endpoint, spa); + if (spa_alias == ~0ULL) + return NOTIFY_DONE; + + pfn = spa_alias >> PAGE_SHIFT; + + /* + * Take down the aliased memory page. The original memory page flagged + * by the MCE will be taken care of by the standard MCE handler. + */ + dev_emerg(mds->cxlds.dev, "Offlining aliased SPA address0: %#llx\n", + spa_alias); + if (!memory_failure(pfn, 0)) + set_mce_nospec(pfn); + + return NOTIFY_OK; +} + +static void cxl_unregister_mce_notifier(void *mce_notifier) +{ + mce_unregister_decode_chain(mce_notifier); +} + +int devm_cxl_register_mce_notifier(struct device *dev, + struct notifier_block *mce_notifier) +{ + mce_notifier->notifier_call = cxl_handle_mce; + mce_notifier->priority = MCE_PRIO_UC; + mce_register_decode_chain(mce_notifier); + + return devm_add_action_or_reset(dev, cxl_unregister_mce_notifier, + mce_notifier); +} diff --git a/drivers/cxl/core/mce.h b/drivers/cxl/core/mce.h new file mode 100644 index 000000000000..ace73424eeb6 --- /dev/null +++ b/drivers/cxl/core/mce.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation. All rights reserved. */ +#ifndef _CXL_CORE_MCE_H_ +#define _CXL_CORE_MCE_H_ + +#include <linux/notifier.h> + +#ifdef CONFIG_CXL_MCE +int devm_cxl_register_mce_notifier(struct device *dev, + struct notifier_block *mce_notifier); +#else +static inline int +devm_cxl_register_mce_notifier(struct device *dev, + struct notifier_block *mce_notifier) +{ + return -EOPNOTSUPP; +} +#endif + +#endif diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index d4e259f3a7e9..f88a13adf7fa 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -27,6 +27,7 @@ static void cxl_memdev_release(struct device *dev) struct cxl_memdev *cxlmd = to_cxl_memdev(dev); ida_free(&cxl_memdev_ida, cxlmd->id); + devm_cxl_memdev_edac_release(cxlmd); kfree(cxlmd); } @@ -58,7 +59,7 @@ static ssize_t payload_max_show(struct device *dev, if (!mds) return sysfs_emit(buf, "\n"); - return sysfs_emit(buf, "%zu\n", mds->payload_size); + return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size); } static DEVICE_ATTR_RO(payload_max); @@ -75,12 +76,20 @@ static ssize_t label_storage_size_show(struct device *dev, } static DEVICE_ATTR_RO(label_storage_size); +static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds) +{ + /* Static RAM is only expected at partition 0. 
*/ + if (cxlds->part[0].mode != CXL_PARTMODE_RAM) + return 0; + return resource_size(&cxlds->part[0].res); +} + static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; - unsigned long long len = resource_size(&cxlds->ram_res); + unsigned long long len = cxl_ram_size(cxlds); return sysfs_emit(buf, "%#llx\n", len); } @@ -93,7 +102,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; - unsigned long long len = resource_size(&cxlds->pmem_res); + unsigned long long len = cxl_pmem_size(cxlds); return sysfs_emit(buf, "%#llx\n", len); } @@ -124,15 +133,16 @@ static ssize_t security_state_show(struct device *dev, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); unsigned long state = mds->security.state; int rc = 0; /* sync with latest submission state */ - mutex_lock(&mds->mbox_mutex); + mutex_lock(&cxl_mbox->mbox_mutex); if (mds->security.sanitize_active) rc = sysfs_emit(buf, "sanitize\n"); - mutex_unlock(&mds->mbox_mutex); + mutex_unlock(&cxl_mbox->mbox_mutex); if (rc) return rc; @@ -144,8 +154,8 @@ static ssize_t security_state_show(struct device *dev, return sysfs_emit(buf, "frozen\n"); if (state & CXL_PMEM_SEC_STATE_LOCKED) return sysfs_emit(buf, "locked\n"); - else - return sysfs_emit(buf, "unlocked\n"); + + return sysfs_emit(buf, "unlocked\n"); } static struct device_attribute dev_attr_security_state = __ATTR(state, 0444, security_state_show, NULL); @@ -197,22 +207,17 @@ static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd) int rc = 0; /* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */ - if (resource_size(&cxlds->pmem_res)) { - offset = cxlds->pmem_res.start; - length = resource_size(&cxlds->pmem_res); - rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); - if (rc) - return rc; - } - if (resource_size(&cxlds->ram_res)) { - offset = cxlds->ram_res.start; - length = resource_size(&cxlds->ram_res); + for (int i = 0; i < cxlds->nr_partitions; i++) { + const struct resource *res = &cxlds->part[i].res; + + offset = res->start; + length = resource_size(res); rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); /* * Invalid Physical Address is not an error for * volatile addresses. Device support is optional. 
*/ - if (rc == -EFAULT) + if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM) rc = 0; } return rc; @@ -249,51 +254,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) return rc; } -EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL); - -struct cxl_dpa_to_region_context { - struct cxl_region *cxlr; - u64 dpa; -}; - -static int __cxl_dpa_to_region(struct device *dev, void *arg) -{ - struct cxl_dpa_to_region_context *ctx = arg; - struct cxl_endpoint_decoder *cxled; - u64 dpa = ctx->dpa; - - if (!is_endpoint_decoder(dev)) - return 0; - - cxled = to_cxl_endpoint_decoder(dev); - if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) - return 0; - - if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) - return 0; - - dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, - dev_name(&cxled->cxld.region->dev)); - - ctx->cxlr = cxled->cxld.region; - - return 1; -} - -static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa) -{ - struct cxl_dpa_to_region_context ctx; - struct cxl_port *port; - - ctx = (struct cxl_dpa_to_region_context) { - .dpa = dpa, - }; - port = cxlmd->endpoint; - if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) - device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); - - return ctx.cxlr; -} +EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL"); static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) { @@ -321,7 +282,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) { - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_inject_poison inject; struct cxl_poison_record record; struct cxl_mbox_cmd mbox_cmd; @@ -351,13 +312,13 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) .size_in = sizeof(inject), .payload_in = &inject, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto out; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) - dev_warn_once(mds->cxlds.dev, + dev_warn_once(cxl_mbox->host, "poison inject dpa:%#llx region: %s\n", dpa, dev_name(&cxlr->dev)); @@ -372,11 +333,11 @@ out: return rc; } -EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL"); int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) { - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox; struct cxl_mbox_clear_poison clear; struct cxl_poison_record record; struct cxl_mbox_cmd mbox_cmd; @@ -415,13 +376,13 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) .payload_in = &clear, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) goto out; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) - dev_warn_once(mds->cxlds.dev, + dev_warn_once(cxl_mbox->host, "poison clear dpa:%#llx region: %s\n", dpa, dev_name(&cxlr->dev)); @@ -436,7 +397,7 @@ out: return rc; } -EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL"); static struct attribute *cxl_memdev_attributes[] = { &dev_attr_serial.attr, @@ -447,14 +408,21 @@ static struct attribute *cxl_memdev_attributes[] = { NULL, }; +static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds) +{ + for (int i = 0; i < cxlds->nr_partitions; i++) + if (cxlds->part[i].mode == CXL_PARTMODE_PMEM) + return &cxlds->part[i].perf; + return NULL; +} + 
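The to_pmem_perf() helper just above resolves performance data by scanning cxlds->part[] for the first pmem-mode partition instead of reading a dedicated pmem_perf member. A minimal, self-contained sketch of that lookup pattern follows; the *_demo types and find_part_by_mode() are illustrative stand-ins for this note only, not structures or APIs defined by this patch or the kernel.

#include <stdio.h>

enum cxl_partition_mode_demo { DEMO_PARTMODE_RAM, DEMO_PARTMODE_PMEM };

struct cxl_dpa_part_demo {
	enum cxl_partition_mode_demo mode;
	unsigned long long size;
};

struct cxl_dev_state_demo {
	int nr_partitions;
	struct cxl_dpa_part_demo part[2];
};

/* Scan part[] for the first partition with the requested mode, mirroring
 * the lookup done by to_pmem_perf() and cxl_dpa_set_part() above. */
static int find_part_by_mode(const struct cxl_dev_state_demo *cxlds,
			     enum cxl_partition_mode_demo mode)
{
	for (int i = 0; i < cxlds->nr_partitions; i++)
		if (cxlds->part[i].mode == mode)
			return i;
	return -1; /* no partition of that mode */
}

int main(void)
{
	struct cxl_dev_state_demo cxlds = {
		.nr_partitions = 2,
		.part = {
			{ .mode = DEMO_PARTMODE_RAM,  .size = 1ULL << 30 },
			{ .mode = DEMO_PARTMODE_PMEM, .size = 2ULL << 30 },
		},
	};

	printf("pmem partition index: %d\n",
	       find_part_by_mode(&cxlds, DEMO_PARTMODE_PMEM));
	return 0;
}

Keying off the partition mode rather than a fixed ram/pmem member lets the same attribute code work for devices that expose their capacity as a variable-length partition array.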
static ssize_t pmem_qos_class_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class); + return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class); } static struct device_attribute dev_attr_pmem_qos_class = @@ -466,14 +434,20 @@ static struct attribute *cxl_memdev_pmem_attributes[] = { NULL, }; +static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds) +{ + if (cxlds->part[0].mode != CXL_PARTMODE_RAM) + return NULL; + return &cxlds->part[0].perf; +} + static ssize_t ram_qos_class_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class); + return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class); } static struct device_attribute dev_attr_ram_qos_class = @@ -509,11 +483,11 @@ static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds); - if (a == &dev_attr_ram_qos_class.attr) - if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID) - return 0; + if (a == &dev_attr_ram_qos_class.attr && + (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID)) + return 0; return a->mode; } @@ -528,11 +502,11 @@ static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n { struct device *dev = kobj_to_dev(kobj); struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds); - if (a == &dev_attr_pmem_qos_class.attr) - if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID) - return 0; + if (a == &dev_attr_pmem_qos_class.attr && + (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID)) + return 0; return a->mode; } @@ -580,7 +554,7 @@ void cxl_memdev_update_perf(struct cxl_memdev *cxlmd) sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group); sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group); } -EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL"); static const struct device_type cxl_memdev_type = { .name = "cxl_memdev", @@ -593,7 +567,7 @@ bool is_cxl_memdev(const struct device *dev) { return dev->type == &cxl_memdev_type; } -EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL); +EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL"); /** * set_exclusive_cxl_commands() - atomically disable user cxl commands @@ -607,12 +581,13 @@ EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, CXL); void set_exclusive_cxl_commands(struct cxl_memdev_state *mds, unsigned long *cmds) { - down_write(&cxl_memdev_rwsem); - bitmap_or(mds->exclusive_cmds, mds->exclusive_cmds, cmds, - CXL_MEM_COMMAND_ID_MAX); - up_write(&cxl_memdev_rwsem); + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + + guard(rwsem_write)(&cxl_memdev_rwsem); + bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds, + cmds, CXL_MEM_COMMAND_ID_MAX); } -EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL); +EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL"); /** * 
clear_exclusive_cxl_commands() - atomically enable user cxl commands @@ -622,20 +597,20 @@ EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL); void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds, unsigned long *cmds) { - down_write(&cxl_memdev_rwsem); - bitmap_andnot(mds->exclusive_cmds, mds->exclusive_cmds, cmds, - CXL_MEM_COMMAND_ID_MAX); - up_write(&cxl_memdev_rwsem); + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + + guard(rwsem_write)(&cxl_memdev_rwsem); + bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds, + cmds, CXL_MEM_COMMAND_ID_MAX); } -EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL); +EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL"); static void cxl_memdev_shutdown(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - down_write(&cxl_memdev_rwsem); + guard(rwsem_write)(&cxl_memdev_rwsem); cxlmd->cxlds = NULL; - up_write(&cxl_memdev_rwsem); } static void cxl_memdev_unregister(void *_cxlmd) @@ -699,11 +674,14 @@ err: static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, unsigned long arg) { + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + switch (cmd) { case CXL_MEM_QUERY_COMMANDS: - return cxl_query_cmd(cxlmd, (void __user *)arg); + return cxl_query_cmd(cxl_mbox, (void __user *)arg); case CXL_MEM_SEND_COMMAND: - return cxl_send_cmd(cxlmd, (void __user *)arg); + return cxl_send_cmd(cxl_mbox, (void __user *)arg); default: return -ENOTTY; } @@ -714,15 +692,13 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, { struct cxl_memdev *cxlmd = file->private_data; struct cxl_dev_state *cxlds; - int rc = -ENXIO; - down_read(&cxl_memdev_rwsem); + guard(rwsem_read)(&cxl_memdev_rwsem); cxlds = cxlmd->cxlds; if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM) - rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); - up_read(&cxl_memdev_rwsem); + return __cxl_memdev_ioctl(cxlmd, cmd, arg); - return rc; + return -ENXIO; } static int cxl_memdev_open(struct inode *inode, struct file *file) @@ -758,6 +734,7 @@ static int cxl_memdev_release_file(struct inode *inode, struct file *file) */ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_fw_info info; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -768,7 +745,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) .payload_out = &info, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) return rc; @@ -792,6 +769,7 @@ static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) */ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_activate_fw activate; struct cxl_mbox_cmd mbox_cmd; @@ -808,7 +786,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) activate.action = CXL_FW_ACTIVATE_OFFLINE; activate.slot = slot; - return cxl_internal_send_cmd(mds, &mbox_cmd); + return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); } /** @@ -823,6 +801,7 @@ static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) */ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds) { + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_transfer_fw *transfer; struct cxl_mbox_cmd mbox_cmd; int rc; @@ -842,7 +821,7 @@ static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds) transfer->action = 
CXL_FW_TRANSFER_ACTION_ABORT; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); kfree(transfer); return rc; } @@ -873,12 +852,13 @@ static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data, { struct cxl_memdev_state *mds = fwl->dd_handle; struct cxl_mbox_transfer_fw *transfer; + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; if (!size) return FW_UPLOAD_ERR_INVALID_SIZE; mds->fw.oneshot = struct_size(transfer, data, size) < - mds->payload_size; + cxl_mbox->payload_size; if (cxl_mem_get_fw_info(mds)) return FW_UPLOAD_ERR_HW_ERROR; @@ -898,6 +878,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, { struct cxl_memdev_state *mds = fwl->dd_handle; struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; struct cxl_memdev *cxlmd = cxlds->cxlmd; struct cxl_mbox_transfer_fw *transfer; struct cxl_mbox_cmd mbox_cmd; @@ -921,7 +902,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, * sizeof(*transfer) is 128. These constraints imply that @cur_size * will always be 128b aligned. */ - cur_size = min_t(size_t, size, mds->payload_size - sizeof(*transfer)); + cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer)); remaining = size - cur_size; size_in = struct_size(transfer, data, cur_size); @@ -965,7 +946,7 @@ static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, .poll_count = 30, }; - rc = cxl_internal_send_cmd(mds, &mbox_cmd); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc < 0) { rc = FW_UPLOAD_ERR_RW_ERROR; goto out_free; @@ -1032,10 +1013,11 @@ static void cxl_remove_fw_upload(void *fwl) int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds) { struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; struct device *dev = &cxlds->cxlmd->dev; struct fw_upload *fwl; - if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds)) + if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxl_mbox->enabled_cmds)) return 0; fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev), @@ -1044,7 +1026,7 @@ int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds) return PTR_ERR(fwl); return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl); } -EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL"); static const struct file_operations cxl_memdev_fops = { .owner = THIS_MODULE, @@ -1098,21 +1080,22 @@ err: put_device(dev); return ERR_PTR(rc); } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, "CXL"); static void sanitize_teardown_notifier(void *data) { struct cxl_memdev_state *mds = data; + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct kernfs_node *state; /* * Prevent new irq triggered invocations of the workqueue and * flush inflight invocations. 
*/ - mutex_lock(&mds->mbox_mutex); + mutex_lock(&cxl_mbox->mbox_mutex); state = mds->security.sanitize_node; mds->security.sanitize_node = NULL; - mutex_unlock(&mds->mbox_mutex); + mutex_unlock(&cxl_mbox->mbox_mutex); cancel_delayed_work_sync(&mds->security.poll_dwork); sysfs_put(state); @@ -1142,7 +1125,7 @@ int devm_cxl_sanitize_setup_notifier(struct device *host, return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds); } -EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL"); __init int cxl_memdev_init(void) { diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 0df09bd79408..b50551601c2e 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -101,7 +101,7 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port) return ctx.error; return ctx.count; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, "CXL"); static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id) { @@ -209,38 +209,7 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds) return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL); - -static int wait_for_valid(struct pci_dev *pdev, int d) -{ - u32 val; - int rc; - - /* - * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high - * and Size Low registers are valid. Must be set within 1 second of - * deassertion of reset to CXL device. Likely it is already set by the - * time this runs, but otherwise give a 1.5 second timeout in case of - * clock skew. - */ - rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); - if (rc) - return rc; - - if (val & CXL_DVSEC_MEM_INFO_VALID) - return 0; - - msleep(1500); - - rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); - if (rc) - return rc; - - if (val & CXL_DVSEC_MEM_INFO_VALID) - return 0; - - return -ETIMEDOUT; -} +EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, "CXL"); static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val) { @@ -283,9 +252,9 @@ static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds) } /* require dvsec ranges to be covered by a locked platform window */ -static int dvsec_range_allowed(struct device *dev, void *arg) +static int dvsec_range_allowed(struct device *dev, const void *arg) { - struct range *dev_range = arg; + const struct range *dev_range = arg; struct cxl_decoder *cxld; if (!is_root_decoder(dev)) @@ -322,11 +291,13 @@ static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm) return devm_add_action_or_reset(host, disable_hdm, cxlhdm); } -int cxl_dvsec_rr_decode(struct device *dev, int d, +int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds, struct cxl_endpoint_dvsec_info *info) { - struct pci_dev *pdev = to_pci_dev(dev); + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + struct device *dev = cxlds->dev; int hdm_count, rc, i, ranges = 0; + int d = cxlds->cxl_dvsec; u16 cap, ctrl; if (!d) { @@ -338,10 +309,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, if (rc) return rc; - rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl); - if (rc) - return rc; - if (!(cap & CXL_DVSEC_MEM_CAPABLE)) { dev_dbg(dev, "Not MEM Capable\n"); return -ENXIO; @@ -357,17 +324,15 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, if (!hdm_count || hdm_count > 2) return -EINVAL; - rc = wait_for_valid(pdev, d); - if (rc) { - dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc); - return rc; - } - /* 
* The current DVSEC values are moot if the memory capability is * disabled, and they will remain moot after the HDM Decoder * capability is enabled. */ + rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl); + if (rc) + return rc; + info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl); if (!info->mem_enabled) return 0; @@ -376,6 +341,10 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, u64 base, size; u32 temp; + rc = cxl_dvsec_mem_range_valid(cxlds, i); + if (rc) + return rc; + rc = pci_read_config_dword( pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp); if (rc) @@ -390,10 +359,6 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; if (!size) { - info->dvsec_range[i] = (struct range) { - .start = 0, - .end = CXL_RESOURCE_NONE, - }; continue; } @@ -411,19 +376,17 @@ int cxl_dvsec_rr_decode(struct device *dev, int d, base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK; - info->dvsec_range[i] = (struct range) { + info->dvsec_range[ranges++] = (struct range) { .start = base, .end = base + size - 1 }; - - ranges++; } info->ranges = ranges; return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, "CXL"); /** * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint @@ -452,9 +415,40 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, */ if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled)) return devm_cxl_enable_mem(&port->dev, cxlds); - else if (!hdm) + + /* + * If the HDM Decoder Capability does not exist and DVSEC was + * not setup, the DVSEC based emulation cannot be used. + */ + if (!hdm) return -ENODEV; + /* The HDM Decoder Capability exists but is globally disabled. */ + + /* + * If the DVSEC CXL Range registers are not enabled, just + * enable and use the HDM Decoder Capability registers. + */ + if (!info->mem_enabled) { + rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); + if (rc) + return rc; + + return devm_cxl_enable_mem(&port->dev, cxlds); + } + + /* + * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base + * [High,Low] when HDM operation is enabled the range register values + * are ignored by the device, but the spec also recommends matching the + * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges + * are expected even though Linux does not require or maintain that + * match. Check if at least one DVSEC range is enabled and allowed by + * the platform. That is, the DVSEC range must be covered by a locked + * platform window (CFMWS). Fail otherwise as the endpoint's decoders + * cannot be used. 
+ */ + root = to_cxl_port(port->dev.parent); while (!is_cxl_root(root) && is_cxl_port(root->dev.parent)) root = to_cxl_port(root->dev.parent); @@ -463,7 +457,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, return -ENODEV; } - for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) { + for (i = 0, allowed = 0; i < info->ranges; i++) { struct device *cxld_dev; cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i], @@ -477,30 +471,14 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm, allowed++; } - if (!allowed && info->mem_enabled) { + if (!allowed) { dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n"); return -ENXIO; } - /* - * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base - * [High,Low] when HDM operation is enabled the range register values - * are ignored by the device, but the spec also recommends matching the - * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges - * are expected even though Linux does not require or maintain that - * match. If at least one DVSEC range is enabled and allowed, skip HDM - * Decoder Capability Enable. - */ - if (info->mem_enabled) - return 0; - - rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); - if (rc) - return rc; - - return devm_cxl_enable_mem(&port->dev, cxlds); + return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL"); #define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff #define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0 @@ -525,7 +503,7 @@ static int cxl_cdat_get_length(struct device *dev, __le32 response[2]; int rc; - rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL, + rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), &response, sizeof(response)); @@ -555,7 +533,7 @@ static int cxl_cdat_read_table(struct device *dev, __le32 request = CDAT_DOE_REQ(entry_handle); int rc; - rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL, + rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), rsp, sizeof(*rsp) + remaining); @@ -640,7 +618,7 @@ void read_cdat_data(struct cxl_port *port) if (!pdev) return; - doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, + doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS); if (!doe_mb) { dev_dbg(dev, "No CDAT mailbox\n"); @@ -684,7 +662,7 @@ err: devm_kfree(dev, buf); dev_err(dev, "Failed to read/validate CDAT.\n"); } -EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL); +EXPORT_SYMBOL_NS_GPL(read_cdat_data, "CXL"); static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds, void __iomem *ras_base) @@ -772,22 +750,20 @@ static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds) static void cxl_dport_map_rch_aer(struct cxl_dport *dport) { - struct cxl_rcrb_info *ri = &dport->rcrb; - void __iomem *dport_aer = NULL; resource_size_t aer_phys; struct device *host; + u16 aer_cap; - if (dport->rch && ri->aer_cap) { + aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base); + if (aer_cap) { host = dport->reg_map.host; - aer_phys = ri->aer_cap + ri->base; - dport_aer = devm_cxl_iomap_block(host, aer_phys, - sizeof(struct aer_capability_regs)); + aer_phys = aer_cap + dport->rcrb.base; + dport->regs.dport_aer = devm_cxl_iomap_block(host, aer_phys, + sizeof(struct aer_capability_regs)); } - - dport->regs.dport_aer = dport_aer; } -static void cxl_dport_map_regs(struct cxl_dport *dport) +static void cxl_dport_map_ras(struct 
cxl_dport *dport) { struct cxl_register_map *map = &dport->reg_map; struct device *dev = dport->dport_dev; @@ -797,22 +773,16 @@ static void cxl_dport_map_regs(struct cxl_dport *dport) else if (cxl_map_component_regs(map, &dport->regs.component, BIT(CXL_CM_CAP_CAP_ID_RAS))) dev_dbg(dev, "Failed to map RAS capability.\n"); - - if (dport->rch) - cxl_dport_map_rch_aer(dport); } static void cxl_disable_rch_root_ints(struct cxl_dport *dport) { void __iomem *aer_base = dport->regs.dport_aer; - struct pci_host_bridge *bridge; u32 aer_cmd_mask, aer_cmd; if (!aer_base) return; - bridge = to_pci_host_bridge(dport->dport_dev); - /* * Disable RCH root port command interrupts. * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors @@ -821,32 +791,35 @@ static void cxl_disable_rch_root_ints(struct cxl_dport *dport) * the root cmd register's interrupts is required. But, PCI spec * shows these are disabled by default on reset. */ - if (bridge->native_aer) { - aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN | - PCI_ERR_ROOT_CMD_NONFATAL_EN | - PCI_ERR_ROOT_CMD_FATAL_EN); - aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND); - aer_cmd &= ~aer_cmd_mask; - writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND); - } + aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN | + PCI_ERR_ROOT_CMD_NONFATAL_EN | + PCI_ERR_ROOT_CMD_FATAL_EN); + aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND); + aer_cmd &= ~aer_cmd_mask; + writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND); } -void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport) +/** + * cxl_dport_init_ras_reporting - Setup CXL RAS report on this dport + * @dport: the cxl_dport that needs to be initialized + * @host: host device for devm operations + */ +void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host) { - struct device *dport_dev = dport->dport_dev; - struct pci_host_bridge *host_bridge; + dport->reg_map.host = host; + cxl_dport_map_ras(dport); - host_bridge = to_pci_host_bridge(dport_dev); - if (host_bridge->native_aer) - dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base); + if (dport->rch) { + struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport->dport_dev); - dport->reg_map.host = host; - cxl_dport_map_regs(dport); + if (!host_bridge->native_aer) + return; - if (dport->rch) + cxl_dport_map_rch_aer(dport); cxl_disable_rch_root_ints(dport); + } } -EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, "CXL"); static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds, struct cxl_dport *dport) @@ -913,15 +886,13 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) struct pci_dev *pdev = to_pci_dev(cxlds->dev); struct aer_capability_regs aer_regs; struct cxl_dport *dport; - struct cxl_port *port; int severity; - port = cxl_pci_find_port(pdev, &dport); + struct cxl_port *port __free(put_cxl_port) = + cxl_pci_find_port(pdev, &dport); if (!port) return; - put_device(&port->dev); - if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs)) return; @@ -959,7 +930,7 @@ void cxl_cor_error_detected(struct pci_dev *pdev) cxl_handle_endpoint_cor_ras(cxlds); } } -EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, "CXL"); pci_ers_result_t cxl_error_detected(struct pci_dev *pdev, pci_channel_state_t state) @@ -1009,7 +980,7 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev, } return PCI_ERS_RESULT_NEED_RESET; } -EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_error_detected, "CXL"); 
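Callers outside drivers/cxl/core (not shown in this diff) pick up the cxl_setup_parent_dport() -> cxl_dport_init_ras_reporting() rename. A minimal caller sketch, assuming a hypothetical endpoint probe path with illustrative names — note the argument order is now (dport, host), the reverse of the old helper:

	/*
	 * Hypothetical caller (not part of this patch): set up RAS/AER
	 * reporting for the parent dport once it is known. Per the new
	 * helper, the RCH AER window is mapped and root command
	 * interrupts are masked internally, and only when the host
	 * bridge reports native AER.
	 */
	static void example_setup_dport_ras(struct cxl_dport *parent_dport,
					    struct cxl_memdev *cxlmd)
	{
		struct device *host = &cxlmd->dev;	/* devm lifetime owner, illustrative */

		/* was: cxl_setup_parent_dport(host, parent_dport); */
		cxl_dport_init_ras_reporting(parent_dport, host);
	}
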
static int cxl_flit_size(struct pci_dev *pdev) { @@ -1045,3 +1016,156 @@ long cxl_pci_get_latency(struct pci_dev *pdev) return cxl_flit_size(pdev) * MEGA / bw; } + +static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data) +{ + struct cxl_port *port = data; + struct cxl_decoder *cxld; + struct cxl_hdm *cxlhdm; + void __iomem *hdm; + u32 ctrl; + + if (!is_endpoint_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) + return 0; + + cxlhdm = dev_get_drvdata(&port->dev); + hdm = cxlhdm->regs.hdm_decoder; + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); + + return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl); +} + +bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port) +{ + return device_for_each_child(&port->dev, port, + __cxl_endpoint_decoder_reset_detected); +} +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, "CXL"); + +int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c) +{ + int speed, bw; + u16 lnksta; + u32 width; + + speed = pcie_link_speed_mbps(pdev); + if (speed < 0) + return speed; + speed /= BITS_PER_BYTE; + + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); + bw = speed * width; + + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + c[i].read_bandwidth = bw; + c[i].write_bandwidth = bw; + } + + return 0; +} + +/* + * Set max timeout such that platforms will optimize GPF flow to avoid + * the implied worst-case scenario delays. On a sane platform, all + * devices should always complete GPF within the energy budget of + * the GPF flow. The kernel does not have enough information to pick + * anything better than "maximize timeouts and hope it works". + * + * A misbehaving device could block forward progress of GPF for all + * the other devices, exhausting the energy budget of the platform. + * However, the spec seems to assume that moving on from slow to respond + * devices is a virtue. It is not possible to know that, in actuality, + * the slow to respond device is *the* most critical device in the + * system to wait. + */ +#define GPF_TIMEOUT_BASE_MAX 2 +#define GPF_TIMEOUT_SCALE_MAX 7 /* 10 seconds */ + +u16 cxl_gpf_get_dvsec(struct device *dev) +{ + struct pci_dev *pdev; + bool is_port = true; + u16 dvsec; + + if (!dev_is_pci(dev)) + return 0; + + pdev = to_pci_dev(dev); + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) + is_port = false; + + dvsec = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL, + is_port ? CXL_DVSEC_PORT_GPF : CXL_DVSEC_DEVICE_GPF); + if (!dvsec) + dev_warn(dev, "%s GPF DVSEC not present\n", + is_port ? 
"Port" : "Device"); + return dvsec; +} +EXPORT_SYMBOL_NS_GPL(cxl_gpf_get_dvsec, "CXL"); + +static int update_gpf_port_dvsec(struct pci_dev *pdev, int dvsec, int phase) +{ + u64 base, scale; + int rc, offset; + u16 ctrl; + + switch (phase) { + case 1: + offset = CXL_DVSEC_PORT_GPF_PHASE_1_CONTROL_OFFSET; + base = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_BASE_MASK; + scale = CXL_DVSEC_PORT_GPF_PHASE_1_TMO_SCALE_MASK; + break; + case 2: + offset = CXL_DVSEC_PORT_GPF_PHASE_2_CONTROL_OFFSET; + base = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_BASE_MASK; + scale = CXL_DVSEC_PORT_GPF_PHASE_2_TMO_SCALE_MASK; + break; + default: + return -EINVAL; + } + + rc = pci_read_config_word(pdev, dvsec + offset, &ctrl); + if (rc) + return rc; + + if (FIELD_GET(base, ctrl) == GPF_TIMEOUT_BASE_MAX && + FIELD_GET(scale, ctrl) == GPF_TIMEOUT_SCALE_MAX) + return 0; + + ctrl = FIELD_PREP(base, GPF_TIMEOUT_BASE_MAX); + ctrl |= FIELD_PREP(scale, GPF_TIMEOUT_SCALE_MAX); + + rc = pci_write_config_word(pdev, dvsec + offset, ctrl); + if (!rc) + pci_dbg(pdev, "Port GPF phase %d timeout: %d0 secs\n", + phase, GPF_TIMEOUT_BASE_MAX); + + return rc; +} + +int cxl_gpf_port_setup(struct cxl_dport *dport) +{ + if (!dport) + return -EINVAL; + + if (!dport->gpf_dvsec) { + struct pci_dev *pdev; + int dvsec; + + dvsec = cxl_gpf_get_dvsec(dport->dport_dev); + if (!dvsec) + return -EINVAL; + + dport->gpf_dvsec = dvsec; + pdev = to_pci_dev(dport->dport_dev); + update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 1); + update_gpf_port_dvsec(pdev, dport->gpf_dvsec, 2); + } + + return 0; +} diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index e69625a8d6a1..8853415c106a 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -49,36 +49,31 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev) return NULL; return container_of(dev, struct cxl_nvdimm_bridge, dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, "CXL"); -bool is_cxl_nvdimm_bridge(struct device *dev) -{ - return dev->type == &cxl_nvdimm_bridge_type; -} -EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL); - -static int match_nvdimm_bridge(struct device *dev, void *data) -{ - return is_cxl_nvdimm_bridge(dev); -} - -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd) +/** + * cxl_find_nvdimm_bridge() - find a bridge device relative to a port + * @port: any descendant port of an nvdimm-bridge associated + * root-cxl-port + */ +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_port *port) { - struct cxl_root *cxl_root __free(put_cxl_root) = - find_cxl_root(cxlmd->endpoint); + struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port); struct device *dev; if (!cxl_root) return NULL; - dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge); + dev = device_find_child(&cxl_root->port.dev, + &cxl_nvdimm_bridge_type, + device_match_type); if (!dev) return NULL; return to_cxl_nvdimm_bridge(dev); } -EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, "CXL"); static struct lock_class_key cxl_nvdimm_bridge_key; @@ -160,7 +155,7 @@ err: put_device(dev); return ERR_PTR(rc); } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL"); static void cxl_nvdimm_release(struct device *dev) { @@ -184,7 +179,7 @@ bool is_cxl_nvdimm(struct device *dev) { return dev->type == &cxl_nvdimm_type; } -EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL); +EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, "CXL"); struct 
cxl_nvdimm *to_cxl_nvdimm(struct device *dev) { @@ -193,7 +188,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) return NULL; return container_of(dev, struct cxl_nvdimm, dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, "CXL"); static struct lock_class_key cxl_nvdimm_key; @@ -242,18 +237,20 @@ static void cxlmd_release_nvdimm(void *_cxlmd) /** * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm + * @parent_port: parent port for the (to be added) @cxlmd endpoint port * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations * * Return: 0 on success negative error code on failure. */ -int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd) +int devm_cxl_add_nvdimm(struct cxl_port *parent_port, + struct cxl_memdev *cxlmd) { struct cxl_nvdimm_bridge *cxl_nvb; struct cxl_nvdimm *cxl_nvd; struct device *dev; int rc; - cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); + cxl_nvb = cxl_find_nvdimm_bridge(parent_port); if (!cxl_nvb) return -ENODEV; @@ -287,4 +284,4 @@ err_alloc: return rc; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, "CXL"); diff --git a/drivers/cxl/core/pmu.c b/drivers/cxl/core/pmu.c index 5d8e06b0ba6e..b3136d7664ab 100644 --- a/drivers/cxl/core/pmu.c +++ b/drivers/cxl/core/pmu.c @@ -65,4 +65,4 @@ err: put_device(&pmu->dev); return rc; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_pmu_add, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_pmu_add, "CXL"); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 762783bb091a..eb46c6764d20 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -3,7 +3,6 @@ #include <linux/platform_device.h> #include <linux/memregion.h> #include <linux/workqueue.h> -#include <linux/einj-cxl.h> #include <linux/debugfs.h> #include <linux/device.h> #include <linux/module.h> @@ -11,6 +10,7 @@ #include <linux/slab.h> #include <linux/idr.h> #include <linux/node.h> +#include <cxl/einj.h> #include <cxlmem.h> #include <cxlpci.h> #include <cxl.h> @@ -194,25 +194,35 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + /* without @cxl_dpa_rwsem, make sure @part is not reloaded */ + int part = READ_ONCE(cxled->part); + const char *desc; + + if (part < 0) + desc = "none"; + else + desc = cxlds->part[part].res.name; - return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode)); + return sysfs_emit(buf, "%s\n", desc); } static ssize_t mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); - enum cxl_decoder_mode mode; + enum cxl_partition_mode mode; ssize_t rc; if (sysfs_streq(buf, "pmem")) - mode = CXL_DECODER_PMEM; + mode = CXL_PARTMODE_PMEM; else if (sysfs_streq(buf, "ram")) - mode = CXL_DECODER_RAM; + mode = CXL_PARTMODE_RAM; else return -EINVAL; - rc = cxl_dpa_set_mode(cxled, mode); + rc = cxl_dpa_set_part(cxled, mode); if (rc) return rc; @@ -437,7 +447,7 @@ struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev) return NULL; return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, "CXL"); static void cxl_root_decoder_release(struct device *dev) { @@ -471,19 +481,19 @@ bool is_endpoint_decoder(struct device *dev) { return 
dev->type == &cxl_decoder_endpoint_type; } -EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, "CXL"); bool is_root_decoder(struct device *dev) { return dev->type == &cxl_decoder_root_type; } -EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(is_root_decoder, "CXL"); bool is_switch_decoder(struct device *dev) { return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; } -EXPORT_SYMBOL_NS_GPL(is_switch_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(is_switch_decoder, "CXL"); struct cxl_decoder *to_cxl_decoder(struct device *dev) { @@ -493,7 +503,7 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev) return NULL; return container_of(dev, struct cxl_decoder, dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, "CXL"); struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev) { @@ -502,7 +512,7 @@ struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev) return NULL; return container_of(dev, struct cxl_endpoint_decoder, cxld.dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, "CXL"); struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev) { @@ -511,7 +521,7 @@ struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev) return NULL; return container_of(dev, struct cxl_switch_decoder, cxld.dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, "CXL"); static void cxl_ep_release(struct cxl_ep *ep) { @@ -549,13 +559,9 @@ static ssize_t decoders_committed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_port *port = to_cxl_port(dev); - int rc; - down_read(&cxl_region_rwsem); - rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port)); - up_read(&cxl_region_rwsem); - - return rc; + guard(rwsem_read)(&cxl_region_rwsem); + return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port)); } static DEVICE_ATTR_RO(decoders_committed); @@ -585,7 +591,7 @@ bool is_cxl_port(const struct device *dev) { return dev->type == &cxl_port_type; } -EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL); +EXPORT_SYMBOL_NS_GPL(is_cxl_port, "CXL"); struct cxl_port *to_cxl_port(const struct device *dev) { @@ -594,19 +600,21 @@ struct cxl_port *to_cxl_port(const struct device *dev) return NULL; return container_of(dev, struct cxl_port, dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_port, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL"); + +struct cxl_port *parent_port_of(struct cxl_port *port) +{ + if (!port || !port->parent_dport) + return NULL; + return port->parent_dport->port; +} static void unregister_port(void *_port) { struct cxl_port *port = _port; - struct cxl_port *parent; + struct cxl_port *parent = parent_port_of(port); struct device *lock_dev; - if (is_cxl_root(port)) - parent = NULL; - else - parent = to_cxl_port(port->dev.parent); - /* * CXL root port's and the first level of ports are unregistered * under the platform firmware device lock, all other ports are @@ -828,27 +836,20 @@ static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport) &cxl_einj_inject_fops); } -static struct cxl_port *__devm_cxl_add_port(struct device *host, - struct device *uport_dev, - resource_size_t component_reg_phys, - struct cxl_dport *parent_dport) +static int cxl_port_add(struct cxl_port *port, + resource_size_t component_reg_phys, + struct cxl_dport *parent_dport) { - struct cxl_port *port; - struct device *dev; + struct device *dev __free(put_device) = 
&port->dev; int rc; - port = cxl_port_alloc(uport_dev, parent_dport); - if (IS_ERR(port)) - return port; - - dev = &port->dev; - if (is_cxl_memdev(uport_dev)) { - struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev); + if (is_cxl_memdev(port->uport_dev)) { + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; rc = dev_set_name(dev, "endpoint%d", port->id); if (rc) - goto err; + return rc; /* * The endpoint driver already enumerated the component and RAS @@ -861,19 +862,41 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host, } else if (parent_dport) { rc = dev_set_name(dev, "port%d", port->id); if (rc) - goto err; + return rc; rc = cxl_port_setup_regs(port, component_reg_phys); if (rc) - goto err; - } else + return rc; + } else { rc = dev_set_name(dev, "root%d", port->id); - if (rc) - goto err; + if (rc) + return rc; + } rc = device_add(dev); if (rc) - goto err; + return rc; + + /* Inhibit the cleanup function invoked */ + dev = NULL; + return 0; +} + +static struct cxl_port *__devm_cxl_add_port(struct device *host, + struct device *uport_dev, + resource_size_t component_reg_phys, + struct cxl_dport *parent_dport) +{ + struct cxl_port *port; + int rc; + + port = cxl_port_alloc(uport_dev, parent_dport); + if (IS_ERR(port)) + return port; + + rc = cxl_port_add(port, component_reg_phys, parent_dport); + if (rc) + return ERR_PTR(rc); rc = devm_add_action_or_reset(host, unregister_port, port); if (rc) @@ -891,10 +914,6 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host, port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev)); return port; - -err: - put_device(dev); - return ERR_PTR(rc); } /** @@ -931,7 +950,7 @@ struct cxl_port *devm_cxl_add_port(struct device *host, return port; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, "CXL"); struct cxl_root *devm_cxl_add_root(struct device *host, const struct cxl_root_ops *ops) @@ -941,13 +960,13 @@ struct cxl_root *devm_cxl_add_root(struct device *host, port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); if (IS_ERR(port)) - return (struct cxl_root *)port; + return ERR_CAST(port); cxl_root = to_cxl_root(port); cxl_root->ops = ops; return cxl_root; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, "CXL"); struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) { @@ -963,7 +982,7 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev); } -EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, "CXL"); static void unregister_pci_bus(void *uport_dev) { @@ -984,7 +1003,7 @@ int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, return rc; return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev); } -EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, "CXL"); static bool dev_is_cxl_root_child(struct device *dev) { @@ -1016,16 +1035,7 @@ struct cxl_root *find_cxl_root(struct cxl_port *port) get_device(&iter->dev); return to_cxl_root(iter); } -EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); - -void put_cxl_root(struct cxl_root *cxl_root) -{ - if (!cxl_root) - return; - - put_device(&cxl_root->port.dev); -} -EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL); +EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL"); static struct cxl_dport *find_dport(struct cxl_port *port, int id) { @@ -1219,7 +1229,7 @@ struct cxl_dport 
*devm_cxl_add_dport(struct cxl_port *port, return dport; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, "CXL"); /** * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port @@ -1253,23 +1263,18 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, return dport; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, "CXL"); static int add_ep(struct cxl_ep *new) { struct cxl_port *port = new->dport->port; - int rc; - device_lock(&port->dev); - if (port->dead) { - device_unlock(&port->dev); + guard(device)(&port->dev); + if (port->dead) return -ENXIO; - } - rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new, - GFP_KERNEL); - device_unlock(&port->dev); - return rc; + return xa_insert(&port->endpoints, (unsigned long)new->ep, + new, GFP_KERNEL); } /** @@ -1393,14 +1398,14 @@ static void delete_endpoint(void *data) struct cxl_port *endpoint = cxlmd->endpoint; struct device *host = endpoint_host(endpoint); - device_lock(host); - if (host->driver && !endpoint->dead) { - devm_release_action(host, cxl_unlink_parent_dport, endpoint); - devm_release_action(host, cxl_unlink_uport, endpoint); - devm_release_action(host, unregister_port, endpoint); + scoped_guard(device, host) { + if (host->driver && !endpoint->dead) { + devm_release_action(host, cxl_unlink_parent_dport, endpoint); + devm_release_action(host, cxl_unlink_uport, endpoint); + devm_release_action(host, unregister_port, endpoint); + } + cxlmd->endpoint = NULL; } - cxlmd->endpoint = NULL; - device_unlock(host); put_device(&endpoint->dev); put_device(host); } @@ -1415,7 +1420,7 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint) cxlmd->depth = endpoint->depth; return devm_add_action_or_reset(dev, delete_endpoint, cxlmd); } -EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL"); /* * The natural end of life of a non-root 'cxl_port' is when its parent port goes @@ -1477,12 +1482,11 @@ static void cxl_detach_ep(void *data) .cxlmd = cxlmd, .depth = i, }; - struct device *dev; struct cxl_ep *ep; bool died = false; - dev = bus_find_device(&cxl_bus_type, NULL, &ctx, - port_has_memdev); + struct device *dev __free(put_device) = + bus_find_device(&cxl_bus_type, NULL, &ctx, port_has_memdev); if (!dev) continue; port = to_cxl_port(dev); @@ -1512,7 +1516,6 @@ static void cxl_detach_ep(void *data) dev_name(&port->dev)); delete_switch_port(port); } - put_device(&port->dev); device_unlock(&parent_port->dev); } } @@ -1540,7 +1543,6 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, struct device *dport_dev) { struct device *dparent = grandparent(dport_dev); - struct cxl_port *port, *parent_port = NULL; struct cxl_dport *dport, *parent_dport; resource_size_t component_reg_phys; int rc; @@ -1556,50 +1558,52 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, return -ENXIO; } - parent_port = find_cxl_port(dparent, &parent_dport); + struct cxl_port *parent_port __free(put_cxl_port) = + find_cxl_port(dparent, &parent_dport); if (!parent_port) { /* iterate to create this parent_port */ return -EAGAIN; } - device_lock(&parent_port->dev); - if (!parent_port->dev.driver) { - dev_warn(&cxlmd->dev, - "port %s:%s disabled, failed to enumerate CXL.mem\n", - dev_name(&parent_port->dev), dev_name(uport_dev)); - port = ERR_PTR(-ENXIO); - goto out; - } + /* + * Definition with __free() here to keep the sequence of + * dereferencing the device of the 
port before the parent_port releasing. + */ + struct cxl_port *port __free(put_cxl_port) = NULL; + scoped_guard(device, &parent_port->dev) { + if (!parent_port->dev.driver) { + dev_warn(&cxlmd->dev, + "port %s:%s disabled, failed to enumerate CXL.mem\n", + dev_name(&parent_port->dev), dev_name(uport_dev)); + return -ENXIO; + } + + port = find_cxl_port_at(parent_port, dport_dev, &dport); + if (!port) { + component_reg_phys = find_component_registers(uport_dev); + port = devm_cxl_add_port(&parent_port->dev, uport_dev, + component_reg_phys, parent_dport); + if (IS_ERR(port)) + return PTR_ERR(port); - port = find_cxl_port_at(parent_port, dport_dev, &dport); - if (!port) { - component_reg_phys = find_component_registers(uport_dev); - port = devm_cxl_add_port(&parent_port->dev, uport_dev, - component_reg_phys, parent_dport); - /* retry find to pick up the new dport information */ - if (!IS_ERR(port)) + /* retry find to pick up the new dport information */ port = find_cxl_port_at(parent_port, dport_dev, &dport); + if (!port) + return -ENXIO; + } } -out: - device_unlock(&parent_port->dev); - if (IS_ERR(port)) - rc = PTR_ERR(port); - else { - dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", - dev_name(&port->dev), dev_name(port->uport_dev)); - rc = cxl_add_ep(dport, &cxlmd->dev); - if (rc == -EBUSY) { - /* - * "can't" happen, but this error code means - * something to the caller, so translate it. - */ - rc = -ENXIO; - } - put_device(&port->dev); + dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", + dev_name(&port->dev), dev_name(port->uport_dev)); + rc = cxl_add_ep(dport, &cxlmd->dev); + if (rc == -EBUSY) { + /* + * "can't" happen, but this error code means + * something to the caller, so translate it. + */ + rc = -ENXIO; } - put_device(&parent_port->dev); return rc; } @@ -1630,7 +1634,6 @@ retry: struct device *dport_dev = grandparent(iter); struct device *uport_dev; struct cxl_dport *dport; - struct cxl_port *port; /* * The terminal "grandparent" in PCI is NULL and @platform_bus @@ -1649,7 +1652,8 @@ retry: dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n", dev_name(iter), dev_name(dport_dev), dev_name(uport_dev)); - port = find_cxl_port(dport_dev, &dport); + struct cxl_port *port __free(put_cxl_port) = + find_cxl_port(dport_dev, &dport); if (port) { dev_dbg(&cxlmd->dev, "found already registered port %s:%s\n", @@ -1664,18 +1668,15 @@ retry: * the parent_port lock as the current port may be being * reaped. */ - if (rc && rc != -EBUSY) { - put_device(&port->dev); + if (rc && rc != -EBUSY) return rc; - } + + cxl_gpf_port_setup(dport); /* Any more ports to add between this one and the root? 
*/ - if (!dev_is_cxl_root_child(&port->dev)) { - put_device(&port->dev); + if (!dev_is_cxl_root_child(&port->dev)) continue; - } - put_device(&port->dev); return 0; } @@ -1692,21 +1693,21 @@ retry: return 0; } -EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL); +EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, "CXL"); struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev, struct cxl_dport **dport) { return find_cxl_port(pdev->dev.parent, dport); } -EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, "CXL"); struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, struct cxl_dport **dport) { return find_cxl_port(grandparent(&cxlmd->dev), dport); } -EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL"); static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, struct cxl_port *port, int *target_map) @@ -1733,21 +1734,6 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, return 0; } -struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos) -{ - struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; - struct cxl_decoder *cxld = &cxlsd->cxld; - int iw; - - iw = cxld->interleave_ways; - if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets, - "misconfigured root decoder\n")) - return NULL; - - return cxlrd->cxlsd.target[pos % iw]; -} -EXPORT_SYMBOL_NS_GPL(cxl_hb_modulo, CXL); - static struct lock_class_key cxl_decoder_key; /** @@ -1807,7 +1793,6 @@ static int cxl_switch_decoder_init(struct cxl_port *port, * cxl_root_decoder_alloc - Allocate a root level decoder * @port: owning CXL root of this decoder * @nr_targets: static number of downstream targets - * @calc_hb: which host bridge covers the n'th position by granularity * * Return: A new cxl decoder to be registered by cxl_decoder_add(). A * 'CXL root' decoder is one that decodes from a top-level / static platform @@ -1815,8 +1800,7 @@ static int cxl_switch_decoder_init(struct cxl_port *port, * topology. 
*/ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, - unsigned int nr_targets, - cxl_calc_hb_fn calc_hb) + unsigned int nr_targets) { struct cxl_root_decoder *cxlrd; struct cxl_switch_decoder *cxlsd; @@ -1838,7 +1822,6 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, return ERR_PTR(rc); } - cxlrd->calc_hb = calc_hb; mutex_init(&cxlrd->range_lock); cxld = &cxlsd->cxld; @@ -1858,7 +1841,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, cxlrd->qos_class = CXL_QOS_CLASS_INVALID; return cxlrd; } -EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, "CXL"); /** * cxl_switch_decoder_alloc - Allocate a switch level decoder @@ -1895,7 +1878,7 @@ struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, cxld->dev.type = &cxl_decoder_switch_type; return cxlsd; } -EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, "CXL"); /** * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder @@ -1917,6 +1900,7 @@ struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) return ERR_PTR(-ENOMEM); cxled->pos = -1; + cxled->part = -1; cxld = &cxled->cxld; rc = cxl_decoder_init(port, cxld); if (rc) { @@ -1927,7 +1911,7 @@ struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) cxld->dev.type = &cxl_decoder_endpoint_type; return cxled; } -EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL"); /** * cxl_decoder_add_locked - Add a decoder with targets @@ -1983,7 +1967,7 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) return device_add(dev); } -EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL"); /** * cxl_decoder_add - Add a decoder with targets @@ -2001,7 +1985,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) { struct cxl_port *port; - int rc; if (WARN_ON_ONCE(!cxld)) return -EINVAL; @@ -2011,13 +1994,10 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) port = to_cxl_port(cxld->dev.parent); - device_lock(&port->dev); - rc = cxl_decoder_add_locked(cxld, target_map); - device_unlock(&port->dev); - - return rc; + guard(device)(&port->dev); + return cxl_decoder_add_locked(cxld, target_map); } -EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL"); static void cxld_unregister(void *dev) { @@ -2035,7 +2015,7 @@ int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld) { return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev); } -EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, "CXL"); /** * __cxl_driver_register - register a driver for the cxl bus @@ -2068,13 +2048,13 @@ int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, return driver_register(&cxl_drv->drv); } -EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL); +EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, "CXL"); void cxl_driver_unregister(struct cxl_driver *cxl_drv) { driver_unregister(&cxl_drv->drv); } -EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, "CXL"); static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) { @@ -2082,7 +2062,7 @@ static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 
cxl_device_id(dev)); } -static int cxl_bus_match(struct device *dev, struct device_driver *drv) +static int cxl_bus_match(struct device *dev, const struct device_driver *drv) { return cxl_device_id(dev) == to_cxl_drv(drv)->id; } @@ -2106,11 +2086,18 @@ static void cxl_bus_remove(struct device *dev) static struct workqueue_struct *cxl_bus_wq; -static void cxl_bus_rescan_queue(struct work_struct *w) +static int cxl_rescan_attach(struct device *dev, void *data) { - int rc = bus_rescan_devices(&cxl_bus_type); + int rc = device_attach(dev); + + dev_vdbg(dev, "rescan: %s\n", rc ? "attach" : "detached"); - pr_debug("CXL bus rescan result: %d\n", rc); + return 0; +} + +static void cxl_bus_rescan_queue(struct work_struct *w) +{ + bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_rescan_attach); } void cxl_bus_rescan(void) @@ -2119,19 +2106,19 @@ void cxl_bus_rescan(void) queue_work(cxl_bus_wq, &rescan_work); } -EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, "CXL"); void cxl_bus_drain(void) { drain_workqueue(cxl_bus_wq); } -EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, "CXL"); bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd) { return queue_work(cxl_bus_wq, &cxlmd->detach_work); } -EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL); +EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, "CXL"); static void add_latency(struct access_coordinate *c, long latency) { @@ -2184,6 +2171,7 @@ static bool parent_port_is_cxl_root(struct cxl_port *port) int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, struct access_coordinate *coord) { + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); struct access_coordinate c[] = { { .read_bandwidth = UINT_MAX, @@ -2197,6 +2185,7 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, struct cxl_port *iter = port; struct cxl_dport *dport; struct pci_dev *pdev; + struct device *dev; unsigned int bw; bool is_cxl_root; @@ -2204,6 +2193,13 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, return -EINVAL; /* + * Skip calculation for RCD. Expectation is HMAT already covers RCD case + * since RCH does not support hotplug. + */ + if (cxlmd->cxlds->rcd) + return 0; + + /* * Exit the loop when the parent port of the current iter port is cxl * root. 
The iterative loop starts at the endpoint and gathers the * latency of the CXL link from the current device/port to the connected @@ -2232,8 +2228,12 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, return -EINVAL; cxl_coordinates_combine(c, c, dport->coord); + dev = port->uport_dev->parent; + if (!dev_is_pci(dev)) + return -ENODEV; + /* Get the calculated PCI paths bandwidth */ - pdev = to_pci_dev(port->uport_dev->parent); + pdev = to_pci_dev(dev); bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL); if (bw == 0) return -ENXIO; @@ -2244,7 +2244,27 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, "CXL"); + +int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, + struct access_coordinate *c) +{ + struct cxl_dport *dport = port->parent_dport; + + /* Check this port is connected to a switch DSP and not an RP */ + if (parent_port_is_cxl_root(to_cxl_port(port->dev.parent))) + return -ENODEV; + + if (!coordinates_valid(dport->coord)) + return -EINVAL; + + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + c[i].read_bandwidth = dport->coord[i].read_bandwidth; + c[i].write_bandwidth = dport->coord[i].write_bandwidth; + } + + return 0; +} /* for user tooling to ensure port disable work has completed */ static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count) @@ -2281,7 +2301,7 @@ struct bus_type cxl_bus_type = { .remove = cxl_bus_remove, .bus_groups = cxl_bus_attribute_groups, }; -EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_bus_type, "CXL"); static struct dentry *cxl_debugfs; @@ -2289,7 +2309,7 @@ struct dentry *cxl_debugfs_create_dir(const char *dir) { return debugfs_create_dir(dir, cxl_debugfs); } -EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, "CXL"); static __init int cxl_core_init(void) { @@ -2321,8 +2341,14 @@ static __init int cxl_core_init(void) if (rc) goto err_region; + rc = cxl_ras_init(); + if (rc) + goto err_ras; + return 0; +err_ras: + cxl_region_exit(); err_region: bus_unregister(&cxl_bus_type); err_bus: @@ -2334,6 +2360,7 @@ err_wq: static void cxl_core_exit(void) { + cxl_ras_exit(); cxl_region_exit(); bus_unregister(&cxl_bus_type); destroy_workqueue(cxl_bus_wq); @@ -2343,5 +2370,6 @@ static void cxl_core_exit(void) subsys_initcall(cxl_core_init); module_exit(cxl_core_exit); +MODULE_DESCRIPTION("CXL: Core Compute Express Link support"); MODULE_LICENSE("GPL v2"); -MODULE_IMPORT_NS(CXL); +MODULE_IMPORT_NS("CXL"); diff --git a/drivers/cxl/core/ras.c b/drivers/cxl/core/ras.c new file mode 100644 index 000000000000..2731ba3a0799 --- /dev/null +++ b/drivers/cxl/core/ras.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2025 AMD Corporation. All rights reserved. 
*/ + +#include <linux/pci.h> +#include <linux/aer.h> +#include <cxl/event.h> +#include <cxlmem.h> +#include "trace.h" + +static void cxl_cper_trace_corr_port_prot_err(struct pci_dev *pdev, + struct cxl_ras_capability_regs ras_cap) +{ + u32 status = ras_cap.cor_status & ~ras_cap.cor_mask; + + trace_cxl_port_aer_correctable_error(&pdev->dev, status); +} + +static void cxl_cper_trace_uncorr_port_prot_err(struct pci_dev *pdev, + struct cxl_ras_capability_regs ras_cap) +{ + u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask; + u32 fe; + + if (hweight32(status) > 1) + fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK, + ras_cap.cap_control)); + else + fe = status; + + trace_cxl_port_aer_uncorrectable_error(&pdev->dev, status, fe, + ras_cap.header_log); +} + +static void cxl_cper_trace_corr_prot_err(struct cxl_memdev *cxlmd, + struct cxl_ras_capability_regs ras_cap) +{ + u32 status = ras_cap.cor_status & ~ras_cap.cor_mask; + + trace_cxl_aer_correctable_error(cxlmd, status); +} + +static void +cxl_cper_trace_uncorr_prot_err(struct cxl_memdev *cxlmd, + struct cxl_ras_capability_regs ras_cap) +{ + u32 status = ras_cap.uncor_status & ~ras_cap.uncor_mask; + u32 fe; + + if (hweight32(status) > 1) + fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK, + ras_cap.cap_control)); + else + fe = status; + + trace_cxl_aer_uncorrectable_error(cxlmd, status, fe, + ras_cap.header_log); +} + +static int match_memdev_by_parent(struct device *dev, const void *uport) +{ + if (is_cxl_memdev(dev) && dev->parent == uport) + return 1; + return 0; +} + +static void cxl_cper_handle_prot_err(struct cxl_cper_prot_err_work_data *data) +{ + unsigned int devfn = PCI_DEVFN(data->prot_err.agent_addr.device, + data->prot_err.agent_addr.function); + struct pci_dev *pdev __free(pci_dev_put) = + pci_get_domain_bus_and_slot(data->prot_err.agent_addr.segment, + data->prot_err.agent_addr.bus, + devfn); + struct cxl_memdev *cxlmd; + int port_type; + + if (!pdev) + return; + + port_type = pci_pcie_type(pdev); + if (port_type == PCI_EXP_TYPE_ROOT_PORT || + port_type == PCI_EXP_TYPE_DOWNSTREAM || + port_type == PCI_EXP_TYPE_UPSTREAM) { + if (data->severity == AER_CORRECTABLE) + cxl_cper_trace_corr_port_prot_err(pdev, data->ras_cap); + else + cxl_cper_trace_uncorr_port_prot_err(pdev, data->ras_cap); + + return; + } + + guard(device)(&pdev->dev); + if (!pdev->dev.driver) + return; + + struct device *mem_dev __free(put_device) = bus_find_device( + &cxl_bus_type, NULL, pdev, match_memdev_by_parent); + if (!mem_dev) + return; + + cxlmd = to_cxl_memdev(mem_dev); + if (data->severity == AER_CORRECTABLE) + cxl_cper_trace_corr_prot_err(cxlmd, data->ras_cap); + else + cxl_cper_trace_uncorr_prot_err(cxlmd, data->ras_cap); +} + +static void cxl_cper_prot_err_work_fn(struct work_struct *work) +{ + struct cxl_cper_prot_err_work_data wd; + + while (cxl_cper_prot_err_kfifo_get(&wd)) + cxl_cper_handle_prot_err(&wd); +} +static DECLARE_WORK(cxl_cper_prot_err_work, cxl_cper_prot_err_work_fn); + +int cxl_ras_init(void) +{ + return cxl_cper_register_prot_err_work(&cxl_cper_prot_err_work); +} + +void cxl_ras_exit(void) +{ + cxl_cper_unregister_prot_err_work(&cxl_cper_prot_err_work); + cancel_work_sync(&cxl_cper_prot_err_work); +} diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 5c186e0a39b9..6e5e1460068d 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -9,6 +9,7 @@ #include <linux/uuid.h> #include <linux/sort.h> #include <linux/idr.h> +#include <linux/memory-tiers.h> #include <cxlmem.h> #include <cxl.h> #include 
"core.h" @@ -143,7 +144,7 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, rc = down_read_interruptible(&cxl_region_rwsem); if (rc) return rc; - if (cxlr->mode != CXL_DECODER_PMEM) + if (cxlr->mode != CXL_PARTMODE_PMEM) rc = sysfs_emit(buf, "\n"); else rc = sysfs_emit(buf, "%pUb\n", &p->uuid); @@ -230,30 +231,27 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr) &cxlr->dev, "Bypassing cpu_cache_invalidate_memregion() for testing!\n"); return 0; - } else { - dev_err(&cxlr->dev, - "Failed to synchronize CPU cache state\n"); - return -ENXIO; } + dev_WARN(&cxlr->dev, + "Failed to synchronize CPU cache state\n"); + return -ENXIO; } cpu_cache_invalidate_memregion(IORES_DESC_CXL); return 0; } -static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) +static void cxl_region_decode_reset(struct cxl_region *cxlr, int count) { struct cxl_region_params *p = &cxlr->params; - int i, rc = 0; + int i; /* - * Before region teardown attempt to flush, and if the flush - * fails cancel the region teardown for data consistency - * concerns + * Before region teardown attempt to flush, evict any data cached for + * this region, or scream loudly about missing arch / platform support + * for CXL teardown. */ - rc = cxl_region_invalidate_memregion(cxlr); - if (rc) - return rc; + cxl_region_invalidate_memregion(cxlr); for (i = count - 1; i >= 0; i--) { struct cxl_endpoint_decoder *cxled = p->targets[i]; @@ -276,23 +274,17 @@ static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) cxl_rr = cxl_rr_load(iter, cxlr); cxld = cxl_rr->decoder; if (cxld->reset) - rc = cxld->reset(cxld); - if (rc) - return rc; + cxld->reset(cxld); set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); } endpoint_reset: - rc = cxled->cxld.reset(&cxled->cxld); - if (rc) - return rc; + cxled->cxld.reset(&cxled->cxld); set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); } /* all decoders associated with this region have been torn down */ clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); - - return 0; } static int commit_decoder(struct cxl_decoder *cxld) @@ -408,16 +400,8 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr, * still pending. */ if (p->state == CXL_CONFIG_RESET_PENDING) { - rc = cxl_region_decode_reset(cxlr, p->interleave_ways); - /* - * Revert to committed since there may still be active - * decoders associated with this region, or move forward - * to active to mark the reset successful - */ - if (rc) - p->state = CXL_CONFIG_COMMIT; - else - p->state = CXL_CONFIG_ACTIVE; + cxl_region_decode_reset(cxlr, p->interleave_ways); + p->state = CXL_CONFIG_ACTIVE; } } @@ -456,7 +440,7 @@ static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a, * Support tooling that expects to find a 'uuid' attribute for all * regions regardless of mode. 
*/ - if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM) + if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM) return 0444; return a->mode; } @@ -618,8 +602,16 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_region *cxlr = to_cxl_region(dev); + const char *desc; + + if (cxlr->mode == CXL_PARTMODE_RAM) + desc = "ram"; + else if (cxlr->mode == CXL_PARTMODE_PMEM) + desc = "pmem"; + else + desc = ""; - return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode)); + return sysfs_emit(buf, "%s\n", desc); } static DEVICE_ATTR_RO(mode); @@ -645,7 +637,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size) /* ways, granularity and uuid (if PMEM) need to be set before HPA */ if (!p->interleave_ways || !p->interleave_granularity || - (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid))) + (cxlr->mode == CXL_PARTMODE_PMEM && uuid_is_null(&p->uuid))) return -ENXIO; div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder); @@ -793,31 +785,70 @@ out: return rc; } -static int match_free_decoder(struct device *dev, void *data) +static int check_commit_order(struct device *dev, void *data) { + struct cxl_decoder *cxld = to_cxl_decoder(dev); + + /* + * if port->commit_end is not the only free decoder, then out of + * order shutdown has occurred, block further allocations until + * that is resolved + */ + if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) + return -EBUSY; + return 0; +} + +static int match_free_decoder(struct device *dev, const void *data) +{ + struct cxl_port *port = to_cxl_port(dev->parent); struct cxl_decoder *cxld; - int *id = data; + int rc; if (!is_switch_decoder(dev)) return 0; cxld = to_cxl_decoder(dev); - /* enforce ordered allocation */ - if (cxld->id != *id) + if (cxld->id != port->commit_end + 1) return 0; - if (!cxld->region) - return 1; + if (cxld->region) { + dev_dbg(dev->parent, + "next decoder to commit (%s) is already reserved (%s)\n", + dev_name(dev), dev_name(&cxld->region->dev)); + return 0; + } - (*id)++; + rc = device_for_each_child_reverse_from(dev->parent, dev, NULL, + check_commit_order); + if (rc) { + dev_dbg(dev->parent, + "unable to allocate %s due to out of order shutdown\n", + dev_name(dev)); + return 0; + } + return 1; +} - return 0; +static bool region_res_match_cxl_range(const struct cxl_region_params *p, + struct range *range) +{ + if (!p->res) + return false; + + /* + * If an extended linear cache region then the CXL range is assumed + * to be fronted by the DRAM range in current known implementation. + * This assumption will be made until a variant implementation exists. 
+ */ + return p->res->start + p->cache_size == range->start && + p->res->end == range->end; } -static int match_auto_decoder(struct device *dev, void *data) +static int match_auto_decoder(struct device *dev, const void *data) { - struct cxl_region_params *p = data; + const struct cxl_region_params *p = data; struct cxl_decoder *cxld; struct range *r; @@ -827,19 +858,31 @@ static int match_auto_decoder(struct device *dev, void *data) cxld = to_cxl_decoder(dev); r = &cxld->hpa_range; - if (p->res && p->res->start == r->start && p->res->end == r->end) + if (region_res_match_cxl_range(p, r)) return 1; return 0; } +/** + * cxl_port_pick_region_decoder() - assign or lookup a decoder for a region + * @port: a port in the ancestry of the endpoint implied by @cxled + * @cxled: endpoint decoder to be, or currently, mapped by @port + * @cxlr: region to establish, or validate, decode @port + * + * In the region creation path cxl_port_pick_region_decoder() is an + * allocator to find a free port. In the region assembly path, it is + * recalling the decoder that platform firmware picked for validation + * purposes. + * + * The result is recorded in a 'struct cxl_region_ref' in @port. + */ static struct cxl_decoder * -cxl_region_find_decoder(struct cxl_port *port, - struct cxl_endpoint_decoder *cxled, - struct cxl_region *cxlr) +cxl_port_pick_region_decoder(struct cxl_port *port, + struct cxl_endpoint_decoder *cxled, + struct cxl_region *cxlr) { struct device *dev; - int id = 0; if (port == cxled_to_port(cxled)) return &cxled->cxld; @@ -848,7 +891,7 @@ cxl_region_find_decoder(struct cxl_port *port, dev = device_find_child(&port->dev, &cxlr->params, match_auto_decoder); else - dev = device_find_child(&port->dev, &id, match_free_decoder); + dev = device_find_child(&port->dev, NULL, match_free_decoder); if (!dev) return NULL; /* @@ -885,7 +928,8 @@ static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter, static struct cxl_region_ref * alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr, - struct cxl_endpoint_decoder *cxled) + struct cxl_endpoint_decoder *cxled, + struct cxl_decoder *cxld) { struct cxl_region_params *p = &cxlr->params; struct cxl_region_ref *cxl_rr, *iter; @@ -899,9 +943,6 @@ alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr, continue; if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { - struct cxl_decoder *cxld; - - cxld = cxl_region_find_decoder(port, cxled, cxlr); if (auto_order_ok(port, iter->region, cxld)) continue; } @@ -983,19 +1024,11 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr, return 0; } -static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr, - struct cxl_endpoint_decoder *cxled, - struct cxl_region_ref *cxl_rr) +static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, + struct cxl_region_ref *cxl_rr, + struct cxl_decoder *cxld) { - struct cxl_decoder *cxld; - - cxld = cxl_region_find_decoder(port, cxled, cxlr); - if (!cxld) { - dev_dbg(&cxlr->dev, "%s: no decoder available\n", - dev_name(&port->dev)); - return -EBUSY; - } - if (cxld->region) { dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", dev_name(&port->dev), dev_name(&cxld->dev), @@ -1086,7 +1119,16 @@ static int cxl_port_attach_region(struct cxl_port *port, nr_targets_inc = true; } } else { - cxl_rr = alloc_region_ref(port, cxlr, cxled); + struct cxl_decoder *cxld; + + cxld = cxl_port_pick_region_decoder(port, cxled, cxlr); + if (!cxld) { + dev_dbg(&cxlr->dev, "%s: no 
decoder available\n", + dev_name(&port->dev)); + return -EBUSY; + } + + cxl_rr = alloc_region_ref(port, cxlr, cxled, cxld); if (IS_ERR(cxl_rr)) { dev_dbg(&cxlr->dev, "%s: failed to allocate region reference\n", @@ -1095,12 +1137,32 @@ static int cxl_port_attach_region(struct cxl_port *port, } nr_targets_inc = true; - rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr); + rc = cxl_rr_assign_decoder(port, cxlr, cxled, cxl_rr, cxld); if (rc) goto out_erase; } cxld = cxl_rr->decoder; + /* + * the number of targets should not exceed the target_count + * of the decoder + */ + if (is_switch_decoder(&cxld->dev)) { + struct cxl_switch_decoder *cxlsd; + + cxlsd = to_cxl_switch_decoder(&cxld->dev); + if (cxl_rr->nr_targets > cxlsd->nr_targets) { + dev_dbg(&cxlr->dev, + "%s:%s %s add: %s:%s @ %d overflows targets: %d\n", + dev_name(port->uport_dev), dev_name(&port->dev), + dev_name(&cxld->dev), dev_name(&cxlmd->dev), + dev_name(&cxled->cxld.dev), pos, + cxlsd->nr_targets); + rc = -ENXIO; + goto out_erase; + } + } + rc = cxl_rr_ep_add(cxl_rr, cxled); if (rc) { dev_dbg(&cxlr->dev, @@ -1210,6 +1272,50 @@ static int check_last_peer(struct cxl_endpoint_decoder *cxled, return 0; } +static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + unsigned int interleave_mask; + u8 eiw; + u16 eig; + int high_pos, low_pos; + + if (!test_bit(iw, &cxlhdm->iw_cap_mask)) + return -ENXIO; + /* + * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection), + * if eiw < 8: + * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw] + * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0] + * + * when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the + * interleave bits are none. + * + * if eiw >= 8: + * DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3 + * DPAOFFSET[eig + 7: 0] = HPAOFFSET[eig + 7: 0] + * + * when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the + * interleave bits are none. + */ + ways_to_eiw(iw, &eiw); + if (eiw == 0 || eiw == 8) + return 0; + + granularity_to_eig(ig, &eig); + if (eiw > 8) + high_pos = eiw + eig - 1; + else + high_pos = eiw + eig + 7; + low_pos = eig + 8; + interleave_mask = GENMASK(high_pos, low_pos); + if (interleave_mask & ~cxlhdm->interleave_mask) + return -ENXIO; + + return 0; +} + static int cxl_port_setup_targets(struct cxl_port *port, struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled) @@ -1223,6 +1329,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, struct cxl_region_params *p = &cxlr->params; struct cxl_decoder *cxld = cxl_rr->decoder; struct cxl_switch_decoder *cxlsd; + struct cxl_port *iter = port; u16 eig, peig; u8 eiw, peiw; @@ -1239,16 +1346,26 @@ static int cxl_port_setup_targets(struct cxl_port *port, cxlsd = to_cxl_switch_decoder(&cxld->dev); if (cxl_rr->nr_targets_set) { - int i, distance; + int i, distance = 1; + struct cxl_region_ref *cxl_rr_iter; /* - * Passthrough decoders impose no distance requirements between - * peers + * The "distance" between peer downstream ports represents which + * endpoint positions in the region interleave a given port can + * host. + * + * For example, at the root of a hierarchy the distance is + * always 1 as every index targets a different host-bridge. At + * each subsequent switch level those ports map every Nth region + * position where N is the width of the switch == distance. 
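check_interleave_cap() above distils an (iw, ig) request into the HPA bits that select the interleave target and tests them against the decoder's advertised interleave_mask (and iw against iw_cap_mask). A standalone sketch of that derivation with a worked example; the helper is illustrative and not part of the patch:

static int sketch_interleave_selector_mask(unsigned int iw, unsigned int ig,
					   unsigned int *mask)
{
	u8 eiw;
	u16 eig;

	if (ways_to_eiw(iw, &eiw) || granularity_to_eig(ig, &eig))
		return -EINVAL;

	if (eiw == 0 || eiw == 8)	/* all HPA bits used, no selector bits */
		*mask = 0;
	else if (eiw < 8)
		*mask = GENMASK(eiw + eig + 7, eig + 8);
	else
		*mask = GENMASK(eiw + eig - 1, eig + 8);

	/*
	 * Example: iw = 4, ig = 1024 gives eiw = 2, eig = 2 (1024 == 256 << 2),
	 * so the selector bits are HPA[11:10] and *mask = 0xc00. The decoder
	 * must cover those bits in its interleave_mask for the config to work.
	 */
	return 0;
}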
*/ - if (cxl_rr->nr_targets == 1) - distance = 0; - else - distance = p->nr_targets / cxl_rr->nr_targets; + do { + cxl_rr_iter = cxl_rr_load(iter, cxlr); + distance *= cxl_rr_iter->nr_targets; + iter = to_cxl_port(iter->dev.parent); + } while (!is_cxl_root(iter)); + distance *= cxlrd->cxlsd.cxld.interleave_ways; + for (i = 0; i < cxl_rr->nr_targets_set; i++) if (ep->dport == cxlsd->target[i]) { rc = check_last_peer(cxled, ep, cxl_rr, @@ -1340,9 +1457,8 @@ static int cxl_port_setup_targets(struct cxl_port *port, if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { if (cxld->interleave_ways != iw || - cxld->interleave_granularity != ig || - cxld->hpa_range.start != p->res->start || - cxld->hpa_range.end != p->res->end || + (iw > 1 && cxld->interleave_granularity != ig) || + !region_res_match_cxl_range(p, &cxld->hpa_range) || ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { dev_err(&cxlr->dev, "%s:%s %s expected iw: %d ig: %d %pr\n", @@ -1360,6 +1476,15 @@ static int cxl_port_setup_targets(struct cxl_port *port, return -ENXIO; } } else { + rc = check_interleave_cap(cxld, iw, ig); + if (rc) { + dev_dbg(&cxlr->dev, + "%s:%s iw: %d ig: %d is not supported\n", + dev_name(port->uport_dev), + dev_name(&port->dev), iw, ig); + return rc; + } + cxld->interleave_ways = iw; cxld->interleave_granularity = ig; cxld->hpa_range = (struct range) { @@ -1559,10 +1684,13 @@ static int cxl_region_attach_position(struct cxl_region *cxlr, const struct cxl_dport *dport, int pos) { struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; + struct cxl_decoder *cxld = &cxlsd->cxld; + int iw = cxld->interleave_ways; struct cxl_port *iter; int rc; - if (cxlrd->calc_hb(cxlrd, pos) != dport) { + if (dport != cxlrd->cxlsd.target[pos % iw]) { dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), dev_name(&cxlrd->cxlsd.cxld.dev)); @@ -1631,17 +1759,12 @@ static int cmp_interleave_pos(const void *a, const void *b) return cxled_a->pos - cxled_b->pos; } -static struct cxl_port *next_port(struct cxl_port *port) -{ - if (!port->parent_dport) - return NULL; - return port->parent_dport->port; -} - -static int match_switch_decoder_by_range(struct device *dev, void *data) +static int match_switch_decoder_by_range(struct device *dev, + const void *data) { struct cxl_switch_decoder *cxlsd; - struct range *r1, *r2 = data; + const struct range *r1, *r2 = data; + if (!is_switch_decoder(dev)) return 0; @@ -1662,7 +1785,7 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range, struct device *dev; int rc = -ENXIO; - parent = next_port(port); + parent = parent_port_of(port); if (!parent) return rc; @@ -1686,6 +1809,13 @@ static int find_pos_and_ways(struct cxl_port *port, struct range *range, } put_device(dev); + if (rc) + dev_err(port->uport_dev, + "failed to find %s:%s in target list of %s\n", + dev_name(&port->dev), + dev_name(port->parent_dport->dport_dev), + dev_name(&cxlsd->cxld.dev)); + return rc; } @@ -1742,7 +1872,7 @@ static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled) */ /* Iterate from endpoint to root_port refining the position */ - for (iter = port; iter; iter = next_port(iter)) { + for (iter = port; iter; iter = parent_port_of(iter)) { if (is_cxl_root(iter)) break; @@ -1791,27 +1921,39 @@ static int cxl_region_attach(struct cxl_region *cxlr, { struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_dev_state 
*cxlds = cxlmd->cxlds; struct cxl_region_params *p = &cxlr->params; struct cxl_port *ep_port, *root_port; struct cxl_dport *dport; int rc = -ENXIO; - if (cxled->mode != cxlr->mode) { - dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n", - dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode); - return -EINVAL; + rc = check_interleave_cap(&cxled->cxld, p->interleave_ways, + p->interleave_granularity); + if (rc) { + dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n", + dev_name(&cxled->cxld.dev), p->interleave_ways, + p->interleave_granularity); + return rc; } - if (cxled->mode == CXL_DECODER_DEAD) { + if (cxled->part < 0) { dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev)); return -ENODEV; } + if (cxlds->part[cxled->part].mode != cxlr->mode) { + dev_dbg(&cxlr->dev, "%s region mode: %d mismatch\n", + dev_name(&cxled->cxld.dev), cxlr->mode); + return -EINVAL; + } + /* all full of members, or interleave config not established? */ if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) { dev_dbg(&cxlr->dev, "region already active\n"); return -EBUSY; - } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { + } + + if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { dev_dbg(&cxlr->dev, "interleave config missing\n"); return -ENXIO; } @@ -1845,13 +1987,13 @@ static int cxl_region_attach(struct cxl_region *cxlr, return -ENXIO; } - if (resource_size(cxled->dpa_res) * p->interleave_ways != + if (resource_size(cxled->dpa_res) * p->interleave_ways + p->cache_size != resource_size(p->res)) { dev_dbg(&cxlr->dev, - "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n", + "%s:%s-size-%#llx * ways-%d + cache-%#llx != region-size-%#llx\n", dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), (u64)resource_size(cxled->dpa_res), p->interleave_ways, - (u64)resource_size(p->res)); + (u64)p->cache_size, (u64)resource_size(p->res)); return -EINVAL; } @@ -1897,6 +2039,7 @@ static int cxl_region_attach(struct cxl_region *cxlr, * then the region is already committed. 
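The size check in cxl_region_attach() above now folds the extended linear cache into the equality: per-endpoint DPA capacity times interleave ways plus cache_size must equal the region resource. A worked example with invented sizes:

/*
 * Example (not from the patch): a 2-way region where each endpoint
 * decoder maps 4 GiB of DPA provides 8 GiB of CXL capacity. With an
 * equal-sized extended linear cache (cache_size = 8 GiB) the region
 * resource spans 16 GiB and the check holds:
 *
 *	4 GiB * 2 + 8 GiB == 16 GiB
 *
 * Without a cache, cache_size is 0 and the original equality is unchanged.
 */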
*/ p->state = CXL_CONFIG_COMMIT; + cxl_region_shared_upstream_bandwidth_update(cxlr); return 0; } @@ -1918,6 +2061,7 @@ static int cxl_region_attach(struct cxl_region *cxlr, if (rc) return rc; p->state = CXL_CONFIG_ACTIVE; + cxl_region_shared_upstream_bandwidth_update(cxlr); } cxled->cxld.interleave_ways = p->interleave_ways; @@ -1966,13 +2110,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) get_device(&cxlr->dev); if (p->state > CXL_CONFIG_ACTIVE) { - /* - * TODO: tear down all impacted regions if a device is - * removed out of order - */ - rc = cxl_region_decode_reset(cxlr, p->interleave_ways); - if (rc) - goto out; + cxl_region_decode_reset(cxlr, p->interleave_ways); p->state = CXL_CONFIG_ACTIVE; } @@ -2013,7 +2151,7 @@ out: void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) { down_write(&cxl_region_rwsem); - cxled->mode = CXL_DECODER_DEAD; + cxled->part = -1; cxl_region_detach(cxled); up_write(&cxl_region_rwsem); } @@ -2035,6 +2173,12 @@ static int attach_target(struct cxl_region *cxlr, rc = cxl_region_attach(cxlr, cxled, pos); up_read(&cxl_dpa_rwsem); up_write(&cxl_region_rwsem); + + if (rc) + dev_warn(cxled->cxld.dev.parent, + "failed to attach %s to %s: %d\n", + dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc); + return rc; } @@ -2210,7 +2354,7 @@ bool is_cxl_region(struct device *dev) { return dev->type == &cxl_region_type; } -EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL); +EXPORT_SYMBOL_NS_GPL(is_cxl_region, "CXL"); static struct cxl_region *to_cxl_region(struct device *dev) { @@ -2227,7 +2371,6 @@ static void unregister_region(void *_cxlr) struct cxl_region_params *p = &cxlr->params; int i; - unregister_memory_notifier(&cxlr->memory_notifier); device_del(&cxlr->dev); /* @@ -2309,9 +2452,6 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb, { struct cxl_region *cxlr = container_of(nb, struct cxl_region, memory_notifier); - struct cxl_region_params *p = &cxlr->params; - struct cxl_endpoint_decoder *cxled = p->targets[0]; - struct cxl_decoder *cxld = &cxled->cxld; struct memory_notify *mnb = arg; int nid = mnb->status_change_nid; int region_nid; @@ -2319,7 +2459,11 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb, if (nid == NUMA_NO_NODE || action != MEM_ONLINE) return NOTIFY_DONE; - region_nid = phys_to_target_node(cxld->hpa_range.start); + /* + * No need to hold cxl_region_rwsem; region parameters are stable + * within the cxl_region driver. + */ + region_nid = phys_to_target_node(cxlr->params.res->start); if (nid != region_nid) return NOTIFY_DONE; @@ -2329,6 +2473,31 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb, return NOTIFY_OK; } +static int cxl_region_calculate_adistance(struct notifier_block *nb, + unsigned long nid, void *data) +{ + struct cxl_region *cxlr = container_of(nb, struct cxl_region, + adist_notifier); + struct access_coordinate *perf; + int *adist = data; + int region_nid; + + /* + * No need to hold cxl_region_rwsem; region parameters are stable + * within the cxl_region driver. 
+ */ + region_nid = phys_to_target_node(cxlr->params.res->start); + if (nid != region_nid) + return NOTIFY_OK; + + perf = &cxlr->coord[ACCESS_COORDINATE_CPU]; + + if (mt_perf_to_adistance(perf, adist)) + return NOTIFY_OK; + + return NOTIFY_STOP; +} + /** * devm_cxl_add_region - Adds a region to a decoder * @cxlrd: root decoder @@ -2344,7 +2513,7 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb, */ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, int id, - enum cxl_decoder_mode mode, + enum cxl_partition_mode mode, enum cxl_decoder_type type) { struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); @@ -2352,15 +2521,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, struct device *dev; int rc; - switch (mode) { - case CXL_DECODER_RAM: - case CXL_DECODER_PMEM: - break; - default: - dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); - return ERR_PTR(-EINVAL); - } - cxlr = cxl_region_alloc(cxlrd, id); if (IS_ERR(cxlr)) return cxlr; @@ -2376,10 +2536,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, if (rc) goto err; - cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback; - cxlr->memory_notifier.priority = CXL_CALLBACK_PRI; - register_memory_notifier(&cxlr->memory_notifier); - rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr); if (rc) return ERR_PTR(rc); @@ -2411,10 +2567,19 @@ static ssize_t create_ram_region_show(struct device *dev, } static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, - enum cxl_decoder_mode mode, int id) + enum cxl_partition_mode mode, int id) { int rc; + switch (mode) { + case CXL_PARTMODE_RAM: + case CXL_PARTMODE_PMEM: + break; + default: + dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); + return ERR_PTR(-EINVAL); + } + rc = memregion_alloc(GFP_KERNEL); if (rc < 0) return ERR_PTR(rc); @@ -2427,9 +2592,8 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd, return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); } -static ssize_t create_pmem_region_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t create_region_store(struct device *dev, const char *buf, + size_t len, enum cxl_partition_mode mode) { struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); struct cxl_region *cxlr; @@ -2439,31 +2603,26 @@ static ssize_t create_pmem_region_store(struct device *dev, if (rc != 1) return -EINVAL; - cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id); + cxlr = __create_region(cxlrd, mode, id); if (IS_ERR(cxlr)) return PTR_ERR(cxlr); return len; } + +static ssize_t create_pmem_region_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + return create_region_store(dev, buf, len, CXL_PARTMODE_PMEM); +} DEVICE_ATTR_RW(create_pmem_region); static ssize_t create_ram_region_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); - struct cxl_region *cxlr; - int rc, id; - - rc = sscanf(buf, "region%d\n", &id); - if (rc != 1) - return -EINVAL; - - cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id); - if (IS_ERR(cxlr)) - return PTR_ERR(cxlr); - - return len; + return create_region_store(dev, buf, len, CXL_PARTMODE_RAM); } DEVICE_ATTR_RW(create_ram_region); @@ -2548,7 +2707,7 @@ bool is_cxl_pmem_region(struct device *dev) { return dev->type == 
&cxl_pmem_region_type; } -EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL); +EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, "CXL"); struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev) { @@ -2557,11 +2716,11 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev) return NULL; return container_of(dev, struct cxl_pmem_region, dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, "CXL"); struct cxl_poison_context { struct cxl_port *port; - enum cxl_decoder_mode mode; + int part; u64 offset; }; @@ -2569,47 +2728,45 @@ static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd, struct cxl_poison_context *ctx) { struct cxl_dev_state *cxlds = cxlmd->cxlds; + const struct resource *res; + struct resource *p, *last; u64 offset, length; int rc = 0; + if (ctx->part < 0) + return 0; + /* - * Collect poison for the remaining unmapped resources - * after poison is collected by committed endpoints. - * - * Knowing that PMEM must always follow RAM, get poison - * for unmapped resources based on the last decoder's mode: - * ram: scan remains of ram range, then any pmem range - * pmem: scan remains of pmem range + * Collect poison for the remaining unmapped resources after + * poison is collected by committed endpoints decoders. */ - - if (ctx->mode == CXL_DECODER_RAM) { - offset = ctx->offset; - length = resource_size(&cxlds->ram_res) - offset; + for (int i = ctx->part; i < cxlds->nr_partitions; i++) { + res = &cxlds->part[i].res; + for (p = res->child, last = NULL; p; p = p->sibling) + last = p; + if (last) + offset = last->end + 1; + else + offset = res->start; + length = res->end - offset + 1; + if (!length) + break; rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); - if (rc == -EFAULT) - rc = 0; + if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM) + continue; if (rc) - return rc; - } - if (ctx->mode == CXL_DECODER_PMEM) { - offset = ctx->offset; - length = resource_size(&cxlds->dpa_res) - offset; - if (!length) - return 0; - } else if (resource_size(&cxlds->pmem_res)) { - offset = cxlds->pmem_res.start; - length = resource_size(&cxlds->pmem_res); - } else { - return 0; + break; } - return cxl_mem_get_poison(cxlmd, offset, length, NULL); + return rc; } static int poison_by_decoder(struct device *dev, void *arg) { struct cxl_poison_context *ctx = arg; struct cxl_endpoint_decoder *cxled; + enum cxl_partition_mode mode; + struct cxl_dev_state *cxlds; struct cxl_memdev *cxlmd; u64 offset, length; int rc = 0; @@ -2618,27 +2775,18 @@ static int poison_by_decoder(struct device *dev, void *arg) return rc; cxled = to_cxl_endpoint_decoder(dev); - if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) - return rc; - - /* - * Regions are only created with single mode decoders: pmem or ram. - * Linux does not support mixed mode decoders. This means that - * reading poison per endpoint decoder adheres to the requirement - * that poison reads of pmem and ram must be separated. 
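The reworked cxl_get_poison_unmapped() above walks each partition's resource tree instead of special-casing ram then pmem. The per-partition arithmetic, pulled out as a standalone sketch (the helper name is illustrative), assuming the child resources are the DPA spans already claimed by endpoint decoders:

static void sketch_unmapped_tail(const struct resource *part,
				 u64 *offset, u64 *length)
{
	struct resource *p, *last = NULL;

	/* find the highest claimed DPA span in this partition */
	for (p = part->child; p; p = p->sibling)
		last = p;

	*offset = last ? last->end + 1 : part->start;
	*length = part->end - *offset + 1;	/* 0 when fully claimed */
}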
- * CXL 3.0 Spec 8.2.9.8.4.1 - */ - if (cxled->mode == CXL_DECODER_MIXED) { - dev_dbg(dev, "poison list read unsupported in mixed mode\n"); + if (!cxled->dpa_res) return rc; - } cxlmd = cxled_to_memdev(cxled); + cxlds = cxlmd->cxlds; + mode = cxlds->part[cxled->part].mode; + if (cxled->skip) { offset = cxled->dpa_res->start - cxled->skip; length = cxled->skip; rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); - if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) + if (rc == -EFAULT && mode == CXL_PARTMODE_RAM) rc = 0; if (rc) return rc; @@ -2647,7 +2795,7 @@ static int poison_by_decoder(struct device *dev, void *arg) offset = cxled->dpa_res->start; length = cxled->dpa_res->end - offset + 1; rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region); - if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) + if (rc == -EFAULT && mode == CXL_PARTMODE_RAM) rc = 0; if (rc) return rc; @@ -2655,7 +2803,7 @@ static int poison_by_decoder(struct device *dev, void *arg) /* Iterate until commit_end is reached */ if (cxled->cxld.id == ctx->port->commit_end) { ctx->offset = cxled->dpa_res->end + 1; - ctx->mode = cxled->mode; + ctx->part = cxled->part; return 1; } @@ -2668,7 +2816,8 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port) int rc = 0; ctx = (struct cxl_poison_context) { - .port = port + .port = port, + .part = -1, }; rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder); @@ -2679,28 +2828,167 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port) return rc; } +struct cxl_dpa_to_region_context { + struct cxl_region *cxlr; + u64 dpa; +}; + +static int __cxl_dpa_to_region(struct device *dev, void *arg) +{ + struct cxl_dpa_to_region_context *ctx = arg; + struct cxl_endpoint_decoder *cxled; + struct cxl_region *cxlr; + u64 dpa = ctx->dpa; + + if (!is_endpoint_decoder(dev)) + return 0; + + cxled = to_cxl_endpoint_decoder(dev); + if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res)) + return 0; + + if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) + return 0; + + /* + * Stop the region search (return 1) when an endpoint mapping is + * found. The region may not be fully constructed so offering + * the cxlr in the context structure is not guaranteed. 
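A caller sketch for the two translation helpers added here (illustrative only; the actual consumers are the event and poison reporting paths, and the locking around the lookup is elided):

static void sketch_translate_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
	struct cxl_region *cxlr;
	u64 hpa;

	cxlr = cxl_dpa_to_region(cxlmd, dpa);
	if (!cxlr)		/* DPA not mapped by a committed decoder */
		return;

	hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
	if (hpa == ULLONG_MAX)	/* outside the region or wrong chunk */
		return;

	dev_dbg(&cxlmd->dev, "dpa %#llx -> hpa %#llx in %s\n",
		dpa, hpa, dev_name(&cxlr->dev));
}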
+ */ + cxlr = cxled->cxld.region; + if (cxlr) + dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, + dev_name(&cxlr->dev)); + else + dev_dbg(dev, "dpa:0x%llx mapped in endpoint:%s\n", dpa, + dev_name(dev)); + + ctx->cxlr = cxlr; + + return 1; +} + +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dpa_to_region_context ctx; + struct cxl_port *port; + + ctx = (struct cxl_dpa_to_region_context) { + .dpa = dpa, + }; + port = cxlmd->endpoint; + if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) + device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); + + return ctx.cxlr; +} + +static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos) +{ + struct cxl_region_params *p = &cxlr->params; + int gran = p->interleave_granularity; + int ways = p->interleave_ways; + u64 offset; + + /* Is the hpa in an expected chunk for its pos(-ition) */ + offset = hpa - p->res->start; + offset = do_div(offset, gran * ways); + if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) + return true; + + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); + + return false; +} + +u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, + u64 dpa) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); + u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled = NULL; + u16 eig = 0; + u8 eiw = 0; + int pos; + + for (int i = 0; i < p->nr_targets; i++) { + cxled = p->targets[i]; + if (cxlmd == cxled_to_memdev(cxled)) + break; + } + if (!cxled || cxlmd != cxled_to_memdev(cxled)) + return ULLONG_MAX; + + pos = cxled->pos; + ways_to_eiw(p->interleave_ways, &eiw); + granularity_to_eig(p->interleave_granularity, &eig); + + /* + * The device position in the region interleave set was removed + * from the offset at HPA->DPA translation. To reconstruct the + * HPA, place the 'pos' in the offset. 
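The bit manipulation that follows is easier to see with concrete numbers. A worked example for the eiw < 8 case (values invented for illustration):

/*
 * Example: 2-way region (eiw = 1), 256B granularity (eig = 0), device at
 * pos = 1, dpa_offset = 0x100 (the device's second 256B chunk):
 *
 *	mask_upper  = GENMASK_ULL(51, 8)
 *	hpa_offset  = (0x100 & mask_upper) << 1   = 0x200
 *	hpa_offset |= pos << 8                    -> 0x300
 *	hpa_offset |= 0x100 & GENMASK_ULL(7, 0)   -> 0x300 (low bits are 0)
 *
 * So the chunk at DPA offset 0x100 decodes to HPA offset 0x300, the
 * fourth 256B chunk of the region: chunks 0x000/0x200 belong to pos 0
 * and chunks 0x100/0x300 to pos 1, as modulo interleave requires.
 */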
+ * + * The placement of 'pos' in the HPA is determined by interleave + * ways and granularity and is defined in the CXL Spec 3.0 Section + * 8.2.4.19.13 Implementation Note: Device Decode Logic + */ + + /* Remove the dpa base */ + dpa_offset = dpa - cxl_dpa_resource_start(cxled); + + mask_upper = GENMASK_ULL(51, eig + 8); + + if (eiw < 8) { + hpa_offset = (dpa_offset & mask_upper) << eiw; + hpa_offset |= pos << (eig + 8); + } else { + bits_upper = (dpa_offset & mask_upper) >> (eig + 8); + bits_upper = bits_upper * 3; + hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); + } + + /* The lower bits remain unchanged */ + hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); + + /* Apply the hpa_offset to the region base address */ + hpa = hpa_offset + p->res->start + p->cache_size; + + /* Root decoder translation overrides typical modulo decode */ + if (cxlrd->hpa_to_spa) + hpa = cxlrd->hpa_to_spa(cxlrd, hpa); + + if (hpa < p->res->start || hpa > p->res->end) { + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in region\n", hpa); + return ULLONG_MAX; + } + + /* Simple chunk check, by pos & gran, only applies to modulo decodes */ + if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos))) + return ULLONG_MAX; + + return hpa; +} + static struct lock_class_key cxl_pmem_region_key; -static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) +static int cxl_pmem_region_alloc(struct cxl_region *cxlr) { struct cxl_region_params *p = &cxlr->params; struct cxl_nvdimm_bridge *cxl_nvb; - struct cxl_pmem_region *cxlr_pmem; struct device *dev; int i; - down_read(&cxl_region_rwsem); - if (p->state != CXL_CONFIG_COMMIT) { - cxlr_pmem = ERR_PTR(-ENXIO); - goto out; - } + guard(rwsem_read)(&cxl_region_rwsem); + if (p->state != CXL_CONFIG_COMMIT) + return -ENXIO; - cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), - GFP_KERNEL); - if (!cxlr_pmem) { - cxlr_pmem = ERR_PTR(-ENOMEM); - goto out; - } + struct cxl_pmem_region *cxlr_pmem __free(kfree) = + kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL); + if (!cxlr_pmem) + return -ENOMEM; cxlr_pmem->hpa_range.start = p->res->start; cxlr_pmem->hpa_range.end = p->res->end; @@ -2717,11 +3005,9 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) * bridge for one device is the same for all. 
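cxl_pmem_region_alloc() above is one of several conversions in this patch to scope-based cleanup from linux/cleanup.h. A minimal standalone sketch of the pattern; 'struct foo' and the function are stand-ins, not CXL code:

#include <linux/cleanup.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

struct foo { int id; };

static struct foo *foo_alloc(struct rw_semaphore *sem, int id)
{
	guard(rwsem_read)(sem);		/* dropped automatically on any return */

	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;		/* kfree(NULL) on scope exit is a no-op */

	f->id = id;
	return no_free_ptr(f);		/* success: transfer ownership, skip kfree */
}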
*/ if (i == 0) { - cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); - if (!cxl_nvb) { - cxlr_pmem = ERR_PTR(-ENODEV); - goto out; - } + cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint); + if (!cxl_nvb) + return -ENODEV; cxlr->cxl_nvb = cxl_nvb; } m->cxlmd = cxlmd; @@ -2732,18 +3018,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) } dev = &cxlr_pmem->dev; - cxlr_pmem->cxlr = cxlr; - cxlr->cxlr_pmem = cxlr_pmem; device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); device_set_pm_not_required(dev); dev->parent = &cxlr->dev; dev->bus = &cxl_bus_type; dev->type = &cxl_pmem_region_type; -out: - up_read(&cxl_region_rwsem); + cxlr_pmem->cxlr = cxlr; + cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem); - return cxlr_pmem; + return 0; } static void cxl_dax_region_release(struct device *dev) @@ -2776,7 +3060,7 @@ struct cxl_dax_region *to_cxl_dax_region(struct device *dev) return NULL; return container_of(dev, struct cxl_dax_region, dev); } -EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL); +EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, "CXL"); static struct lock_class_key cxl_dax_region_key; @@ -2786,17 +3070,13 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr) struct cxl_dax_region *cxlr_dax; struct device *dev; - down_read(&cxl_region_rwsem); - if (p->state != CXL_CONFIG_COMMIT) { - cxlr_dax = ERR_PTR(-ENXIO); - goto out; - } + guard(rwsem_read)(&cxl_region_rwsem); + if (p->state != CXL_CONFIG_COMMIT) + return ERR_PTR(-ENXIO); cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL); - if (!cxlr_dax) { - cxlr_dax = ERR_PTR(-ENOMEM); - goto out; - } + if (!cxlr_dax) + return ERR_PTR(-ENOMEM); cxlr_dax->hpa_range.start = p->res->start; cxlr_dax->hpa_range.end = p->res->end; @@ -2809,8 +3089,6 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr) dev->parent = &cxlr->dev; dev->bus = &cxl_bus_type; dev->type = &cxl_dax_region_type; -out: - up_read(&cxl_region_rwsem); return cxlr_dax; } @@ -2838,11 +3116,11 @@ static void cxlr_release_nvdimm(void *_cxlr) struct cxl_region *cxlr = _cxlr; struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; - device_lock(&cxl_nvb->dev); - if (cxlr->cxlr_pmem) - devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, - cxlr->cxlr_pmem); - device_unlock(&cxl_nvb->dev); + scoped_guard(device, &cxl_nvb->dev) { + if (cxlr->cxlr_pmem) + devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, + cxlr->cxlr_pmem); + } cxlr->cxl_nvb = NULL; put_device(&cxl_nvb->dev); } @@ -2860,9 +3138,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) struct device *dev; int rc; - cxlr_pmem = cxl_pmem_region_alloc(cxlr); - if (IS_ERR(cxlr_pmem)) - return PTR_ERR(cxlr_pmem); + rc = cxl_pmem_region_alloc(cxlr); + if (rc) + return rc; + cxlr_pmem = cxlr->cxlr_pmem; cxl_nvb = cxlr->cxl_nvb; dev = &cxlr_pmem->dev; @@ -2877,13 +3156,14 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), dev_name(dev)); - device_lock(&cxl_nvb->dev); - if (cxl_nvb->dev.driver) - rc = devm_add_action_or_reset(&cxl_nvb->dev, - cxlr_pmem_unregister, cxlr_pmem); - else - rc = -ENXIO; - device_unlock(&cxl_nvb->dev); + scoped_guard(device, &cxl_nvb->dev) { + if (cxl_nvb->dev.driver) + rc = devm_add_action_or_reset(&cxl_nvb->dev, + cxlr_pmem_unregister, + cxlr_pmem); + else + rc = -ENXIO; + } if (rc) goto err_bridge; @@ -2935,25 +3215,54 @@ err: return rc; } -static int match_root_decoder_by_range(struct device *dev, void *data) +static int 
match_decoder_by_range(struct device *dev, const void *data) { - struct range *r1, *r2 = data; - struct cxl_root_decoder *cxlrd; + const struct range *r1, *r2 = data; + struct cxl_decoder *cxld; - if (!is_root_decoder(dev)) + if (!is_switch_decoder(dev)) return 0; - cxlrd = to_cxl_root_decoder(dev); - r1 = &cxlrd->cxlsd.cxld.hpa_range; + cxld = to_cxl_decoder(dev); + r1 = &cxld->hpa_range; return range_contains(r1, r2); } -static int match_region_by_range(struct device *dev, void *data) +static struct cxl_decoder * +cxl_port_find_switch_decoder(struct cxl_port *port, struct range *hpa) +{ + struct device *cxld_dev = device_find_child(&port->dev, hpa, + match_decoder_by_range); + + return cxld_dev ? to_cxl_decoder(cxld_dev) : NULL; +} + +static struct cxl_root_decoder * +cxl_find_root_decoder(struct cxl_endpoint_decoder *cxled) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_port *port = cxled_to_port(cxled); + struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port); + struct cxl_decoder *root, *cxld = &cxled->cxld; + struct range *hpa = &cxld->hpa_range; + + root = cxl_port_find_switch_decoder(&cxl_root->port, hpa); + if (!root) { + dev_err(cxlmd->dev.parent, + "%s:%s no CXL window for range %#llx:%#llx\n", + dev_name(&cxlmd->dev), dev_name(&cxld->dev), + cxld->hpa_range.start, cxld->hpa_range.end); + return NULL; + } + + return to_cxl_root_decoder(&root->dev); +} + +static int match_region_by_range(struct device *dev, const void *data) { struct cxl_region_params *p; struct cxl_region *cxlr; - struct range *r = data; - int rc = 0; + const struct range *r = data; if (!is_cxl_region(dev)) return 0; @@ -2961,60 +3270,96 @@ static int match_region_by_range(struct device *dev, void *data) cxlr = to_cxl_region(dev); p = &cxlr->params; - down_read(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_region_rwsem); if (p->res && p->res->start == r->start && p->res->end == r->end) - rc = 1; - up_read(&cxl_region_rwsem); + return 1; - return rc; + return 0; } -/* Establish an empty region covering the given HPA range */ -static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd, - struct cxl_endpoint_decoder *cxled) +static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr, + struct resource *res) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); + struct cxl_region_params *p = &cxlr->params; + int nid = phys_to_target_node(res->start); + resource_size_t size = resource_size(res); + resource_size_t cache_size, start; + int rc; + + rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size); + if (rc) + return rc; + + if (!cache_size) + return 0; + + if (size != cache_size) { + dev_warn(&cxlr->dev, + "Extended Linear Cache size %pa != CXL size %pa. No Support!", + &cache_size, &size); + return -ENXIO; + } + + /* + * Move the start of the range to where the cache range starts. The + * implementation assumes that the cache range is in front of the + * CXL range. This is not dictated by the HMAT spec but is how the + * current known implementation is configured. + * + * The cache range is expected to be within the CFMWS. The adjusted + * res->start should not be less than cxlrd->res->start. 
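The arithmetic here is simple but easy to misread: the HMAT-reported cache is prepended to the CXL range, not appended. A worked example with invented sizes:

/*
 * Example: the endpoint decoders decode a 4 GiB CXL range at
 * [0x2_0000_0000, 0x2_ffff_ffff] and HMAT reports a 4 GiB extended
 * linear cache for that node:
 *
 *	res->start    = 0x2_0000_0000 - 0x1_0000_0000 = 0x1_0000_0000
 *	p->cache_size = 0x1_0000_0000
 *
 * The region resource now spans 8 GiB with the DRAM cache in the front
 * half, and the adjusted start must still fall inside the root decoder
 * window (res->start >= cxlrd->res->start).
 */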
+ */ + start = res->start - cache_size; + if (start < cxlrd->res->start) + return -ENXIO; + + res->start = start; + p->cache_size = cache_size; + + return 0; +} + +static int __construct_region(struct cxl_region *cxlr, + struct cxl_root_decoder *cxlrd, + struct cxl_endpoint_decoder *cxled) { struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); - struct cxl_port *port = cxlrd_to_port(cxlrd); struct range *hpa = &cxled->cxld.hpa_range; struct cxl_region_params *p; - struct cxl_region *cxlr; struct resource *res; int rc; - do { - cxlr = __create_region(cxlrd, cxled->mode, - atomic_read(&cxlrd->region_id)); - } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY); - - if (IS_ERR(cxlr)) { - dev_err(cxlmd->dev.parent, - "%s:%s: %s failed assign region: %ld\n", - dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), - __func__, PTR_ERR(cxlr)); - return cxlr; - } - - down_write(&cxl_region_rwsem); + guard(rwsem_write)(&cxl_region_rwsem); p = &cxlr->params; if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { dev_err(cxlmd->dev.parent, "%s:%s: %s autodiscovery interrupted\n", dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__); - rc = -EBUSY; - goto err; + return -EBUSY; } set_bit(CXL_REGION_F_AUTO, &cxlr->flags); res = kmalloc(sizeof(*res), GFP_KERNEL); - if (!res) { - rc = -ENOMEM; - goto err; - } + if (!res) + return -ENOMEM; *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa), dev_name(&cxlr->dev)); + + rc = cxl_extended_linear_cache_resize(cxlr, res); + if (rc && rc != -EOPNOTSUPP) { + /* + * Failing to support extended linear cache region resize does not + * prevent the region from functioning. Only causes cxl list showing + * incorrect region size. + */ + dev_warn(cxlmd->dev.parent, + "Extended linear cache calculation failed rc:%d\n", rc); + } + rc = insert_resource(cxlrd->res, res); if (rc) { /* @@ -3034,7 +3379,7 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd, rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); if (rc) - goto err; + return rc; dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n", dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__, @@ -3043,57 +3388,81 @@ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd, /* ...to match put_device() in cxl_add_to_region() */ get_device(&cxlr->dev); - up_write(&cxl_region_rwsem); + + return 0; +} + +/* Establish an empty region covering the given HPA range */ +static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd, + struct cxl_endpoint_decoder *cxled) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_port *port = cxlrd_to_port(cxlrd); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + int rc, part = READ_ONCE(cxled->part); + struct cxl_region *cxlr; + + do { + cxlr = __create_region(cxlrd, cxlds->part[part].mode, + atomic_read(&cxlrd->region_id)); + } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY); + + if (IS_ERR(cxlr)) { + dev_err(cxlmd->dev.parent, + "%s:%s: %s failed assign region: %ld\n", + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), + __func__, PTR_ERR(cxlr)); + return cxlr; + } + + rc = __construct_region(cxlr, cxlrd, cxled); + if (rc) { + devm_release_action(port->uport_dev, unregister_region, cxlr); + return ERR_PTR(rc); + } return cxlr; +} -err: - up_write(&cxl_region_rwsem); - devm_release_action(port->uport_dev, unregister_region, cxlr); - return ERR_PTR(rc); +static struct cxl_region * +cxl_find_region_by_range(struct cxl_root_decoder *cxlrd, struct range *hpa) +{ + struct device *region_dev; 
+ + region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa, + match_region_by_range); + if (!region_dev) + return NULL; + + return to_cxl_region(region_dev); } -int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled) +int cxl_add_to_region(struct cxl_endpoint_decoder *cxled) { - struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); struct range *hpa = &cxled->cxld.hpa_range; - struct cxl_decoder *cxld = &cxled->cxld; - struct device *cxlrd_dev, *region_dev; - struct cxl_root_decoder *cxlrd; struct cxl_region_params *p; - struct cxl_region *cxlr; bool attach = false; int rc; - cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range, - match_root_decoder_by_range); - if (!cxlrd_dev) { - dev_err(cxlmd->dev.parent, - "%s:%s no CXL window for range %#llx:%#llx\n", - dev_name(&cxlmd->dev), dev_name(&cxld->dev), - cxld->hpa_range.start, cxld->hpa_range.end); + struct cxl_root_decoder *cxlrd __free(put_cxl_root_decoder) = + cxl_find_root_decoder(cxled); + if (!cxlrd) return -ENXIO; - } - - cxlrd = to_cxl_root_decoder(cxlrd_dev); /* * Ensure that if multiple threads race to construct_region() for @hpa * one does the construction and the others add to that. */ mutex_lock(&cxlrd->range_lock); - region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa, - match_region_by_range); - if (!region_dev) { + struct cxl_region *cxlr __free(put_cxl_region) = + cxl_find_region_by_range(cxlrd, hpa); + if (!cxlr) cxlr = construct_region(cxlrd, cxled); - region_dev = &cxlr->dev; - } else - cxlr = to_cxl_region(region_dev); mutex_unlock(&cxlrd->range_lock); rc = PTR_ERR_OR_ZERO(cxlr); if (rc) - goto out; + return rc; attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE); @@ -3113,12 +3482,37 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled) p->res); } - put_device(region_dev); -out: - put_device(cxlrd_dev); return rc; } -EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, "CXL"); + +u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa) +{ + struct cxl_region_ref *iter; + unsigned long index; + + if (!endpoint) + return ~0ULL; + + guard(rwsem_write)(&cxl_region_rwsem); + + xa_for_each(&endpoint->regions, index, iter) { + struct cxl_region_params *p = &iter->region->params; + + if (p->res->start <= spa && spa <= p->res->end) { + if (!p->cache_size) + return ~0ULL; + + if (spa >= p->res->start + p->cache_size) + return spa - p->cache_size; + + return spa + p->cache_size; + } + } + + return ~0ULL; +} +EXPORT_SYMBOL_NS_GPL(cxl_port_get_spa_cache_alias, "CXL"); static int is_system_ram(struct resource *res, void *arg) { @@ -3129,6 +3523,14 @@ static int is_system_ram(struct resource *res, void *arg) return 1; } +static void shutdown_notifiers(void *_cxlr) +{ + struct cxl_region *cxlr = _cxlr; + + unregister_memory_notifier(&cxlr->memory_notifier); + unregister_mt_adistance_algorithm(&cxlr->adist_notifier); +} + static int cxl_region_probe(struct device *dev) { struct cxl_region *cxlr = to_cxl_region(dev); @@ -3164,10 +3566,32 @@ out: if (rc) return rc; + cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback; + cxlr->memory_notifier.priority = CXL_CALLBACK_PRI; + register_memory_notifier(&cxlr->memory_notifier); + + cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance; + cxlr->adist_notifier.priority = 100; + register_mt_adistance_algorithm(&cxlr->adist_notifier); + + rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr); + if (rc) + return rc; + switch (cxlr->mode) { 
- case CXL_DECODER_PMEM: + case CXL_PARTMODE_PMEM: + rc = devm_cxl_region_edac_register(cxlr); + if (rc) + dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n", + cxlr->id); + return devm_cxl_add_pmem_region(cxlr); - case CXL_DECODER_RAM: + case CXL_PARTMODE_RAM: + rc = devm_cxl_region_edac_register(cxlr); + if (rc) + dev_dbg(&cxlr->dev, "CXL EDAC registration for region_id=%d failed\n", + cxlr->id); + /* * The region can not be manged by CXL if any portion of * it is already online as 'System RAM' @@ -3201,6 +3625,6 @@ void cxl_region_exit(void) cxl_driver_unregister(&cxl_region_driver); } -MODULE_IMPORT_NS(CXL); -MODULE_IMPORT_NS(DEVMEM); +MODULE_IMPORT_NS("CXL"); +MODULE_IMPORT_NS("DEVMEM"); MODULE_ALIAS_CXL(CXL_DEVICE_REGION); diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 3c42f984eeaf..5ca7b0eed568 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -52,7 +52,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET); if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) { - dev_err(dev, + dev_dbg(dev, "Couldn't locate the CXL.cache and CXL.mem capability array header.\n"); return; } @@ -106,7 +106,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base, rmap->size = length; } } -EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, "CXL"); /** * cxl_probe_device_regs() - Detect CXL Device register blocks @@ -174,7 +174,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base, rmap->size = length; } } -EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, "CXL"); void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, resource_size_t length) @@ -232,7 +232,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map, return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, "CXL"); int cxl_map_device_regs(const struct cxl_register_map *map, struct cxl_device_regs *regs) @@ -266,7 +266,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map, return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, "CXL"); static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi, struct cxl_register_map *map) @@ -289,21 +289,17 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi, return true; } -/** - * cxl_find_regblock_instance() - Locate a register block by type / index - * @pdev: The CXL PCI device to enumerate. - * @type: Register Block Indicator id - * @map: Enumeration output, clobbered on error - * @index: Index into which particular instance of a regblock wanted in the - * order found in register locator DVSEC. - * - * Return: 0 if register block enumerated, negative error code otherwise +/* + * __cxl_find_regblock_instance() - Locate a register block or count instances by type / index + * Use CXL_INSTANCES_COUNT for @index if counting instances. * - * A CXL DVSEC may point to one or more register blocks, search for them - * by @type and @index. + * __cxl_find_regblock_instance() may return: + * 0 - if register block enumerated. + * >= 0 - if counting instances. + * < 0 - error code otherwise. 
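With counting folded into the same DVSEC walk via the CXL_INSTANCES_COUNT sentinel, the exported pair is used roughly as in this caller sketch (illustrative, not part of the patch):

static void sketch_walk_component_regblocks(struct pci_dev *pdev)
{
	struct cxl_register_map map;
	int i, count;

	count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_COMPONENT);
	for (i = 0; i < count; i++) {	/* count <= 0: nothing to walk */
		if (cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_COMPONENT,
					       &map, i))
			continue;
		/* map.resource / map.max_size now describe instance i */
	}
}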
*/ -int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, - struct cxl_register_map *map, int index) +static int __cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, + struct cxl_register_map *map, int index) { u32 regloc_size, regblocks; int instance = 0; @@ -314,7 +310,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, .resource = CXL_RESOURCE_NONE, }; - regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL, + regloc = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL, CXL_DVSEC_REG_LOCATOR); if (!regloc) return -ENXIO; @@ -342,9 +338,31 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, } map->resource = CXL_RESOURCE_NONE; + if (index == CXL_INSTANCES_COUNT) + return instance; + return -ENODEV; } -EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, CXL); + +/** + * cxl_find_regblock_instance() - Locate a register block by type / index + * @pdev: The CXL PCI device to enumerate. + * @type: Register Block Indicator id + * @map: Enumeration output, clobbered on error + * @index: Index into which particular instance of a regblock wanted in the + * order found in register locator DVSEC. + * + * Return: 0 if register block enumerated, negative error code otherwise + * + * A CXL DVSEC may point to one or more register blocks, search for them + * by @type and @index. + */ +int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, + struct cxl_register_map *map, unsigned int index) +{ + return __cxl_find_regblock_instance(pdev, type, map, index); +} +EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, "CXL"); /** * cxl_find_regblock() - Locate register blocks by type @@ -360,9 +378,9 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock_instance, CXL); int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, struct cxl_register_map *map) { - return cxl_find_regblock_instance(pdev, type, map, 0); + return __cxl_find_regblock_instance(pdev, type, map, 0); } -EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, "CXL"); /** * cxl_count_regblock() - Count instances of a given regblock type. @@ -371,21 +389,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_find_regblock, CXL); * * Some regblocks may be repeated. Count how many instances. * - * Return: count of matching regblocks. + * Return: non-negative count of matching regblocks, negative error code otherwise. 
*/ int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type) { struct cxl_register_map map; - int rc, count = 0; - while (1) { - rc = cxl_find_regblock_instance(pdev, type, &map, count); - if (rc) - return count; - count++; - } + return __cxl_find_regblock_instance(pdev, type, &map, CXL_INSTANCES_COUNT); } -EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, "CXL"); int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs) { @@ -399,7 +411,7 @@ int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs) return 0; } -EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, "CXL"); static int cxl_map_regblock(struct cxl_register_map *map) { @@ -468,7 +480,7 @@ int cxl_setup_regs(struct cxl_register_map *map) return rc; } -EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, "CXL"); u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb) { @@ -506,6 +518,62 @@ out: return offset; } +static resource_size_t cxl_rcrb_to_linkcap(struct device *dev, struct cxl_dport *dport) +{ + resource_size_t rcrb = dport->rcrb.base; + void __iomem *addr; + u32 cap_hdr; + u16 offset; + + if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB")) + return CXL_RESOURCE_NONE; + + addr = ioremap(rcrb, SZ_4K); + if (!addr) { + dev_err(dev, "Failed to map region %pr\n", addr); + release_mem_region(rcrb, SZ_4K); + return CXL_RESOURCE_NONE; + } + + offset = FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK, readw(addr + PCI_CAPABILITY_LIST)); + cap_hdr = readl(addr + offset); + while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr)) != PCI_CAP_ID_EXP) { + offset = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr); + if (offset == 0 || offset > SZ_4K) { + offset = 0; + break; + } + cap_hdr = readl(addr + offset); + } + + iounmap(addr); + release_mem_region(rcrb, SZ_4K); + if (!offset) + return CXL_RESOURCE_NONE; + + return offset; +} + +int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport) +{ + void __iomem *dport_pcie_cap = NULL; + resource_size_t pos; + struct cxl_rcrb_info *ri; + + ri = &dport->rcrb; + pos = cxl_rcrb_to_linkcap(&pdev->dev, dport); + if (pos == CXL_RESOURCE_NONE) + return -ENXIO; + + dport_pcie_cap = devm_cxl_iomap_block(&pdev->dev, + ri->base + pos, + PCI_CAP_EXP_SIZEOF); + dport->regs.rcd_pcie_cap = dport_pcie_cap; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap, "CXL"); + resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri, enum cxl_rcrb which) { @@ -513,7 +581,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri resource_size_t rcrb = ri->base; void __iomem *addr; u32 bar0, bar1; - u16 cmd; u32 id; if (which == CXL_RCRB_UPSTREAM) @@ -535,7 +602,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri } id = readl(addr + PCI_VENDOR_ID); - cmd = readw(addr + PCI_COMMAND); bar0 = readl(addr + PCI_BASE_ADDRESS_0); bar1 = readl(addr + PCI_BASE_ADDRESS_1); iounmap(addr); @@ -550,8 +616,6 @@ resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri dev_err(dev, "Failed to access Downstream Port RCRB\n"); return CXL_RESOURCE_NONE; } - if (!(cmd & PCI_COMMAND_MEMORY)) - return CXL_RESOURCE_NONE; /* The RCRB is a Memory Window, and the MEM_TYPE_1M bit is obsolete */ if (bar0 & (PCI_BASE_ADDRESS_MEM_TYPE_1M | PCI_BASE_ADDRESS_SPACE_IO)) return CXL_RESOURCE_NONE; @@ -577,4 +641,4 @@ resource_size_t cxl_rcd_component_reg_phys(struct device 
*dev, return CXL_RESOURCE_NONE; return __rcrb_to_component(dev, &dport->rcrb, CXL_RCRB_UPSTREAM); } -EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_rcd_component_reg_phys, "CXL"); diff --git a/drivers/cxl/core/suspend.c b/drivers/cxl/core/suspend.c index a5984d96ea1d..29aa5cc5e565 100644 --- a/drivers/cxl/core/suspend.c +++ b/drivers/cxl/core/suspend.c @@ -15,10 +15,10 @@ void cxl_mem_active_inc(void) { atomic_inc(&mem_active); } -EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, "CXL"); void cxl_mem_active_dec(void) { atomic_dec(&mem_active); } -EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, CXL); +EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, "CXL"); diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c index d0403dc3c8ab..7f2a9dd0d0e3 100644 --- a/drivers/cxl/core/trace.c +++ b/drivers/cxl/core/trace.c @@ -6,94 +6,3 @@ #define CREATE_TRACE_POINTS #include "trace.h" - -static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) -{ - struct cxl_region_params *p = &cxlr->params; - int gran = p->interleave_granularity; - int ways = p->interleave_ways; - u64 offset; - - /* Is the hpa within this region at all */ - if (hpa < p->res->start || hpa > p->res->end) { - dev_dbg(&cxlr->dev, - "Addr trans fail: hpa 0x%llx not in region\n", hpa); - return false; - } - - /* Is the hpa in an expected chunk for its pos(-ition) */ - offset = hpa - p->res->start; - offset = do_div(offset, gran * ways); - if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) - return true; - - dev_dbg(&cxlr->dev, - "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); - - return false; -} - -static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, - struct cxl_endpoint_decoder *cxled) -{ - u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; - struct cxl_region_params *p = &cxlr->params; - int pos = cxled->pos; - u16 eig = 0; - u8 eiw = 0; - - ways_to_eiw(p->interleave_ways, &eiw); - granularity_to_eig(p->interleave_granularity, &eig); - - /* - * The device position in the region interleave set was removed - * from the offset at HPA->DPA translation. To reconstruct the - * HPA, place the 'pos' in the offset. 
- * - * The placement of 'pos' in the HPA is determined by interleave - * ways and granularity and is defined in the CXL Spec 3.0 Section - * 8.2.4.19.13 Implementation Note: Device Decode Logic - */ - - /* Remove the dpa base */ - dpa_offset = dpa - cxl_dpa_resource_start(cxled); - - mask_upper = GENMASK_ULL(51, eig + 8); - - if (eiw < 8) { - hpa_offset = (dpa_offset & mask_upper) << eiw; - hpa_offset |= pos << (eig + 8); - } else { - bits_upper = (dpa_offset & mask_upper) >> (eig + 8); - bits_upper = bits_upper * 3; - hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); - } - - /* The lower bits remain unchanged */ - hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); - - /* Apply the hpa_offset to the region base address */ - hpa = hpa_offset + p->res->start; - - if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) - return ULLONG_MAX; - - return hpa; -} - -u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd, - u64 dpa) -{ - struct cxl_region_params *p = &cxlr->params; - struct cxl_endpoint_decoder *cxled = NULL; - - for (int i = 0; i < p->nr_targets; i++) { - cxled = p->targets[i]; - if (cxlmd == cxled_to_memdev(cxled)) - break; - } - if (!cxled || cxlmd != cxled_to_memdev(cxled)) - return ULLONG_MAX; - - return cxl_dpa_to_hpa(dpa, cxlr, cxled); -} diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index e5f13260fc52..25ebfbc1616c 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -8,7 +8,7 @@ #include <linux/tracepoint.h> #include <linux/pci.h> -#include <asm-generic/unaligned.h> +#include <linux/unaligned.h> #include <cxl.h> #include <cxlmem.h> @@ -48,6 +48,34 @@ { CXL_RAS_UC_IDE_RX_ERR, "IDE Rx Error" } \ ) +TRACE_EVENT(cxl_port_aer_uncorrectable_error, + TP_PROTO(struct device *dev, u32 status, u32 fe, u32 *hl), + TP_ARGS(dev, status, fe, hl), + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __string(host, dev_name(dev->parent)) + __field(u32, status) + __field(u32, first_error) + __array(u32, header_log, CXL_HEADERLOG_SIZE_U32) + ), + TP_fast_assign( + __assign_str(device); + __assign_str(host); + __entry->status = status; + __entry->first_error = fe; + /* + * Embed the 512B headerlog data for user app retrieval and + * parsing, but no need to print this in the trace buffer. 
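A producer sketch for the new port-level AER events (illustrative; the real call sites live in the CXL RAS/PCI error paths, which are not part of this hunk, and the trace_*() wrapper names follow the usual TRACE_EVENT() convention). The only contract visible here is the TP_PROTO: @hl must point at a full CXL_HEADERLOG_SIZE (512 byte, 128 u32) buffer because TP_fast_assign() copies that much into the ring buffer:

static void sketch_report_port_aer(struct device *dev, u32 ue_status,
				   u32 first_err, u32 *header_log,
				   u32 ce_status)
{
	if (ue_status)
		trace_cxl_port_aer_uncorrectable_error(dev, ue_status,
						       first_err, header_log);
	if (ce_status)
		trace_cxl_port_aer_correctable_error(dev, ce_status);
}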
+ */ + memcpy(__entry->header_log, hl, CXL_HEADERLOG_SIZE); + ), + TP_printk("device=%s host=%s status: '%s' first_error: '%s'", + __get_str(device), __get_str(host), + show_uc_errs(__entry->status), + show_uc_errs(__entry->first_error) + ) +); + TRACE_EVENT(cxl_aer_uncorrectable_error, TP_PROTO(const struct cxl_memdev *cxlmd, u32 status, u32 fe, u32 *hl), TP_ARGS(cxlmd, status, fe, hl), @@ -60,8 +88,8 @@ TRACE_EVENT(cxl_aer_uncorrectable_error, __array(u32, header_log, CXL_HEADERLOG_SIZE_U32) ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->status = status; __entry->first_error = fe; @@ -96,6 +124,25 @@ TRACE_EVENT(cxl_aer_uncorrectable_error, { CXL_RAS_CE_PHYS_LAYER_ERR, "Received Error From Physical Layer" } \ ) +TRACE_EVENT(cxl_port_aer_correctable_error, + TP_PROTO(struct device *dev, u32 status), + TP_ARGS(dev, status), + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __string(host, dev_name(dev->parent)) + __field(u32, status) + ), + TP_fast_assign( + __assign_str(device); + __assign_str(host); + __entry->status = status; + ), + TP_printk("device=%s host=%s status='%s'", + __get_str(device), __get_str(host), + show_ce_errs(__entry->status) + ) +); + TRACE_EVENT(cxl_aer_correctable_error, TP_PROTO(const struct cxl_memdev *cxlmd, u32 status), TP_ARGS(cxlmd, status), @@ -106,8 +153,8 @@ TRACE_EVENT(cxl_aer_correctable_error, __field(u32, status) ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->status = status; ), @@ -142,8 +189,8 @@ TRACE_EVENT(cxl_overflow, ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->log = log; __entry->count = le16_to_cpu(payload->overflow_err_count); @@ -166,11 +213,13 @@ TRACE_EVENT(cxl_overflow, #define CXL_EVENT_RECORD_FLAG_MAINT_NEEDED BIT(3) #define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4) #define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5) +#define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6) #define show_hdr_flags(flags) __print_flags(flags, " | ", \ { CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \ { CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \ { CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \ - { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" } \ + { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \ + { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \ ) /* @@ -197,11 +246,12 @@ TRACE_EVENT(cxl_overflow, __field(u16, hdr_related_handle) \ __field(u64, hdr_timestamp) \ __field(u8, hdr_length) \ - __field(u8, hdr_maint_op_class) + __field(u8, hdr_maint_op_class) \ + __field(u8, hdr_maint_op_sub_class) #define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \ - __assign_str(memdev, dev_name(&(cxlmd)->dev)); \ - __assign_str(host, dev_name((cxlmd)->dev.parent)); \ + __assign_str(memdev); \ + __assign_str(host); \ __entry->log = (l); \ __entry->serial = (cxlmd)->cxlds->serial; \ __entry->hdr_length = (hdr).length; \ @@ -209,17 +259,19 @@ TRACE_EVENT(cxl_overflow, __entry->hdr_handle = le16_to_cpu((hdr).handle); \ __entry->hdr_related_handle = 
le16_to_cpu((hdr).related_handle); \ __entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \ - __entry->hdr_maint_op_class = (hdr).maint_op_class + __entry->hdr_maint_op_class = (hdr).maint_op_class; \ + __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class #define CXL_EVT_TP_printk(fmt, ...) \ TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \ "len=%d flags='%s' handle=%x related_handle=%x " \ - "maint_op_class=%u : " fmt, \ + "maint_op_class=%u maint_op_sub_class=%u : " fmt, \ __get_str(memdev), __get_str(host), __entry->serial, \ cxl_event_log_type_str(__entry->log), \ __entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\ show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \ __entry->hdr_related_handle, __entry->hdr_maint_op_class, \ + __entry->hdr_maint_op_sub_class, \ ##__VA_ARGS__) TRACE_EVENT(cxl_generic_event, @@ -253,8 +305,8 @@ TRACE_EVENT(cxl_generic_event, * DRAM Event Record * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 */ -#define CXL_DPA_FLAGS_MASK 0x3F -#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK) +#define CXL_DPA_FLAGS_MASK GENMASK(1, 0) +#define CXL_DPA_MASK GENMASK_ULL(63, 6) #define CXL_DPA_VOLATILE BIT(0) #define CXL_DPA_NOT_REPAIRABLE BIT(1) @@ -264,8 +316,30 @@ TRACE_EVENT(cxl_generic_event, ) /* + * Component ID Format + * CXL 3.1 section 8.2.9.2.1; Table 8-44 + */ +#define CXL_PLDM_COMPONENT_ID_ENTITY_VALID BIT(0) +#define CXL_PLDM_COMPONENT_ID_RES_VALID BIT(1) + +#define show_comp_id_pldm_flags(flags) __print_flags(flags, " | ", \ + { CXL_PLDM_COMPONENT_ID_ENTITY_VALID, "PLDM Entity ID" }, \ + { CXL_PLDM_COMPONENT_ID_RES_VALID, "Resource ID" } \ +) + +#define show_pldm_entity_id(flags, valid_comp_id, valid_id_format, comp_id) \ + (flags & valid_comp_id && flags & valid_id_format) ? \ + (comp_id[0] & CXL_PLDM_COMPONENT_ID_ENTITY_VALID) ? \ + __print_hex(&comp_id[1], 6) : "0x00" : "0x00" + +#define show_pldm_resource_id(flags, valid_comp_id, valid_id_format, comp_id) \ + (flags & valid_comp_id && flags & valid_id_format) ? \ + (comp_id[0] & CXL_PLDM_COMPONENT_ID_RES_VALID) ? 
\ + __print_hex(&comp_id[7], 4) : "0x00" : "0x00" + +/* * General Media Event Record - GMER - * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 + * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45 */ #define CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT BIT(0) #define CXL_GMER_EVT_DESC_THRESHOLD_EVENT BIT(1) @@ -279,10 +353,18 @@ TRACE_EVENT(cxl_generic_event, #define CXL_GMER_MEM_EVT_TYPE_ECC_ERROR 0x00 #define CXL_GMER_MEM_EVT_TYPE_INV_ADDR 0x01 #define CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x02 -#define show_mem_event_type(type) __print_symbolic(type, \ - { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \ - { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \ - { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" } \ +#define CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x03 +#define CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x04 +#define CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05 +#define CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION 0x06 +#define show_gmer_mem_event_type(type) __print_symbolic(type, \ + { CXL_GMER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \ + { CXL_GMER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \ + { CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \ + { CXL_GMER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \ + { CXL_GMER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \ + { CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \ + { CXL_GMER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \ ) #define CXL_GMER_TRANS_UNKNOWN 0x00 @@ -292,6 +374,8 @@ TRACE_EVENT(cxl_generic_event, #define CXL_GMER_TRANS_HOST_INJECT_POISON 0x04 #define CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB 0x05 #define CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT 0x06 +#define CXL_GMER_TRANS_INTERNAL_MEDIA_ECS 0x07 +#define CXL_GMER_TRANS_MEDIA_INITIALIZATION 0x08 #define show_trans_type(type) __print_symbolic(type, \ { CXL_GMER_TRANS_UNKNOWN, "Unknown" }, \ { CXL_GMER_TRANS_HOST_READ, "Host Read" }, \ @@ -299,26 +383,66 @@ TRACE_EVENT(cxl_generic_event, { CXL_GMER_TRANS_HOST_SCAN_MEDIA, "Host Scan Media" }, \ { CXL_GMER_TRANS_HOST_INJECT_POISON, "Host Inject Poison" }, \ { CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, "Internal Media Scrub" }, \ - { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" } \ + { CXL_GMER_TRANS_INTERNAL_MEDIA_MANAGEMENT, "Internal Media Management" }, \ + { CXL_GMER_TRANS_INTERNAL_MEDIA_ECS, "Internal Media Error Check Scrub" }, \ + { CXL_GMER_TRANS_MEDIA_INITIALIZATION, "Media Initialization" } \ ) #define CXL_GMER_VALID_CHANNEL BIT(0) #define CXL_GMER_VALID_RANK BIT(1) #define CXL_GMER_VALID_DEVICE BIT(2) #define CXL_GMER_VALID_COMPONENT BIT(3) +#define CXL_GMER_VALID_COMPONENT_ID_FORMAT BIT(4) #define show_valid_flags(flags) __print_flags(flags, "|", \ { CXL_GMER_VALID_CHANNEL, "CHANNEL" }, \ { CXL_GMER_VALID_RANK, "RANK" }, \ { CXL_GMER_VALID_DEVICE, "DEVICE" }, \ - { CXL_GMER_VALID_COMPONENT, "COMPONENT" } \ + { CXL_GMER_VALID_COMPONENT, "COMPONENT" }, \ + { CXL_GMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \ +) + +#define CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA BIT(0) +#define CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED BIT(1) +#define show_cme_threshold_ev_flags(flags) __print_flags(flags, "|", \ + { \ + CXL_GMER_CME_EV_FLAG_CME_MULTIPLE_MEDIA, \ + "Corrected Memory Errors in Multiple Media Components" \ + }, { \ + CXL_GMER_CME_EV_FLAG_THRESHOLD_EXCEEDED, \ + "Exceeded Programmable Threshold" \ + } \ +) + +#define CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED 0x00 +#define 
CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR 0x01 +#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR 0x02 +#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR 0x03 +#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR 0x04 +#define CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR 0x05 +#define show_mem_event_sub_type(sub_type) __print_symbolic(sub_type, \ + { CXL_GMER_MEM_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \ + { CXL_GMER_MEM_EVT_SUB_TYPE_INTERNAL_DATAPATH_ERROR, "Internal Datapath Error" }, \ + { \ + CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_COMMAND_TRAINING_ERROR, \ + "Media Link Command Training Error" \ + }, { \ + CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CONTROL_TRAINING_ERROR, \ + "Media Link Control Training Error" \ + }, { \ + CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_DATA_TRAINING_ERROR, \ + "Media Link Data Training Error" \ + }, { \ + CXL_GMER_MEM_EVT_SUB_TYPE_MEDIA_LINK_CRC_ERROR, "Media Link CRC Error" \ + } \ ) TRACE_EVENT(cxl_general_media, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, + struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0, struct cxl_event_gen_media *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -330,54 +454,103 @@ TRACE_EVENT(cxl_general_media, __field(u8, channel) __field(u32, device) __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) - __field(u16, validity_flags) /* Following are out of order to pack trace record */ + __field(u64, hpa) + __field(u64, hpa_alias0) + __field_struct(uuid_t, region_uuid) + __field(u16, validity_flags) __field(u8, rank) __field(u8, dpa_flags) + __field(u32, cme_count) + __field(u8, sub_type) + __field(u8, cme_threshold_ev_flags) + __string(region_name, cxlr ? 
dev_name(&cxlr->dev) : "") ), TP_fast_assign( - CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); + CXL_EVT_TP_fast_assign(cxlmd, log, rec->media_hdr.hdr); __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID; /* General Media */ - __entry->dpa = le64_to_cpu(rec->phys_addr); + __entry->dpa = le64_to_cpu(rec->media_hdr.phys_addr); __entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK; /* Mask after flags have been parsed */ __entry->dpa &= CXL_DPA_MASK; - __entry->descriptor = rec->descriptor; - __entry->type = rec->type; - __entry->transaction_type = rec->transaction_type; - __entry->channel = rec->channel; - __entry->rank = rec->rank; + __entry->descriptor = rec->media_hdr.descriptor; + __entry->type = rec->media_hdr.type; + __entry->sub_type = rec->sub_type; + __entry->transaction_type = rec->media_hdr.transaction_type; + __entry->channel = rec->media_hdr.channel; + __entry->rank = rec->media_hdr.rank; __entry->device = get_unaligned_le24(rec->device); memcpy(__entry->comp_id, &rec->component_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE); - __entry->validity_flags = get_unaligned_le16(&rec->validity_flags); + __entry->validity_flags = get_unaligned_le16(&rec->media_hdr.validity_flags); + __entry->hpa = hpa; + __entry->hpa_alias0 = hpa_alias0; + if (cxlr) { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &uuid_null); + } + __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags; + __entry->cme_count = get_unaligned_le24(rec->cme_count); ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \ - "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \ - "device=%x comp_id=%s validity_flags='%s'", + "descriptor='%s' type='%s' sub_type='%s' " \ + "transaction_type='%s' channel=%u rank=%u " \ + "device=%x validity_flags='%s' " \ + "comp_id=%s comp_id_pldm_valid_flags='%s' " \ + "pldm_entity_id=%s pldm_resource_id=%s " \ + "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \ + "cme_threshold_ev_flags='%s' cme_count=%u", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), - show_mem_event_type(__entry->type), + show_gmer_mem_event_type(__entry->type), + show_mem_event_sub_type(__entry->sub_type), show_trans_type(__entry->transaction_type), __entry->channel, __entry->rank, __entry->device, + show_valid_flags(__entry->validity_flags), __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), - show_valid_flags(__entry->validity_flags) + show_comp_id_pldm_flags(__entry->comp_id[0]), + show_pldm_entity_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT, + CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id), + show_pldm_resource_id(__entry->validity_flags, CXL_GMER_VALID_COMPONENT, + CXL_GMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id), + __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid, + show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), __entry->cme_count ) ); /* * DRAM Event Record - DER * - * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 + * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46 */ /* * DRAM Event Record defines many fields the same as the General Media Event * Record. Reuse those definitions as appropriate. 
*/ +#define CXL_DER_MEM_EVT_TYPE_ECC_ERROR 0x00 +#define CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR 0x01 +#define CXL_DER_MEM_EVT_TYPE_INV_ADDR 0x02 +#define CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR 0x03 +#define CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION 0x04 +#define CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE 0x05 +#define CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION 0x06 +#define show_dram_mem_event_type(type) __print_symbolic(type, \ + { CXL_DER_MEM_EVT_TYPE_ECC_ERROR, "ECC Error" }, \ + { CXL_DER_MEM_EVT_TYPE_SCRUB_MEDIA_ECC_ERROR, "Scrub Media ECC Error" }, \ + { CXL_DER_MEM_EVT_TYPE_INV_ADDR, "Invalid Address" }, \ + { CXL_DER_MEM_EVT_TYPE_DATA_PATH_ERROR, "Data Path Error" }, \ + { CXL_DER_MEM_EVT_TYPE_TE_STATE_VIOLATION, "TE State Violation" }, \ + { CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE, "Adv Prog CME Counter Expiration" }, \ + { CXL_DER_MEM_EVT_TYPE_CKID_VIOLATION, "CKID Violation" } \ +) + #define CXL_DER_VALID_CHANNEL BIT(0) #define CXL_DER_VALID_RANK BIT(1) #define CXL_DER_VALID_NIBBLE BIT(2) @@ -386,23 +559,30 @@ TRACE_EVENT(cxl_general_media, #define CXL_DER_VALID_ROW BIT(5) #define CXL_DER_VALID_COLUMN BIT(6) #define CXL_DER_VALID_CORRECTION_MASK BIT(7) -#define show_dram_valid_flags(flags) __print_flags(flags, "|", \ - { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \ - { CXL_DER_VALID_RANK, "RANK" }, \ - { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \ - { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \ - { CXL_DER_VALID_BANK, "BANK" }, \ - { CXL_DER_VALID_ROW, "ROW" }, \ - { CXL_DER_VALID_COLUMN, "COLUMN" }, \ - { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" } \ +#define CXL_DER_VALID_COMPONENT BIT(8) +#define CXL_DER_VALID_COMPONENT_ID_FORMAT BIT(9) +#define CXL_DER_VALID_SUB_CHANNEL BIT(10) +#define show_dram_valid_flags(flags) __print_flags(flags, "|", \ + { CXL_DER_VALID_CHANNEL, "CHANNEL" }, \ + { CXL_DER_VALID_RANK, "RANK" }, \ + { CXL_DER_VALID_NIBBLE, "NIBBLE" }, \ + { CXL_DER_VALID_BANK_GROUP, "BANK GROUP" }, \ + { CXL_DER_VALID_BANK, "BANK" }, \ + { CXL_DER_VALID_ROW, "ROW" }, \ + { CXL_DER_VALID_COLUMN, "COLUMN" }, \ + { CXL_DER_VALID_CORRECTION_MASK, "CORRECTION MASK" }, \ + { CXL_DER_VALID_COMPONENT, "COMPONENT" }, \ + { CXL_DER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" }, \ + { CXL_DER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \ ) TRACE_EVENT(cxl_dram, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, + struct cxl_region *cxlr, u64 hpa, u64 hpa_alias0, struct cxl_event_dram *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, hpa_alias0, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -417,26 +597,37 @@ TRACE_EVENT(cxl_dram, __field(u32, nibble_mask) __field(u32, row) __array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE) + __field(u64, hpa) + __field(u64, hpa_alias0) + __field_struct(uuid_t, region_uuid) __field(u8, rank) /* Out of order to pack trace record */ __field(u8, bank_group) /* Out of order to pack trace record */ __field(u8, bank) /* Out of order to pack trace record */ __field(u8, dpa_flags) /* Out of order to pack trace record */ + /* Following are out of order to pack trace record */ + __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) + __field(u32, cvme_count) + __field(u8, sub_type) + __field(u8, sub_channel) + __field(u8, cme_threshold_ev_flags) + __string(region_name, cxlr ? 
dev_name(&cxlr->dev) : "") ), TP_fast_assign( - CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); + CXL_EVT_TP_fast_assign(cxlmd, log, rec->media_hdr.hdr); __entry->hdr_uuid = CXL_EVENT_DRAM_UUID; /* DRAM */ - __entry->dpa = le64_to_cpu(rec->phys_addr); + __entry->dpa = le64_to_cpu(rec->media_hdr.phys_addr); __entry->dpa_flags = __entry->dpa & CXL_DPA_FLAGS_MASK; __entry->dpa &= CXL_DPA_MASK; - __entry->descriptor = rec->descriptor; - __entry->type = rec->type; - __entry->transaction_type = rec->transaction_type; - __entry->validity_flags = get_unaligned_le16(rec->validity_flags); - __entry->channel = rec->channel; - __entry->rank = rec->rank; + __entry->descriptor = rec->media_hdr.descriptor; + __entry->type = rec->media_hdr.type; + __entry->sub_type = rec->sub_type; + __entry->transaction_type = rec->media_hdr.transaction_type; + __entry->validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags); + __entry->channel = rec->media_hdr.channel; + __entry->rank = rec->media_hdr.rank; __entry->nibble_mask = get_unaligned_le24(rec->nibble_mask); __entry->bank_group = rec->bank_group; __entry->bank = rec->bank; @@ -444,28 +635,56 @@ TRACE_EVENT(cxl_dram, __entry->column = get_unaligned_le16(rec->column); memcpy(__entry->cor_mask, &rec->correction_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE); + __entry->hpa = hpa; + __entry->hpa_alias0 = hpa_alias0; + if (cxlr) { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name); + uuid_copy(&__entry->region_uuid, &uuid_null); + } + memcpy(__entry->comp_id, &rec->component_id, + CXL_EVENT_GEN_MED_COMP_ID_SIZE); + __entry->sub_channel = rec->sub_channel; + __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags; + __entry->cvme_count = get_unaligned_le24(rec->cvme_count); ), - CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \ + CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \ "transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \ "bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \ - "validity_flags='%s'", + "validity_flags='%s' " \ + "comp_id=%s comp_id_pldm_valid_flags='%s' " \ + "pldm_entity_id=%s pldm_resource_id=%s " \ + "hpa=%llx hpa_alias0=%llx region=%s region_uuid=%pUb " \ + "sub_channel=%u cme_threshold_ev_flags='%s' cvme_count=%u", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), - show_mem_event_type(__entry->type), + show_dram_mem_event_type(__entry->type), + show_mem_event_sub_type(__entry->sub_type), show_trans_type(__entry->transaction_type), __entry->channel, __entry->rank, __entry->nibble_mask, __entry->bank_group, __entry->bank, __entry->row, __entry->column, __print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE), - show_dram_valid_flags(__entry->validity_flags) + show_dram_valid_flags(__entry->validity_flags), + __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), + show_comp_id_pldm_flags(__entry->comp_id[0]), + show_pldm_entity_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT, + CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id), + show_pldm_resource_id(__entry->validity_flags, CXL_DER_VALID_COMPONENT, + CXL_DER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id), + __entry->hpa, __entry->hpa_alias0, __get_str(region_name), &__entry->region_uuid, + __entry->sub_channel, show_cme_threshold_ev_flags(__entry->cme_threshold_ev_flags), + __entry->cvme_count ) ); /* * Memory Module Event Record - MMER * - * CXL res 3.0 section 
8.2.9.2.1.3; Table 8-45 + * CXL res 3.1 section 8.2.9.2.1.3; Table 8-47 */ #define CXL_MMER_HEALTH_STATUS_CHANGE 0x00 #define CXL_MMER_MEDIA_STATUS_CHANGE 0x01 @@ -473,27 +692,35 @@ TRACE_EVENT(cxl_dram, #define CXL_MMER_TEMP_CHANGE 0x03 #define CXL_MMER_DATA_PATH_ERROR 0x04 #define CXL_MMER_LSA_ERROR 0x05 +#define CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR 0x06 +#define CXL_MMER_MEMORY_MEDIA_FRU_ERROR 0x07 +#define CXL_MMER_POWER_MANAGEMENT_FAULT 0x08 #define show_dev_evt_type(type) __print_symbolic(type, \ { CXL_MMER_HEALTH_STATUS_CHANGE, "Health Status Change" }, \ { CXL_MMER_MEDIA_STATUS_CHANGE, "Media Status Change" }, \ { CXL_MMER_LIFE_USED_CHANGE, "Life Used Change" }, \ { CXL_MMER_TEMP_CHANGE, "Temperature Change" }, \ { CXL_MMER_DATA_PATH_ERROR, "Data Path Error" }, \ - { CXL_MMER_LSA_ERROR, "LSA Error" } \ + { CXL_MMER_LSA_ERROR, "LSA Error" }, \ + { CXL_MMER_UNRECOV_SIDEBAND_BUS_ERROR, "Unrecoverable Internal Sideband Bus Error" }, \ + { CXL_MMER_MEMORY_MEDIA_FRU_ERROR, "Memory Media FRU Error" }, \ + { CXL_MMER_POWER_MANAGEMENT_FAULT, "Power Management Fault" } \ ) /* * Device Health Information - DHI * - * CXL res 3.0 section 8.2.9.8.3.1; Table 8-100 + * CXL res 3.1 section 8.2.9.9.3.1; Table 8-133 */ #define CXL_DHI_HS_MAINTENANCE_NEEDED BIT(0) #define CXL_DHI_HS_PERFORMANCE_DEGRADED BIT(1) #define CXL_DHI_HS_HW_REPLACEMENT_NEEDED BIT(2) +#define CXL_DHI_HS_MEM_CAPACITY_DEGRADED BIT(3) #define show_health_status_flags(flags) __print_flags(flags, "|", \ { CXL_DHI_HS_MAINTENANCE_NEEDED, "MAINTENANCE_NEEDED" }, \ { CXL_DHI_HS_PERFORMANCE_DEGRADED, "PERFORMANCE_DEGRADED" }, \ - { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" } \ + { CXL_DHI_HS_HW_REPLACEMENT_NEEDED, "REPLACEMENT_NEEDED" }, \ + { CXL_DHI_HS_MEM_CAPACITY_DEGRADED, "MEM_CAPACITY_DEGRADED" } \ ) #define CXL_DHI_MS_NORMAL 0x00 @@ -547,6 +774,26 @@ TRACE_EVENT(cxl_dram, #define CXL_DHI_AS_COR_VOL_ERR_CNT(as) ((as & 0x10) >> 4) #define CXL_DHI_AS_COR_PER_ERR_CNT(as) ((as & 0x20) >> 5) +#define CXL_MMER_VALID_COMPONENT BIT(0) +#define CXL_MMER_VALID_COMPONENT_ID_FORMAT BIT(1) +#define show_mem_module_valid_flags(flags) __print_flags(flags, "|", \ + { CXL_MMER_VALID_COMPONENT, "COMPONENT" }, \ + { CXL_MMER_VALID_COMPONENT_ID_FORMAT, "COMPONENT PLDM FORMAT" } \ +) +#define CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED 0x00 +#define CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA 0x01 +#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA 0x02 +#define CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU 0x03 +#define show_dev_event_sub_type(sub_type) __print_symbolic(sub_type, \ + { CXL_MMER_DEV_EVT_SUB_TYPE_NOT_REPORTED, "Not Reported" }, \ + { CXL_MMER_DEV_EVT_SUB_TYPE_INVALID_CONFIG_DATA, "Invalid Config Data" }, \ + { CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_CONFIG_DATA, "Unsupported Config Data" }, \ + { \ + CXL_MMER_DEV_EVT_SUB_TYPE_UNSUPP_MEM_MEDIA_FRU, \ + "Unsupported Memory Media FRU" \ + } \ +) + TRACE_EVENT(cxl_memory_module, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, @@ -569,6 +816,9 @@ TRACE_EVENT(cxl_memory_module, __field(u32, cor_per_err_cnt) __field(s16, device_temp) __field(u8, add_status) + __field(u8, event_sub_type) + __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) + __field(u16, validity_flags) ), TP_fast_assign( @@ -577,6 +827,7 @@ TRACE_EVENT(cxl_memory_module, /* Memory Module Event */ __entry->event_type = rec->event_type; + __entry->event_sub_type = rec->event_sub_type; /* Device Health Info */ __entry->health_status = rec->info.health_status; @@ -587,13 +838,20 @@ 
TRACE_EVENT(cxl_memory_module, __entry->cor_per_err_cnt = get_unaligned_le32(rec->info.cor_per_err_cnt); __entry->device_temp = get_unaligned_le16(rec->info.device_temp); __entry->add_status = rec->info.add_status; + __entry->validity_flags = get_unaligned_le16(rec->validity_flags); + memcpy(__entry->comp_id, &rec->component_id, + CXL_EVENT_GEN_MED_COMP_ID_SIZE); ), - CXL_EVT_TP_printk("event_type='%s' health_status='%s' media_status='%s' " \ - "as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \ + CXL_EVT_TP_printk("event_type='%s' event_sub_type='%s' health_status='%s' " \ + "media_status='%s' as_life_used=%s as_dev_temp=%s as_cor_vol_err_cnt=%s " \ "as_cor_per_err_cnt=%s life_used=%u device_temp=%d " \ - "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u", + "dirty_shutdown_cnt=%u cor_vol_err_cnt=%u cor_per_err_cnt=%u " \ + "validity_flags='%s' " \ + "comp_id=%s comp_id_pldm_valid_flags='%s' " \ + "pldm_entity_id=%s pldm_resource_id=%s", show_dev_evt_type(__entry->event_type), + show_dev_event_sub_type(__entry->event_sub_type), show_health_status_flags(__entry->health_status), show_media_status(__entry->media_status), show_two_bit_status(CXL_DHI_AS_LIFE_USED(__entry->add_status)), @@ -602,7 +860,14 @@ TRACE_EVENT(cxl_memory_module, show_one_bit_status(CXL_DHI_AS_COR_PER_ERR_CNT(__entry->add_status)), __entry->life_used, __entry->device_temp, __entry->dirty_shutdown_cnt, __entry->cor_vol_err_cnt, - __entry->cor_per_err_cnt + __entry->cor_per_err_cnt, + show_mem_module_valid_flags(__entry->validity_flags), + __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), + show_comp_id_pldm_flags(__entry->comp_id[0]), + show_pldm_entity_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT, + CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id), + show_pldm_resource_id(__entry->validity_flags, CXL_MMER_VALID_COMPONENT, + CXL_MMER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id) ) ); @@ -642,8 +907,6 @@ TRACE_EVENT(cxl_memory_module, #define cxl_poison_overflow(flags, time) \ (flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0) -u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa); - TRACE_EVENT(cxl_poison, TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr, @@ -660,6 +923,7 @@ TRACE_EVENT(cxl_poison, __string(region, cxlr ? 
dev_name(&cxlr->dev) : "") __field(u64, overflow_ts) __field(u64, hpa) + __field(u64, hpa_alias0) __field(u64, dpa) __field(u32, dpa_length) __array(char, uuid, 16) @@ -668,8 +932,8 @@ TRACE_EVENT(cxl_poison, ), TP_fast_assign( - __assign_str(memdev, dev_name(&cxlmd->dev)); - __assign_str(host, dev_name(cxlmd->dev.parent)); + __assign_str(memdev); + __assign_str(host); __entry->serial = cxlmd->cxlds->serial; __entry->overflow_ts = cxl_poison_overflow(flags, overflow_ts); __entry->dpa = cxl_poison_record_dpa(record); @@ -678,20 +942,26 @@ TRACE_EVENT(cxl_poison, __entry->trace_type = trace_type; __entry->flags = flags; if (cxlr) { - __assign_str(region, dev_name(&cxlr->dev)); + __assign_str(region); memcpy(__entry->uuid, &cxlr->params.uuid, 16); - __entry->hpa = cxl_trace_hpa(cxlr, cxlmd, - __entry->dpa); + __entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd, + __entry->dpa); + if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size) + __entry->hpa_alias0 = __entry->hpa + + cxlr->params.cache_size; + else + __entry->hpa_alias0 = ULLONG_MAX; } else { - __assign_str(region, ""); + __assign_str(region); memset(__entry->uuid, 0, 16); __entry->hpa = ULLONG_MAX; + __entry->hpa_alias0 = ULLONG_MAX; } ), TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \ - "region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \ - "source=%s flags=%s overflow_time=%llu", + "region_uuid=%pU hpa=0x%llx hpa_alias0=0x%llx dpa=0x%llx " \ + "dpa_length=0x%x source=%s flags=%s overflow_time=%llu", __get_str(memdev), __get_str(host), __entry->serial, @@ -699,6 +969,7 @@ TRACE_EVENT(cxl_poison, __get_str(region), __entry->uuid, __entry->hpa, + __entry->hpa_alias0, __entry->dpa, __entry->dpa_length, show_poison_source(__entry->source), |