path: root/drivers/cxl/core/hdm.c
Diffstat (limited to 'drivers/cxl/core/hdm.c')
-rw-r--r--	drivers/cxl/core/hdm.c	955
1 file changed, 691 insertions(+), 264 deletions(-)
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index dcc16d7cb8f3..1c5d2022c87a 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -17,14 +16,16 @@
* for enumerating these registers and capabilities.
*/
-DECLARE_RWSEM(cxl_dpa_rwsem);
+struct cxl_rwsem cxl_rwsem = {
+ .region = __RWSEM_INITIALIZER(cxl_rwsem.region),
+ .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
+};
-static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map)
+static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
int rc;
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -35,7 +36,8 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
if (rc)
return rc;
- dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
+ dev_dbg(port->uport_dev, "%s added to %s\n",
+ dev_name(&cxld->dev), dev_name(&port->dev));
return 0;
}
@@ -47,12 +49,17 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
* are claimed and passed to the single dport. Disable the range until the first
* CXL region is enumerated / activated.
*/
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
struct cxl_switch_decoder *cxlsd;
- struct cxl_dport *dport = NULL;
- int single_port_map[1];
- unsigned long index;
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+
+ /*
+	 * Capability checks are moot for passthrough decoders; support
+ * any and all possibilities.
+ */
+ cxlhdm->interleave_mask = ~0U;
+ cxlhdm->iw_cap_mask = ~0UL;
cxlsd = cxl_switch_decoder_alloc(port, 1);
if (IS_ERR(cxlsd))
@@ -60,13 +67,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport)
- break;
- single_port_map[0] = dport->port_id;
-
- return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
@@ -80,53 +82,99 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(11, 8);
if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
cxlhdm->interleave_mask |= GENMASK(14, 12);
+ cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
+ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap))
+ cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
+ if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap))
+ cxlhdm->iw_cap_mask |= BIT(16);
}
-static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
- struct cxl_component_regs *regs)
+static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
- struct cxl_register_map map = {
- .resource = port->component_reg_phys,
- .base = crb,
- .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
- };
+ struct cxl_hdm *cxlhdm;
+ void __iomem *hdm;
+ u32 ctrl;
+ int i;
- cxl_probe_component_regs(&port->dev, crb, &map.component_map);
- if (!map.component_map.hdm_decoder.valid) {
- dev_err(&port->dev, "HDM decoder registers invalid\n");
- return -ENXIO;
+ if (!info)
+ return false;
+
+ cxlhdm = dev_get_drvdata(&info->port->dev);
+ hdm = cxlhdm->regs.hdm_decoder;
+
+ if (!hdm)
+ return true;
+
+ /*
+	 * If HDM decoders are present and the driver is in control of
+	 * Mem_Enable, skip DVSEC-based emulation.
+ */
+ if (!info->mem_enabled)
+ return false;
+
+ /*
+ * If any decoders are committed already, there should not be any
+ * emulated DVSEC decoders.
+ */
+ for (i = 0; i < cxlhdm->decoder_count; i++) {
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+ dev_dbg(&info->port->dev,
+ "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
+ info->port->id, i,
+ FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
+ readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
+ readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
+ readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
+ readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+ return false;
}
- return cxl_map_component_regs(&port->dev, regs, &map,
- BIT(CXL_CM_CAP_CAP_ID_HDM));
+ return true;
}
/**
* devm_cxl_setup_hdm - map HDM decoder component registers
* @port: cxl_port to map
+ * @info: cached DVSEC range register info
*/
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
+static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
{
+ struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
struct cxl_hdm *cxlhdm;
- void __iomem *crb;
int rc;
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
if (!cxlhdm)
return ERR_PTR(-ENOMEM);
-
cxlhdm->port = port;
- crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb) {
- dev_err(dev, "No component registers mapped\n");
- return ERR_PTR(-ENXIO);
+ dev_set_drvdata(dev, cxlhdm);
+
+ /* Memory devices can configure device HDM using DVSEC range regs. */
+ if (reg_map->resource == CXL_RESOURCE_NONE) {
+ if (!info || !info->mem_enabled) {
+ dev_err(dev, "No component registers mapped\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ cxlhdm->decoder_count = info->ranges;
+ return cxlhdm;
}
- rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
- iounmap(crb);
- if (rc)
+ if (!reg_map->component_map.hdm_decoder.valid) {
+ dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+ /* unique error code to indicate no HDM decoder capability */
+ return ERR_PTR(-ENODEV);
+ }
+
+ rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
+ BIT(CXL_CM_CAP_CAP_ID_HDM));
+ if (rc) {
+ dev_err(dev, "Failed to map HDM capability.\n");
return ERR_PTR(rc);
+ }
parse_hdm_decoder_caps(cxlhdm);
if (cxlhdm->decoder_count == 0) {
@@ -134,11 +182,18 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
return ERR_PTR(-ENXIO);
}
- dev_set_drvdata(dev, cxlhdm);
+ /*
+	 * Now that the HDM capability is parsed, decide if range
+	 * register emulation is needed and fix up cxlhdm accordingly.
+ */
+ if (should_emulate_decoders(info)) {
+ dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
+ str_plural(info->ranges));
+ cxlhdm->decoder_count = info->ranges;
+ }
return cxlhdm;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
@@ -152,15 +207,45 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
struct resource *p1, *p2;
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
__cxl_dpa_debug(file, p1, 0);
for (p2 = p1->child; p2; p2 = p2->sibling)
__cxl_dpa_debug(file, p2, 1);
}
- up_read(&cxl_dpa_rwsem);
}
-EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, "CXL");
+
+/* See request_skip() kernel-doc */
+static resource_size_t __adjust_skip(struct cxl_dev_state *cxlds,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len,
+ const char *requester)
+{
+ const resource_size_t skip_end = skip_base + skip_len - 1;
+
+ for (int i = 0; i < cxlds->nr_partitions; i++) {
+ const struct resource *part_res = &cxlds->part[i].res;
+ resource_size_t adjust_start, adjust_end, size;
+
+ adjust_start = max(skip_base, part_res->start);
+ adjust_end = min(skip_end, part_res->end);
+
+ if (adjust_end < adjust_start)
+ continue;
+
+ size = adjust_end - adjust_start + 1;
+
+ if (!requester)
+ __release_region(&cxlds->dpa_res, adjust_start, size);
+ else if (!__request_region(&cxlds->dpa_res, adjust_start, size,
+ requester, 0))
+ return adjust_start - skip_base;
+ }
+
+ return skip_len;
+}
+#define release_skip(c, b, l) __adjust_skip((c), (b), (l), NULL)
/*
* Must be called in a context that synchronizes against this decoder's
@@ -174,13 +259,13 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
struct resource *res = cxled->dpa_res;
resource_size_t skip_start;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
/* save @skip_start, before @res is released */
skip_start = res->start - cxled->skip;
__release_region(&cxlds->dpa_res, res->start, resource_size(res));
if (cxled->skip)
- __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
+ release_skip(cxlds, skip_start, cxled->skip);
cxled->skip = 0;
cxled->dpa_res = NULL;
put_device(&cxled->cxld.dev);
@@ -189,9 +274,8 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
static void cxl_dpa_release(void *cxled)
{
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
__cxl_dpa_release(cxled);
- up_write(&cxl_dpa_rwsem);
}
/*
@@ -202,11 +286,63 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
devm_remove_action(&port->dev, cxl_dpa_release, cxled);
__cxl_dpa_release(cxled);
}
+/**
+ * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
+ * @cxlds: CXL.mem device context that parents @cxled
+ * @cxled: Endpoint decoder establishing new allocation that skips lower DPA
+ * @skip_base: DPA < start of new DPA allocation (DPAnew)
+ * @skip_len: @skip_base + @skip_len == DPAnew
+ *
+ * DPA 'skip' arises from out-of-sequence DPA allocation events relative
+ * to free capacity across multiple partitions. It is a wasteful event
+ * as usable DPA gets thrown away, but if a deployment has, for example,
+ * a dual RAM+PMEM device, wants to use PMEM, and has unallocated RAM
+ * DPA, the free RAM DPA must be sacrificed to start allocating PMEM.
+ * See third "Implementation Note" in CXL 3.1 8.2.4.19.13 "Decoder
+ * Protection" for more details.
+ *
+ * A 'skip' always covers the last allocated DPA in a previous partition
+ * to the start of the current partition to allocate. Allocations never
+ * start in the middle of a partition, and allocations are always
+ * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
+ * unwind order from forced in-order allocation).
+ *
+ * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
+ * would always be contained to a single partition. Given
+ * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
+ * might span "tail capacity of partition[0], all of partition[1], ...,
+ * all of partition[N-1]" to support allocating from partition[N]. That
+ * in turn interacts with the partition 'struct resource' boundaries
+ * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
+ * partition. I.e. this is a quirk of using a 'struct resource' tree to
+ * detect range conflicts while also tracking partition boundaries in
+ * @cxlds->dpa_res.
+ */
+static int request_skip(struct cxl_dev_state *cxlds,
+ struct cxl_endpoint_decoder *cxled,
+ const resource_size_t skip_base,
+ const resource_size_t skip_len)
+{
+ resource_size_t skipped = __adjust_skip(cxlds, skip_base, skip_len,
+ dev_name(&cxled->cxld.dev));
+
+ if (skipped == skip_len)
+ return 0;
+
+ dev_dbg(cxlds->dev,
+ "%s: failed to reserve skipped space (%pa %pa %pa)\n",
+ dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped);
+
+ release_skip(cxlds, skip_base, skipped);
+
+ return -EBUSY;
+}
+
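The request_skip() kernel-doc above explains why a single skip request may have to be split at partition boundaries before it can be reserved in the dpa_res tree. Below is a minimal, standalone sketch of that clamping arithmetic under simplified assumptions; the dpa_range type and clamp_skip_to_partitions() helper are hypothetical stand-ins for the kernel's struct resource handling, not code from this patch.

/*
 * Illustrative sketch only: clamp a DPA 'skip' to every partition it
 * crosses, mirroring what __adjust_skip() does per 'struct resource'.
 */
#include <stdint.h>
#include <stdio.h>

struct dpa_range {
	uint64_t start;
	uint64_t end;		/* inclusive */
};

static void clamp_skip_to_partitions(const struct dpa_range *part, int nr,
				     uint64_t skip_base, uint64_t skip_len)
{
	uint64_t skip_end = skip_base + skip_len - 1;

	for (int i = 0; i < nr; i++) {
		uint64_t lo = part[i].start > skip_base ? part[i].start : skip_base;
		uint64_t hi = part[i].end < skip_end ? part[i].end : skip_end;

		if (hi < lo)
			continue;	/* skip does not touch this partition */
		printf("partition %d: reserve [%#llx-%#llx]\n", i,
		       (unsigned long long)lo, (unsigned long long)hi);
	}
}

int main(void)
{
	/* three partitions, e.g. ram / pmem-a / pmem-b */
	struct dpa_range part[] = {
		{ 0x0, 0xfff }, { 0x1000, 0x1fff }, { 0x2000, 0x2fff },
	};

	/*
	 * Allocating from partition 2 while partition 0 is half used yields a
	 * skip that covers the tail of partition 0 and all of partition 1.
	 */
	clamp_skip_to_partitions(part, 3, 0x800, 0x1800);
	return 0;
}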
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
@@ -216,11 +352,15 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &port->dev;
struct resource *res;
+ int rc;
- lockdep_assert_held_write(&cxl_dpa_rwsem);
+ lockdep_assert_held_write(&cxl_rwsem.dpa);
- if (!len)
- goto success;
+ if (!len) {
+ dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
+ port->id, cxled->cxld.id);
+ return -EINVAL;
+ }
if (cxled->dpa_res) {
dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
@@ -241,14 +381,9 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
}
if (skipped) {
- res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
- dev_name(&cxled->cxld.dev), 0);
- if (!res) {
- dev_dbg(dev,
- "decoder%d.%d: failed to reserve skipped space\n",
- port->id, cxled->cxld.id);
- return -EBUSY;
- }
+ rc = request_skip(cxlds, cxled, base - skipped, skipped);
+ if (rc)
+ return rc;
}
res = __request_region(&cxlds->dpa_res, base, len,
dev_name(&cxled->cxld.dev), 0);
@@ -256,229 +391,295 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
port->id, cxled->cxld.id);
if (skipped)
- __release_region(&cxlds->dpa_res, base - skipped,
- skipped);
+ release_skip(cxlds, base - skipped, skipped);
return -EBUSY;
}
cxled->dpa_res = res;
cxled->skip = skipped;
- if (resource_contains(&cxlds->pmem_res, res))
- cxled->mode = CXL_DECODER_PMEM;
- else if (resource_contains(&cxlds->ram_res, res))
- cxled->mode = CXL_DECODER_RAM;
- else {
- dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
- cxled->cxld.id, cxled->dpa_res);
- cxled->mode = CXL_DECODER_MIXED;
- }
+ /*
+	 * When allocating new capacity, ->part is already set; when
+	 * discovering decoder settings at initial enumeration, ->part
+ * is not set.
+ */
+ if (cxled->part < 0)
+		for (int i = 0; i < cxlds->nr_partitions; i++)
+ if (resource_contains(&cxlds->part[i].res, res)) {
+ cxled->part = i;
+ break;
+ }
+
+ if (cxled->part < 0)
+ dev_warn(dev, "decoder%d.%d: %pr does not map any partition\n",
+ port->id, cxled->cxld.id, res);
-success:
port->hdm_end++;
get_device(&cxled->cxld.dev);
return 0;
}
-static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
+static int add_dpa_res(struct device *dev, struct resource *parent,
+ struct resource *res, resource_size_t start,
+ resource_size_t size, const char *type)
+{
+ int rc;
+
+ *res = (struct resource) {
+ .name = type,
+ .start = start,
+ .end = start + size - 1,
+ .flags = IORESOURCE_MEM,
+ };
+ if (resource_size(res) == 0) {
+ dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
+ return 0;
+ }
+ rc = request_resource(parent, res);
+ if (rc) {
+ dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
+ res, rc);
+ return rc;
+ }
+
+ dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
+
+ return 0;
+}
+
+static const char *cxl_mode_name(enum cxl_partition_mode mode)
+{
+ switch (mode) {
+ case CXL_PARTMODE_RAM:
+ return "ram";
+ case CXL_PARTMODE_PMEM:
+ return "pmem";
+ default:
+ return "";
+	}
+}
+
+/* if this fails the caller must destroy @cxlds, there is no recovery */
+int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info)
+{
+ struct device *dev = cxlds->dev;
+
+ guard(rwsem_write)(&cxl_rwsem.dpa);
+
+ if (cxlds->nr_partitions)
+ return -EBUSY;
+
+ if (!info->size || !info->nr_partitions) {
+ cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
+ cxlds->nr_partitions = 0;
+ return 0;
+ }
+
+ cxlds->dpa_res = DEFINE_RES_MEM(0, info->size);
+
+ for (int i = 0; i < info->nr_partitions; i++) {
+ const struct cxl_dpa_part_info *part = &info->part[i];
+ int rc;
+
+ cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID;
+ cxlds->part[i].mode = part->mode;
+
+ /* Require ordered + contiguous partitions */
+ if (i) {
+ const struct cxl_dpa_part_info *prev = &info->part[i - 1];
+
+ if (prev->range.end + 1 != part->range.start)
+ return -EINVAL;
+ }
+ rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res,
+ part->range.start, range_len(&part->range),
+ cxl_mode_name(part->mode));
+ if (rc)
+ return rc;
+ cxlds->nr_partitions++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_dpa_setup);
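cxl_dpa_setup() refuses partition layouts that are not ordered and contiguous (prev->range.end + 1 must equal part->range.start). A small hedged sketch of just that validation, with a hypothetical part_info array standing in for struct cxl_dpa_info:

/* Illustrative sketch only: the ordered + contiguous partition check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct part_info {
	uint64_t start;
	uint64_t end;		/* inclusive */
	const char *mode;
};

static bool parts_ordered_and_contiguous(const struct part_info *part, int nr)
{
	for (int i = 1; i < nr; i++)
		if (part[i - 1].end + 1 != part[i].start)
			return false;
	return true;
}

int main(void)
{
	/* ram occupies [0, 0xfff], so pmem must start exactly at 0x1000 */
	struct part_info part[] = {
		{ 0x0, 0xfff, "ram" },
		{ 0x1000, 0x1fff, "pmem" },
	};

	printf("layout valid: %d\n", parts_ordered_and_contiguous(part, 2));
	return 0;
}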
+
+int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
resource_size_t base, resource_size_t len,
resource_size_t skipped)
{
struct cxl_port *port = cxled_to_port(cxled);
int rc;
- down_write(&cxl_dpa_rwsem);
- rc = __cxl_dpa_reserve(cxled, base, len, skipped);
- up_write(&cxl_dpa_rwsem);
+ scoped_guard(rwsem_write, &cxl_rwsem.dpa)
+ rc = __cxl_dpa_reserve(cxled, base, len, skipped);
if (rc)
return rc;
return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL");
resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
- resource_size_t size = 0;
-
- down_read(&cxl_dpa_rwsem);
+ guard(rwsem_read)(&cxl_rwsem.dpa);
if (cxled->dpa_res)
- size = resource_size(cxled->dpa_res);
- up_read(&cxl_dpa_rwsem);
+ return resource_size(cxled->dpa_res);
- return size;
+ return 0;
}
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
- down_read(&cxl_dpa_rwsem);
+ lockdep_assert_held(&cxl_rwsem.dpa);
if (cxled->dpa_res)
base = cxled->dpa_res->start;
- up_read(&cxl_dpa_rwsem);
return base;
}
+bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr)
+{
+ struct resource _addr = DEFINE_RES_MEM(addr, 1);
+
+ return resource_contains(res, &_addr);
+}
+
int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
struct cxl_port *port = cxled_to_port(cxled);
struct device *dev = &cxled->cxld.dev;
- int rc;
- down_write(&cxl_dpa_rwsem);
- if (!cxled->dpa_res) {
- rc = 0;
- goto out;
- }
+ guard(rwsem_write)(&cxl_rwsem.dpa);
+ if (!cxled->dpa_res)
+ return 0;
if (cxled->cxld.region) {
dev_dbg(dev, "decoder assigned to: %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.id != port->hdm_end) {
dev_dbg(dev, "expected decoder%d.%d\n", port->id,
port->hdm_end);
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
+
devm_cxl_dpa_release(cxled);
- rc = 0;
-out:
- up_write(&cxl_dpa_rwsem);
- return rc;
+ return 0;
}
-int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
- enum cxl_decoder_mode mode)
+int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
+ enum cxl_partition_mode mode)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
- int rc;
+ int part;
- switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
- break;
- default:
+ guard(rwsem_write)(&cxl_rwsem.dpa);
+ if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
+ return -EBUSY;
+
+ for (part = 0; part < cxlds->nr_partitions; part++)
+ if (cxlds->part[part].mode == mode)
+ break;
+
+ if (part >= cxlds->nr_partitions) {
dev_dbg(dev, "unsupported mode: %d\n", mode);
return -EINVAL;
}
- down_write(&cxl_dpa_rwsem);
- if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
- rc = -EBUSY;
- goto out;
- }
-
- /*
- * Only allow modes that are supported by the current partition
- * configuration
- */
- if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
- dev_dbg(dev, "no available pmem capacity\n");
- rc = -ENXIO;
- goto out;
- }
- if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
- dev_dbg(dev, "no available ram capacity\n");
- rc = -ENXIO;
- goto out;
+ if (!resource_size(&cxlds->part[part].res)) {
+ dev_dbg(dev, "no available capacity for mode: %d\n", mode);
+ return -ENXIO;
}
- cxled->mode = mode;
- rc = 0;
-out:
- up_write(&cxl_dpa_rwsem);
-
- return rc;
+ cxled->part = part;
+ return 0;
}
-int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
{
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
- resource_size_t free_ram_start, free_pmem_start;
- struct cxl_port *port = cxled_to_port(cxled);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxled->cxld.dev;
- resource_size_t start, avail, skip;
+ struct resource *res, *prev = NULL;
+ resource_size_t start, avail, skip, skip_start;
struct resource *p, *last;
- int rc;
+ int part;
- down_write(&cxl_dpa_rwsem);
+ guard(rwsem_write)(&cxl_rwsem.dpa);
if (cxled->cxld.region) {
dev_dbg(dev, "decoder attached to %s\n",
dev_name(&cxled->cxld.region->dev));
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
dev_dbg(dev, "decoder enabled\n");
- rc = -EBUSY;
- goto out;
+ return -EBUSY;
}
- for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
- last = p;
- if (last)
- free_ram_start = last->end + 1;
- else
- free_ram_start = cxlds->ram_res.start;
+ part = cxled->part;
+ if (part < 0) {
+ dev_dbg(dev, "partition not set\n");
+ return -EBUSY;
+ }
- for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
+ res = &cxlds->part[part].res;
+ for (p = res->child, last = NULL; p; p = p->sibling)
last = p;
if (last)
- free_pmem_start = last->end + 1;
+ start = last->end + 1;
else
- free_pmem_start = cxlds->pmem_res.start;
-
- if (cxled->mode == CXL_DECODER_RAM) {
- start = free_ram_start;
- avail = cxlds->ram_res.end - start + 1;
- skip = 0;
- } else if (cxled->mode == CXL_DECODER_PMEM) {
- resource_size_t skip_start, skip_end;
+ start = res->start;
- start = free_pmem_start;
- avail = cxlds->pmem_res.end - start + 1;
- skip_start = free_ram_start;
-
- /*
- * If some pmem is already allocated, then that allocation
- * already handled the skip.
- */
- if (cxlds->pmem_res.child &&
- skip_start == cxlds->pmem_res.child->start)
- skip_end = skip_start - 1;
- else
- skip_end = start - 1;
- skip = skip_end - skip_start + 1;
- } else {
- dev_dbg(dev, "mode not set\n");
- rc = -EINVAL;
- goto out;
+ /*
+ * To allocate at partition N, a skip needs to be calculated for all
+	 * unallocated space at lower partition indices.
+ *
+ * If a partition has any allocations, the search can end because a
+ * previous cxl_dpa_alloc() invocation is assumed to have accounted for
+ * all previous partitions.
+ */
+ skip_start = CXL_RESOURCE_NONE;
+ for (int i = part; i; i--) {
+ prev = &cxlds->part[i - 1].res;
+ for (p = prev->child, last = NULL; p; p = p->sibling)
+ last = p;
+ if (last) {
+ skip_start = last->end + 1;
+ break;
+ }
+ skip_start = prev->start;
}
+ avail = res->end - start + 1;
+ if (skip_start == CXL_RESOURCE_NONE)
+ skip = 0;
+ else
+ skip = res->start - skip_start;
+
if (size > avail) {
- dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
- &avail);
- rc = -ENOSPC;
- goto out;
+ dev_dbg(dev, "%llu exceeds available %s capacity: %llu\n", size,
+ res->name, (u64)avail);
+ return -ENOSPC;
}
- rc = __cxl_dpa_reserve(cxled, start, size, skip);
-out:
- up_write(&cxl_dpa_rwsem);
+ return __cxl_dpa_reserve(cxled, start, size, skip);
+}
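The comment block in __cxl_dpa_alloc() above derives the allocation start from the last allocation in the target partition, and the skip from the first lower partition that already has allocations. A rough worked example of that math, using hypothetical simplified ranges (alloc_end marks the last allocated DPA, UINT64_MAX means the partition is empty) rather than the resource tree:

/* Illustrative sketch only: start/skip math for the first pmem allocation. */
#include <stdint.h>
#include <stdio.h>

struct part {
	uint64_t start, end;	/* partition DPA range, inclusive */
	uint64_t alloc_end;	/* last allocated DPA, UINT64_MAX if none */
};

int main(void)
{
	/* ram [0, 0xfff] with 0x400 bytes allocated, pmem [0x1000, 0x1fff] empty */
	struct part parts[] = {
		{ 0x0, 0xfff, 0x3ff },
		{ 0x1000, 0x1fff, UINT64_MAX },
	};
	int target = 1;	/* allocate from pmem */

	/* start just past the last allocation in the target partition */
	uint64_t start = parts[target].alloc_end == UINT64_MAX ?
			 parts[target].start : parts[target].alloc_end + 1;

	/* walk lower partitions; stop at the first one with allocations */
	uint64_t skip_start = UINT64_MAX;
	for (int i = target; i; i--) {
		if (parts[i - 1].alloc_end != UINT64_MAX) {
			skip_start = parts[i - 1].alloc_end + 1;
			break;
		}
		skip_start = parts[i - 1].start;
	}

	uint64_t skip = skip_start == UINT64_MAX ?
			0 : parts[target].start - skip_start;

	/* expect start=0x1000 skip=0xc00: the free ram tail is sacrificed */
	printf("start=%#llx skip=%#llx\n",
	       (unsigned long long)start, (unsigned long long)skip);
	return 0;
}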
+
+int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size)
+{
+ struct cxl_port *port = cxled_to_port(cxled);
+ int rc;
+ rc = __cxl_dpa_alloc(cxled, size);
if (rc)
return rc;
@@ -509,21 +710,16 @@ static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
- u32p_replace_bits(ctrl, !!(cxld->target_type == 3),
- CXL_HDM_DECODER0_CTRL_TYPE);
+ u32p_replace_bits(ctrl,
+ !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
+ CXL_HDM_DECODER0_CTRL_HOSTONLY);
}
-static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
struct cxl_dport **t = &cxlsd->target[0];
int ways = cxlsd->cxld.interleave_ways;
- if (dev_WARN_ONCE(&cxlsd->cxld.dev,
- ways > 8 || ways > cxlsd->nr_targets,
- "ways: %d overflows targets: %d\n", ways,
- cxlsd->nr_targets))
- return -ENXIO;
-
*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
if (ways > 1)
*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
@@ -539,8 +735,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
if (ways > 7)
*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
-
- return 0;
}
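cxlsd_set_targets() lays out one 8-bit dport id per interleave way in the 64-bit target list value, low way in the low byte, before the value is split across the TL_LOW/TL_HIGH registers. A minimal sketch of that packing with plain shifts in place of FIELD_PREP(); pack_target_list() is a hypothetical helper, not part of the driver:

/* Illustrative sketch only: 8 bits per target, way 0 in bits 7:0. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_target_list(const uint8_t *port_id, int ways)
{
	uint64_t tgt = 0;

	for (int i = 0; i < ways && i < 8; i++)
		tgt |= (uint64_t)port_id[i] << (i * 8);
	return tgt;
}

int main(void)
{
	uint8_t ids[] = { 0x01, 0x03, 0x05, 0x07 };

	/* 4-way interleave: expect 0x0000000007050301 */
	printf("target list: %#018llx\n",
	       (unsigned long long)pack_target_list(ids, 4));
	return 0;
}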
/*
@@ -569,26 +763,12 @@ static int cxld_await_commit(void __iomem *hdm, int id)
return -ETIMEDOUT;
}
-static int cxl_decoder_commit(struct cxl_decoder *cxld)
+static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm)
{
- struct cxl_port *port = to_cxl_port(cxld->dev.parent);
- struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
- void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- int id = cxld->id, rc;
+ int id = cxld->id;
u64 base, size;
u32 ctrl;
- if (cxld->flags & CXL_DECODER_F_ENABLE)
- return 0;
-
- if (port->commit_end + 1 != id) {
- dev_dbg(&port->dev,
- "%s: out of order commit, expected decoder%d.%d\n",
- dev_name(&cxld->dev), port->id, port->commit_end + 1);
- return -EBUSY;
- }
-
- down_read(&cxl_dpa_rwsem);
/* common decoder settings */
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
cxld_set_interleave(cxld, &ctrl);
@@ -608,13 +788,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
u64 targets;
- rc = cxlsd_set_targets(cxlsd, &targets);
- if (rc) {
- dev_dbg(&port->dev, "%s: target configuration error\n",
- dev_name(&cxld->dev));
- goto err;
- }
-
+ cxlsd_set_targets(cxlsd, &targets);
writel(upper_32_bits(targets), tl_hi);
writel(lower_32_bits(targets), tl_lo);
} else {
@@ -628,11 +802,50 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
}
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
+}
+
+static int cxl_decoder_commit(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+ struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ int id = cxld->id, rc;
+
+ if (cxld->flags & CXL_DECODER_F_ENABLE)
+ return 0;
+
+ if (cxl_num_decoders_committed(port) != id) {
+ dev_dbg(&port->dev,
+ "%s: out of order commit, expected decoder%d.%d\n",
+ dev_name(&cxld->dev), port->id,
+ cxl_num_decoders_committed(port));
+ return -EBUSY;
+ }
+
+ /*
+ * For endpoint decoders hosted on CXL memory devices that
+ * support the sanitize operation, make sure sanitize is not in-flight.
+ */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled =
+ to_cxl_endpoint_decoder(&cxld->dev);
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_memdev_state *mds =
+ to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (mds && mds->security.sanitize_active) {
+ dev_dbg(&cxlmd->dev,
+ "attempted to commit %s during sanitize\n",
+ dev_name(&cxld->dev));
+ return -EBUSY;
+ }
+ }
+
+ scoped_guard(rwsem_read, &cxl_rwsem.dpa)
+ setup_hw_decoder(cxld, hdm);
port->commit_end++;
rc = cxld_await_commit(hdm, cxld->id);
-err:
if (rc) {
dev_dbg(&port->dev, "%s: error %d committing decoder\n",
dev_name(&cxld->dev), rc);
@@ -644,7 +857,44 @@ err:
return 0;
}
-static int cxl_decoder_reset(struct cxl_decoder *cxld)
+static int commit_reap(struct device *dev, void *data)
+{
+ struct cxl_port *port = to_cxl_port(dev->parent);
+ struct cxl_decoder *cxld;
+
+ if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ if (port->commit_end == cxld->id &&
+ ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
+ dev_name(&cxld->dev), port->commit_end);
+ }
+
+ return 0;
+}
+
+void cxl_port_commit_reap(struct cxl_decoder *cxld)
+{
+ struct cxl_port *port = to_cxl_port(cxld->dev.parent);
+
+ lockdep_assert_held_write(&cxl_rwsem.region);
+
+ /*
+	 * Once the highest committed decoder is disabled, free any other
+	 * decoders that were left pinned by out-of-order release.
+ */
+ port->commit_end--;
+ dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
+ port->commit_end);
+ device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,
+ commit_reap);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, "CXL");
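The net effect of cxl_port_commit_reap() plus the commit_reap() walk is that commit_end retreats past every already-disabled decoder below the one just reset. A simplified sketch of that outcome under hypothetical state (an enabled[] array replaces the child-device iteration):

/* Illustrative sketch only: reap commit_end past disabled decoders. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* decoder id -> committed/enabled state; decoder 3 was just reset */
	bool enabled[] = { true, true, false, false };
	int commit_end = 3;

	/* walk downward while the decoder at commit_end is disabled */
	while (commit_end >= 0 && !enabled[commit_end])
		commit_end--;

	printf("commit_end: %d\n", commit_end);	/* expect 1 */
	return 0;
}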
+
+static void cxl_decoder_reset(struct cxl_decoder *cxld)
{
struct cxl_port *port = to_cxl_port(cxld->dev.parent);
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
@@ -653,16 +903,18 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
u32 ctrl;
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
- return 0;
+ return;
- if (port->commit_end != id) {
+ if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags))
+ return;
+
+ if (port->commit_end == id)
+ cxl_port_commit_reap(cxld);
+ else
dev_dbg(&port->dev,
"%s: out of order reset, expected decoder%d.%d\n",
dev_name(&cxld->dev), port->id, port->commit_end);
- return -EBUSY;
- }
- down_read(&cxl_dpa_rwsem);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
@@ -671,20 +923,65 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
- up_read(&cxl_dpa_rwsem);
- port->commit_end--;
cxld->flags &= ~CXL_DECODER_F_ENABLE;
+ /* Userspace is now responsible for reconfiguring this decoder */
+ if (is_endpoint_decoder(&cxld->dev)) {
+ struct cxl_endpoint_decoder *cxled;
+
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ cxled->state = CXL_DECODER_STATE_MANUAL;
+ }
+}
+
+static int cxl_setup_hdm_decoder_from_dvsec(
+ struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+ int which, struct cxl_endpoint_dvsec_info *info)
+{
+ struct cxl_endpoint_decoder *cxled;
+ u64 len;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ len = range_len(&info->dvsec_range[which]);
+ if (!len)
+ return -ENOENT;
+
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ cxld->commit = NULL;
+ cxld->reset = NULL;
+ cxld->hpa_range = info->dvsec_range[which];
+
+ /*
+ * Set the emulated decoder as locked pending additional support to
+ * change the range registers at run time.
+ */
+ cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
+ port->commit_end = cxld->id;
+
+ rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+ if (rc) {
+ dev_err(&port->dev,
+			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
+ port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+ return rc;
+ }
+ *dpa_base += len;
+ cxled->state = CXL_DECODER_STATE_AUTO;
+
return 0;
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which,
- u64 *dpa_base)
+ void __iomem *hdm, int which,
+ u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
struct cxl_endpoint_decoder *cxled = NULL;
- u64 size, base, skip, dpa_size;
+ u64 size, base, skip, dpa_size, lo, hi;
bool committed;
u32 remainder;
int i, rc;
@@ -694,12 +991,17 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
unsigned char target_id[8];
} target_list;
- if (is_endpoint_decoder(&cxld->dev))
- cxled = to_cxl_endpoint_decoder(&cxld->dev);
+ if (should_emulate_decoders(info))
+ return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+ which, info);
ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
- base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
- size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
+ base = (hi << 32) + lo;
+ lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
+ size = (hi << 32) + lo;
committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
cxld->commit = cxl_decoder_commit;
cxld->reset = cxl_decoder_reset;
@@ -712,6 +1014,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return -ENXIO;
}
+ if (info)
+ cxled = to_cxl_endpoint_decoder(&cxld->dev);
cxld->hpa_range = (struct range) {
.start = base,
.end = base + size - 1,
@@ -722,24 +1026,49 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
cxld->flags |= CXL_DECODER_F_ENABLE;
if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
cxld->flags |= CXL_DECODER_F_LOCK;
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
- cxld->target_type = CXL_DECODER_EXPANDER;
+ if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
else
- cxld->target_type = CXL_DECODER_ACCELERATOR;
- if (cxld->id != port->commit_end + 1) {
+ cxld->target_type = CXL_DECODER_DEVMEM;
+
+ guard(rwsem_write)(&cxl_rwsem.region);
+ if (cxld->id != cxl_num_decoders_committed(port)) {
dev_warn(&port->dev,
"decoder%d.%d: Committed out of order\n",
port->id, cxld->id);
return -ENXIO;
}
+
+ if (size == 0) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Committed with zero size\n",
+ port->id, cxld->id);
+ return -ENXIO;
+ }
port->commit_end = cxld->id;
} else {
- /* unless / until type-2 drivers arrive, assume type-3 */
- if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
- ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
+ if (cxled) {
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
+ /*
+ * Default by devtype until a device arrives that needs
+ * more precision.
+ */
+ if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ else
+ cxld->target_type = CXL_DECODER_DEVMEM;
+ } else {
+ /* To be overridden by region type at commit time */
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
+ }
+
+ if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
+ cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
+ ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
}
- cxld->target_type = CXL_DECODER_EXPANDER;
}
rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
&cxld->interleave_ways);
@@ -751,14 +1080,23 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
&cxld->interleave_granularity);
- if (rc)
+ if (rc) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
+ port->id, cxld->id, ctrl);
return rc;
+ }
+
+ dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
+ port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
+ cxld->interleave_ways, cxld->interleave_granularity);
if (!cxled) {
- target_list.value =
- ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
+ target_list.value = (hi << 32) + lo;
for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ cxld->target_map[i] = target_list.target_id[i];
return 0;
}
@@ -773,7 +1111,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
port->id, cxld->id, size, cxld->interleave_ways);
return -ENXIO;
}
- skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+ hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
+ skip = (hi << 32) + lo;
rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
if (rc) {
dev_err(&port->dev,
@@ -783,21 +1123,21 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
return rc;
}
*dpa_base += dpa_size + skip;
+
+ cxled->state = CXL_DECODER_STATE_AUTO;
+
return 0;
}
-/**
- * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
- * @cxlhdm: Structure to populate with HDM capabilities
- */
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
+static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
- struct cxl_port *port = cxlhdm->port;
- int i, committed;
- u64 dpa_base = 0;
+ int committed, i;
u32 ctrl;
+ if (!hdm)
+ return;
+
/*
* Since the register resource was recently claimed via request_region()
* be careful about trusting the "not-committed" status until the commit
@@ -814,9 +1154,24 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
/* ensure that future checks of committed can be trusted */
if (committed != cxlhdm->decoder_count)
msleep(20);
+}
+
+/**
+ * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
+ * @cxlhdm: Structure to populate with HDM capabilities
+ * @info: cached DVSEC range register info
+ */
+static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
+{
+ void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+ struct cxl_port *port = cxlhdm->port;
+ int i;
+ u64 dpa_base = 0;
+
+ cxl_settle_decoders(cxlhdm);
for (i = 0; i < cxlhdm->decoder_count; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
@@ -826,7 +1181,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
cxled = cxl_endpoint_decoder_alloc(port);
if (IS_ERR(cxled)) {
dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
+ "Failed to allocate decoder%d.%d\n",
+ port->id, i);
return PTR_ERR(cxled);
}
cxld = &cxled->cxld;
@@ -836,25 +1192,96 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
cxlsd = cxl_switch_decoder_alloc(port, target_count);
if (IS_ERR(cxlsd)) {
dev_warn(&port->dev,
- "Failed to allocate the decoder\n");
+ "Failed to allocate decoder%d.%d\n",
+ port->id, i);
return PTR_ERR(cxlsd);
}
cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
+ rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
if (rc) {
+ dev_warn(&port->dev,
+ "Failed to initialize decoder%d.%d\n",
+ port->id, i);
put_device(&cxld->dev);
return rc;
}
- rc = add_hdm_decoder(port, cxld, target_map);
+ rc = add_hdm_decoder(port, cxld);
if (rc) {
dev_warn(&port->dev,
- "Failed to add decoder to port\n");
+ "Failed to add decoder%d.%d\n", port->id, i);
return rc;
}
}
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);
+
+/**
+ * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
+ * @port: CXL port context
+ *
+ * Return 0 or -errno on error
+ */
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ if (!IS_ERR(cxlhdm))
+ return devm_cxl_enumerate_decoders(cxlhdm, NULL);
+
+ if (PTR_ERR(cxlhdm) != -ENODEV) {
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ if (cxl_port_get_possible_dports(port) == 1) {
+ dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+ return devm_cxl_add_passthrough_decoder(port);
+ }
+
+ dev_err(&port->dev, "HDM decoder capability not found\n");
+ return -ENXIO;
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");
+
+/**
+ * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
+ * @port: CXL port context
+ *
+ * Return 0 or -errno on error
+ */
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_endpoint_dvsec_info info = { .port = port };
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_hdm *cxlhdm;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
+ if (rc < 0)
+ return rc;
+
+ cxlhdm = devm_cxl_setup_hdm(port, &info);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) == -ENODEV)
+ dev_err(&port->dev, "HDM decoder registers not found\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enumerate_decoders(cxlhdm, &info);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");