Diffstat (limited to 'drivers')
203 files changed, 5791 insertions, 2724 deletions
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index a1a99ec3f12c..712624cba2b6 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -335,6 +335,63 @@ static int add_or_reset_cxl_resource(struct resource *parent, struct resource *r return rc; } +static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd) +{ + struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; + struct range *hpa = &cxld->hpa_range; + resource_size_t size = range_len(hpa); + resource_size_t start = hpa->start; + resource_size_t cache_size; + struct resource res; + int nid, rc; + + res = DEFINE_RES(start, size, 0); + nid = phys_to_target_node(start); + + rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size); + if (rc) + return rc; + + /* + * The cache range is expected to be within the CFMWS. + * Currently only cache_size == cxl_size is supported, where the + * CXL size is half of the total CFMWS window size. + */ + size = size >> 1; + if (cache_size && size != cache_size) { + dev_warn(&cxld->dev, + "Extended Linear Cache size %pa != CXL size %pa. No Support!", + &cache_size, &size); + return -ENXIO; + } + + cxlrd->cache_size = cache_size; + + return 0; +} + +static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd) +{ + int rc; + + rc = cxl_acpi_set_cache_size(cxlrd); + if (!rc) + return; + + if (rc != -EOPNOTSUPP) { + /* + * Failing to support extended linear cache region resize does not + * prevent the region from functioning; it only causes cxl list to + * show an incorrect region size. + */ + dev_warn(cxlrd->cxlsd.cxld.dev.parent, + "Extended linear cache calculation failed rc:%d\n", rc); + } + + /* Ignoring return code */ + cxlrd->cache_size = 0; +} + DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *, if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev)) DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T)) @@ -394,6 +451,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws, ig = CXL_DECODER_MIN_GRANULARITY; cxld->interleave_granularity = ig; + cxl_setup_extended_linear_cache(cxlrd); + if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) { if (ways != 1 && ways != 3) { cxims_ctx = (struct cxl_cxims_context) { diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index 79e2ef81fde8..5ad8fef210b5 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -15,7 +15,6 @@ cxl_core-y += hdm.o cxl_core-y += pmu.o cxl_core-y += cdat.o cxl_core-y += ras.o -cxl_core-y += acpi.o cxl_core-$(CONFIG_TRACING) += trace.o cxl_core-$(CONFIG_CXL_REGION) += region.o cxl_core-$(CONFIG_CXL_MCE) += mce.o diff --git a/drivers/cxl/core/acpi.c b/drivers/cxl/core/acpi.c deleted file mode 100644 index f13b4dae6ac5..000000000000 --- a/drivers/cxl/core/acpi.c +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* Copyright(c) 2024 Intel Corporation. All rights reserved. 
*/ -#include <linux/acpi.h> -#include "cxl.h" -#include "core.h" - -int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res, - int nid, resource_size_t *size) -{ - return hmat_get_extended_linear_cache_size(backing_res, nid, size); -} diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c index 0ccef2f2a26a..c0af645425f4 100644 --- a/drivers/cxl/core/cdat.c +++ b/drivers/cxl/core/cdat.c @@ -336,7 +336,7 @@ static int match_cxlrd_hb(struct device *dev, void *data) cxlrd = to_cxl_root_decoder(dev); cxlsd = &cxlrd->cxlsd; - guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); for (int i = 0; i < cxlsd->nr_targets; i++) { if (host_bridge == cxlsd->target[i]->dport_dev) return 1; @@ -987,7 +987,7 @@ void cxl_region_shared_upstream_bandwidth_update(struct cxl_region *cxlr) bool is_root; int rc; - lockdep_assert_held(&cxl_dpa_rwsem); + lockdep_assert_held(&cxl_rwsem.dpa); struct xarray *usp_xa __free(free_perf_xa) = kzalloc(sizeof(*usp_xa), GFP_KERNEL); @@ -1057,7 +1057,7 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr, { struct cxl_dpa_perf *perf; - lockdep_assert_held(&cxl_dpa_rwsem); + lockdep_assert_held(&cxl_rwsem.dpa); perf = cxled_get_dpa_perf(cxled); if (IS_ERR(perf)) diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 29b61828a847..2669f251d677 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -5,6 +5,7 @@ #define __CXL_CORE_H__ #include <cxl/mailbox.h> +#include <linux/rwsem.h> extern const struct device_type cxl_nvdimm_bridge_type; extern const struct device_type cxl_nvdimm_type; @@ -12,6 +13,11 @@ extern const struct device_type cxl_pmu_type; extern struct attribute_group cxl_base_attribute_group; +enum cxl_detach_mode { + DETACH_ONLY, + DETACH_INVALIDATE, +}; + #ifdef CONFIG_CXL_REGION extern struct device_attribute dev_attr_create_pmem_region; extern struct device_attribute dev_attr_create_ram_region; @@ -20,7 +26,11 @@ extern struct device_attribute dev_attr_region; extern const struct device_type cxl_pmem_region_type; extern const struct device_type cxl_dax_region_type; extern const struct device_type cxl_region_type; -void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled); + +int cxl_decoder_detach(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, int pos, + enum cxl_detach_mode mode); + #define CXL_REGION_ATTR(x) (&dev_attr_##x.attr) #define CXL_REGION_TYPE(x) (&cxl_region_type) #define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr), @@ -48,8 +58,11 @@ static inline int cxl_get_poison_by_endpoint(struct cxl_port *port) { return 0; } -static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) +static inline int cxl_decoder_detach(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, + int pos, enum cxl_detach_mode mode) { + return 0; } static inline int cxl_region_init(void) { @@ -80,6 +93,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size); int cxl_dpa_free(struct cxl_endpoint_decoder *cxled); resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled); resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled); +bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr); enum cxl_rcrb { CXL_RCRB_DOWNSTREAM, @@ -96,8 +110,20 @@ u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb); #define PCI_RCRB_CAP_HDR_NEXT_MASK GENMASK(15, 8) #define PCI_CAP_EXP_SIZEOF 0x3c -extern struct rw_semaphore cxl_dpa_rwsem; -extern struct rw_semaphore cxl_region_rwsem; +struct 
cxl_rwsem { + /* + * All changes to HPA (interleave configuration) occur with this + * lock held for write. + */ + struct rw_semaphore region; + /* + * All changes to a device DPA space occur with this lock held + * for write. + */ + struct rw_semaphore dpa; +}; + +extern struct cxl_rwsem cxl_rwsem; int cxl_memdev_init(void); void cxl_memdev_exit(void); @@ -120,8 +146,6 @@ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, int cxl_ras_init(void); void cxl_ras_exit(void); int cxl_gpf_port_setup(struct cxl_dport *dport); -int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res, - int nid, resource_size_t *size); #ifdef CONFIG_CXL_FEATURES struct cxl_feat_entry * diff --git a/drivers/cxl/core/edac.c b/drivers/cxl/core/edac.c index 623aaa4439c4..79994ca9bc9f 100644 --- a/drivers/cxl/core/edac.c +++ b/drivers/cxl/core/edac.c @@ -115,10 +115,9 @@ static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx, flags, min_cycle); } - struct rw_semaphore *region_lock __free(rwsem_read_release) = - rwsem_read_intr_acquire(&cxl_region_rwsem); - if (!region_lock) - return -EINTR; + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) + return ret; cxlr = cxl_ps_ctx->cxlr; p = &cxlr->params; @@ -158,10 +157,9 @@ static int cxl_scrub_set_attrbs_region(struct device *dev, struct cxl_region *cxlr; int ret, i; - struct rw_semaphore *region_lock __free(rwsem_read_release) = - rwsem_read_intr_acquire(&cxl_region_rwsem); - if (!region_lock) - return -EINTR; + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((ret = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) + return ret; cxlr = cxl_ps_ctx->cxlr; p = &cxlr->params; @@ -697,7 +695,7 @@ static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config, ECS_THRESHOLD_IDX_4096); break; default: - dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n", + dev_dbg(dev, "Invalid CXL ECS threshold count(%u) to set\n", val); dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n", ECS_THRESHOLD_256, ECS_THRESHOLD_1024, @@ -1340,16 +1338,15 @@ cxl_mem_perform_sparing(struct device *dev, struct cxl_memdev_sparing_in_payload sparing_pi; struct cxl_event_dram *rec = NULL; u16 validity_flags = 0; + int ret; - struct rw_semaphore *region_lock __free(rwsem_read_release) = - rwsem_read_intr_acquire(&cxl_region_rwsem); - if (!region_lock) - return -EINTR; + ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region); + if ((ret = ACQUIRE_ERR(rwsem_read_intr, ®ion_rwsem))) + return ret; - struct rw_semaphore *dpa_lock __free(rwsem_read_release) = - rwsem_read_intr_acquire(&cxl_dpa_rwsem); - if (!dpa_lock) - return -EINTR; + ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa); + if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem))) + return ret; if (!cxl_sparing_ctx->cap_safe_when_in_use) { /* Memory to repair must be offline */ @@ -1523,7 +1520,7 @@ static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa) struct cxl_memdev *cxlmd = ctx->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; - if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) + if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) return -EINVAL; ctx->dpa = dpa; @@ -1787,16 +1784,15 @@ static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx) struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs; struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; struct cxl_mem_repair_attrbs attrbs = { 0 }; + int ret; - struct rw_semaphore *region_lock 
__free(rwsem_read_release) = - rwsem_read_intr_acquire(&cxl_region_rwsem); - if (!region_lock) - return -EINTR; + ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region); + if ((ret = ACQUIRE_ERR(rwsem_read_intr, ®ion_rwsem))) + return ret; - struct rw_semaphore *dpa_lock __free(rwsem_read_release) = - rwsem_read_intr_acquire(&cxl_dpa_rwsem); - if (!dpa_lock) - return -EINTR; + ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa); + if ((ret = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem))) + return ret; if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) { /* Memory to repair must be offline */ @@ -1892,7 +1888,7 @@ static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa) struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; struct cxl_dev_state *cxlds = cxlmd->cxlds; - if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) + if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) return -EINVAL; cxl_ppr_ctx->dpa = dpa; @@ -1923,8 +1919,11 @@ static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data, static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val) { struct cxl_ppr_context *cxl_ppr_ctx = drv_data; + struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd; + struct cxl_dev_state *cxlds = cxlmd->cxlds; - if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR) + if (val != EDAC_DO_MEM_REPAIR || + !cxl_resource_contains_addr(&cxlds->dpa_res, cxl_ppr_ctx->dpa)) return -EINVAL; return cxl_mem_perform_ppr(cxl_ppr_ctx); diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index ab1007495f6b..e9e1d555cec6 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -16,7 +16,10 @@ * for enumerating these registers and capabilities. */ -DECLARE_RWSEM(cxl_dpa_rwsem); +struct cxl_rwsem cxl_rwsem = { + .region = __RWSEM_INITIALIZER(cxl_rwsem.region), + .dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa), +}; static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, int *target_map) @@ -214,7 +217,7 @@ void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds) { struct resource *p1, *p2; - guard(rwsem_read)(&cxl_dpa_rwsem); + guard(rwsem_read)(&cxl_rwsem.dpa); for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) { __cxl_dpa_debug(file, p1, 0); for (p2 = p1->child; p2; p2 = p2->sibling) @@ -266,7 +269,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled) struct resource *res = cxled->dpa_res; resource_size_t skip_start; - lockdep_assert_held_write(&cxl_dpa_rwsem); + lockdep_assert_held_write(&cxl_rwsem.dpa); /* save @skip_start, before @res is released */ skip_start = res->start - cxled->skip; @@ -281,7 +284,7 @@ static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled) static void cxl_dpa_release(void *cxled) { - guard(rwsem_write)(&cxl_dpa_rwsem); + guard(rwsem_write)(&cxl_rwsem.dpa); __cxl_dpa_release(cxled); } @@ -293,7 +296,7 @@ static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled) { struct cxl_port *port = cxled_to_port(cxled); - lockdep_assert_held_write(&cxl_dpa_rwsem); + lockdep_assert_held_write(&cxl_rwsem.dpa); devm_remove_action(&port->dev, cxl_dpa_release, cxled); __cxl_dpa_release(cxled); } @@ -361,7 +364,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, struct resource *res; int rc; - lockdep_assert_held_write(&cxl_dpa_rwsem); + lockdep_assert_held_write(&cxl_rwsem.dpa); if (!len) { dev_warn(dev, "decoder%d.%d: empty reservation attempted\n", @@ -470,7 +473,7 @@ int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info) { struct device 
*dev = cxlds->dev; - guard(rwsem_write)(&cxl_dpa_rwsem); + guard(rwsem_write)(&cxl_rwsem.dpa); if (cxlds->nr_partitions) return -EBUSY; @@ -516,9 +519,8 @@ int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, struct cxl_port *port = cxled_to_port(cxled); int rc; - down_write(&cxl_dpa_rwsem); - rc = __cxl_dpa_reserve(cxled, base, len, skipped); - up_write(&cxl_dpa_rwsem); + scoped_guard(rwsem_write, &cxl_rwsem.dpa) + rc = __cxl_dpa_reserve(cxled, base, len, skipped); if (rc) return rc; @@ -529,7 +531,7 @@ EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, "CXL"); resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled) { - guard(rwsem_read)(&cxl_dpa_rwsem); + guard(rwsem_read)(&cxl_rwsem.dpa); if (cxled->dpa_res) return resource_size(cxled->dpa_res); @@ -540,19 +542,26 @@ resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled) { resource_size_t base = -1; - lockdep_assert_held(&cxl_dpa_rwsem); + lockdep_assert_held(&cxl_rwsem.dpa); if (cxled->dpa_res) base = cxled->dpa_res->start; return base; } +bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr) +{ + struct resource _addr = DEFINE_RES_MEM(addr, 1); + + return resource_contains(res, &_addr); +} + int cxl_dpa_free(struct cxl_endpoint_decoder *cxled) { struct cxl_port *port = cxled_to_port(cxled); struct device *dev = &cxled->cxld.dev; - guard(rwsem_write)(&cxl_dpa_rwsem); + guard(rwsem_write)(&cxl_rwsem.dpa); if (!cxled->dpa_res) return 0; if (cxled->cxld.region) { @@ -582,7 +591,7 @@ int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled, struct device *dev = &cxled->cxld.dev; int part; - guard(rwsem_write)(&cxl_dpa_rwsem); + guard(rwsem_write)(&cxl_rwsem.dpa); if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) return -EBUSY; @@ -614,7 +623,7 @@ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size) struct resource *p, *last; int part; - guard(rwsem_write)(&cxl_dpa_rwsem); + guard(rwsem_write)(&cxl_rwsem.dpa); if (cxled->cxld.region) { dev_dbg(dev, "decoder attached to %s\n", dev_name(&cxled->cxld.region->dev)); @@ -764,46 +773,12 @@ static int cxld_await_commit(void __iomem *hdm, int id) return -ETIMEDOUT; } -static int cxl_decoder_commit(struct cxl_decoder *cxld) +static void setup_hw_decoder(struct cxl_decoder *cxld, void __iomem *hdm) { - struct cxl_port *port = to_cxl_port(cxld->dev.parent); - struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); - void __iomem *hdm = cxlhdm->regs.hdm_decoder; - int id = cxld->id, rc; + int id = cxld->id; u64 base, size; u32 ctrl; - if (cxld->flags & CXL_DECODER_F_ENABLE) - return 0; - - if (cxl_num_decoders_committed(port) != id) { - dev_dbg(&port->dev, - "%s: out of order commit, expected decoder%d.%d\n", - dev_name(&cxld->dev), port->id, - cxl_num_decoders_committed(port)); - return -EBUSY; - } - - /* - * For endpoint decoders hosted on CXL memory devices that - * support the sanitize operation, make sure sanitize is not in-flight. 
- */ - if (is_endpoint_decoder(&cxld->dev)) { - struct cxl_endpoint_decoder *cxled = - to_cxl_endpoint_decoder(&cxld->dev); - struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); - struct cxl_memdev_state *mds = - to_cxl_memdev_state(cxlmd->cxlds); - - if (mds && mds->security.sanitize_active) { - dev_dbg(&cxlmd->dev, - "attempted to commit %s during sanitize\n", - dev_name(&cxld->dev)); - return -EBUSY; - } - } - - down_read(&cxl_dpa_rwsem); /* common decoder settings */ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); cxld_set_interleave(cxld, &ctrl); @@ -837,7 +812,47 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld) } writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); - up_read(&cxl_dpa_rwsem); +} + +static int cxl_decoder_commit(struct cxl_decoder *cxld) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + void __iomem *hdm = cxlhdm->regs.hdm_decoder; + int id = cxld->id, rc; + + if (cxld->flags & CXL_DECODER_F_ENABLE) + return 0; + + if (cxl_num_decoders_committed(port) != id) { + dev_dbg(&port->dev, + "%s: out of order commit, expected decoder%d.%d\n", + dev_name(&cxld->dev), port->id, + cxl_num_decoders_committed(port)); + return -EBUSY; + } + + /* + * For endpoint decoders hosted on CXL memory devices that + * support the sanitize operation, make sure sanitize is not in-flight. + */ + if (is_endpoint_decoder(&cxld->dev)) { + struct cxl_endpoint_decoder *cxled = + to_cxl_endpoint_decoder(&cxld->dev); + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_memdev_state *mds = + to_cxl_memdev_state(cxlmd->cxlds); + + if (mds && mds->security.sanitize_active) { + dev_dbg(&cxlmd->dev, + "attempted to commit %s during sanitize\n", + dev_name(&cxld->dev)); + return -EBUSY; + } + } + + scoped_guard(rwsem_read, &cxl_rwsem.dpa) + setup_hw_decoder(cxld, hdm); port->commit_end++; rc = cxld_await_commit(hdm, cxld->id); @@ -875,7 +890,7 @@ void cxl_port_commit_reap(struct cxl_decoder *cxld) { struct cxl_port *port = to_cxl_port(cxld->dev.parent); - lockdep_assert_held_write(&cxl_region_rwsem); + lockdep_assert_held_write(&cxl_rwsem.region); /* * Once the highest committed decoder is disabled, free any other @@ -907,7 +922,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld) "%s: out of order reset, expected decoder%d.%d\n", dev_name(&cxld->dev), port->id, port->commit_end); - down_read(&cxl_dpa_rwsem); ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT; writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); @@ -916,7 +930,6 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld) writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); - up_read(&cxl_dpa_rwsem); cxld->flags &= ~CXL_DECODER_F_ENABLE; @@ -1025,7 +1038,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, else cxld->target_type = CXL_DECODER_DEVMEM; - guard(rwsem_write)(&cxl_region_rwsem); + guard(rwsem_write)(&cxl_rwsem.region); if (cxld->id != cxl_num_decoders_committed(port)) { dev_warn(&port->dev, "decoder%d.%d: Committed out of order\n", diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 2689e6453c5a..fa6dd0c94656 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -899,6 +899,10 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd, trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); return; } + 
if (event_type == CXL_CPER_EVENT_MEM_SPARING) { + trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing); + return; + } if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX; @@ -909,8 +913,8 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd, * translations. Take topology mutation locks and lookup * { HPA, REGION } from { DPA, MEMDEV } in the event record. */ - guard(rwsem_read)(&cxl_region_rwsem); - guard(rwsem_read)(&cxl_dpa_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); + guard(rwsem_read)(&cxl_rwsem.dpa); dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK; cxlr = cxl_dpa_to_region(cxlmd, dpa); @@ -926,12 +930,30 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd, if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt)) dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n"); + if (evt->gen_media.media_hdr.descriptor & + CXL_GMER_EVT_DESC_THRESHOLD_EVENT) + WARN_ON_ONCE((evt->gen_media.media_hdr.type & + CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) && + !get_unaligned_le24(evt->gen_media.cme_count)); + else + WARN_ON_ONCE(evt->gen_media.media_hdr.type & + CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE); + trace_cxl_general_media(cxlmd, type, cxlr, hpa, hpa_alias, &evt->gen_media); } else if (event_type == CXL_CPER_EVENT_DRAM) { if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt)) dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n"); + if (evt->dram.media_hdr.descriptor & + CXL_GMER_EVT_DESC_THRESHOLD_EVENT) + WARN_ON_ONCE((evt->dram.media_hdr.type & + CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) && + !get_unaligned_le24(evt->dram.cvme_count)); + else + WARN_ON_ONCE(evt->dram.media_hdr.type & + CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE); + trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias, &evt->dram); } @@ -952,6 +974,8 @@ static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd, ev_type = CXL_CPER_EVENT_DRAM; else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID)) ev_type = CXL_CPER_EVENT_MEM_MODULE; + else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID)) + ev_type = CXL_CPER_EVENT_MEM_SPARING; cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event); } @@ -1265,7 +1289,7 @@ int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd) /* synchronize with cxl_mem_probe() and decoder write operations */ guard(device)(&cxlmd->dev); endpoint = cxlmd->endpoint; - guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); /* * Require an endpoint to be safe otherwise the driver can not * be sure that the device is unmapped. 
@@ -1401,8 +1425,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, int nr_records = 0; int rc; - rc = mutex_lock_interruptible(&mds->poison.lock); - if (rc) + ACQUIRE(mutex_intr, lock)(&mds->poison.mutex); + if ((rc = ACQUIRE_ERR(mutex_intr, &lock))) return rc; po = mds->poison.list_out; @@ -1437,7 +1461,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, } } while (po->flags & CXL_POISON_FLAG_MORE); - mutex_unlock(&mds->poison.lock); return rc; } EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL"); @@ -1473,7 +1496,7 @@ int cxl_poison_state_init(struct cxl_memdev_state *mds) return rc; } - mutex_init(&mds->poison.lock); + mutex_init(&mds->poison.mutex); return 0; } EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL"); diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index f88a13adf7fa..c569e00a511f 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -232,15 +232,13 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) if (!port || !is_cxl_endpoint(port)) return -EINVAL; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, ®ion_rwsem))) return rc; - rc = down_read_interruptible(&cxl_dpa_rwsem); - if (rc) { - up_read(&cxl_region_rwsem); + ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem))) return rc; - } if (cxl_num_decoders_committed(port) == 0) { /* No regions mapped to this memdev */ @@ -249,8 +247,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) /* Regions mapped, collect poison by endpoint */ rc = cxl_get_poison_by_endpoint(port); } - up_read(&cxl_dpa_rwsem); - up_read(&cxl_region_rwsem); return rc; } @@ -267,7 +263,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) dev_dbg(cxlds->dev, "device has no dpa resource\n"); return -EINVAL; } - if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) { + if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) { dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n", dpa, &cxlds->dpa_res); return -EINVAL; @@ -292,19 +288,17 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, ®ion_rwsem))) return rc; - rc = down_read_interruptible(&cxl_dpa_rwsem); - if (rc) { - up_read(&cxl_region_rwsem); + ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem))) return rc; - } rc = cxl_validate_poison_dpa(cxlmd, dpa); if (rc) - goto out; + return rc; inject.address = cpu_to_le64(dpa); mbox_cmd = (struct cxl_mbox_cmd) { @@ -314,7 +308,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) }; rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) - goto out; + return rc; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) @@ -327,11 +321,8 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) .length = cpu_to_le32(1), }; trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT); -out: - up_read(&cxl_dpa_rwsem); - up_read(&cxl_region_rwsem); - return rc; + return 0; } EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL"); @@ -347,19 +338,17 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) if (!IS_ENABLED(CONFIG_DEBUG_FS)) return 0; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + 
ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, ®ion_rwsem))) return rc; - rc = down_read_interruptible(&cxl_dpa_rwsem); - if (rc) { - up_read(&cxl_region_rwsem); + ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem))) return rc; - } rc = cxl_validate_poison_dpa(cxlmd, dpa); if (rc) - goto out; + return rc; /* * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command @@ -378,7 +367,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); if (rc) - goto out; + return rc; cxlr = cxl_dpa_to_region(cxlmd, dpa); if (cxlr) @@ -391,11 +380,8 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) .length = cpu_to_le32(1), }; trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR); -out: - up_read(&cxl_dpa_rwsem); - up_read(&cxl_region_rwsem); - return rc; + return 0; } EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL"); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index eb46c6764d20..29197376b18e 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -30,18 +30,12 @@ * instantiated by the core. */ -/* - * All changes to the interleave configuration occur with this lock held - * for write. - */ -DECLARE_RWSEM(cxl_region_rwsem); - static DEFINE_IDA(cxl_port_ida); static DEFINE_XARRAY(cxl_root_buses); int cxl_num_decoders_committed(struct cxl_port *port) { - lockdep_assert_held(&cxl_region_rwsem); + lockdep_assert_held(&cxl_rwsem.region); return port->commit_end + 1; } @@ -176,7 +170,7 @@ static ssize_t target_list_show(struct device *dev, ssize_t offset; int rc; - guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); rc = emit_target_list(cxlsd, buf); if (rc < 0) return rc; @@ -196,7 +190,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr, struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); struct cxl_dev_state *cxlds = cxlmd->cxlds; - /* without @cxl_dpa_rwsem, make sure @part is not reloaded */ + /* without @cxl_rwsem.dpa, make sure @part is not reloaded */ int part = READ_ONCE(cxled->part); const char *desc; @@ -235,7 +229,7 @@ static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *at { struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); - guard(rwsem_read)(&cxl_dpa_rwsem); + guard(rwsem_read)(&cxl_rwsem.dpa); return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled)); } static DEVICE_ATTR_RO(dpa_resource); @@ -560,7 +554,7 @@ static ssize_t decoders_committed_show(struct device *dev, { struct cxl_port *port = to_cxl_port(dev); - guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port)); } @@ -1722,7 +1716,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, if (xa_empty(&port->dports)) return -EINVAL; - guard(rwsem_write)(&cxl_region_rwsem); + guard(rwsem_write)(&cxl_rwsem.region); for (i = 0; i < cxlsd->cxld.interleave_ways; i++) { struct cxl_dport *dport = find_dport(port, target_map[i]); @@ -2001,12 +1995,9 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL"); static void cxld_unregister(void *dev) { - struct cxl_endpoint_decoder *cxled; - - if (is_endpoint_decoder(dev)) { - cxled = to_cxl_endpoint_decoder(dev); - cxl_decoder_kill_region(cxled); - } + if (is_endpoint_decoder(dev)) + cxl_decoder_detach(NULL, 
to_cxl_endpoint_decoder(dev), -1, + DETACH_INVALIDATE); device_unregister(dev); } @@ -2293,7 +2284,7 @@ static const struct attribute_group *cxl_bus_attribute_groups[] = { NULL, }; -struct bus_type cxl_bus_type = { +const struct bus_type cxl_bus_type = { .name = "cxl", .uevent = cxl_bus_uevent, .match = cxl_bus_match, diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index ba42259c3701..71cc42d05248 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -141,16 +141,12 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, struct cxl_region_params *p = &cxlr->params; ssize_t rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, ®ion_rwsem))) return rc; if (cxlr->mode != CXL_PARTMODE_PMEM) - rc = sysfs_emit(buf, "\n"); - else - rc = sysfs_emit(buf, "%pUb\n", &p->uuid); - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%pUb\n", &p->uuid); } static int is_dup(struct device *match, void *data) @@ -162,7 +158,7 @@ static int is_dup(struct device *match, void *data) if (!is_cxl_region(match)) return 0; - lockdep_assert_held(&cxl_region_rwsem); + lockdep_assert_held(&cxl_rwsem.region); cxlr = to_cxl_region(match); p = &cxlr->params; @@ -192,27 +188,22 @@ static ssize_t uuid_store(struct device *dev, struct device_attribute *attr, if (uuid_is_null(&temp)) return -EINVAL; - rc = down_write_killable(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_write_kill, region_rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, ®ion_rwsem))) return rc; if (uuid_equal(&p->uuid, &temp)) - goto out; + return len; - rc = -EBUSY; if (p->state >= CXL_CONFIG_ACTIVE) - goto out; + return -EBUSY; rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup); if (rc < 0) - goto out; + return rc; uuid_copy(&p->uuid, &temp); -out: - up_write(&cxl_region_rwsem); - if (rc) - return rc; return len; } static DEVICE_ATTR_RW(uuid); @@ -349,33 +340,40 @@ err: return rc; } -static ssize_t commit_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t len) +static int queue_reset(struct cxl_region *cxlr) { - struct cxl_region *cxlr = to_cxl_region(dev); struct cxl_region_params *p = &cxlr->params; - bool commit; - ssize_t rc; + int rc; - rc = kstrtobool(buf, &commit); - if (rc) + ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem))) return rc; - rc = down_write_killable(&cxl_region_rwsem); - if (rc) + /* Already in the requested state? */ + if (p->state < CXL_CONFIG_COMMIT) + return 0; + + p->state = CXL_CONFIG_RESET_PENDING; + + return 0; +} + +static int __commit(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + int rc; + + ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem))) return rc; /* Already in the requested state? */ - if (commit && p->state >= CXL_CONFIG_COMMIT) - goto out; - if (!commit && p->state < CXL_CONFIG_COMMIT) - goto out; + if (p->state >= CXL_CONFIG_COMMIT) + return 0; /* Not ready to commit? 
*/ - if (commit && p->state < CXL_CONFIG_ACTIVE) { - rc = -ENXIO; - goto out; - } + if (p->state < CXL_CONFIG_ACTIVE) + return -ENXIO; /* * Invalidate caches before region setup to drop any speculative @@ -383,33 +381,61 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr, */ rc = cxl_region_invalidate_memregion(cxlr); if (rc) - goto out; + return rc; - if (commit) { - rc = cxl_region_decode_commit(cxlr); - if (rc == 0) - p->state = CXL_CONFIG_COMMIT; - } else { - p->state = CXL_CONFIG_RESET_PENDING; - up_write(&cxl_region_rwsem); - device_release_driver(&cxlr->dev); - down_write(&cxl_region_rwsem); + rc = cxl_region_decode_commit(cxlr); + if (rc) + return rc; - /* - * The lock was dropped, so need to revalidate that the reset is - * still pending. - */ - if (p->state == CXL_CONFIG_RESET_PENDING) { - cxl_region_decode_reset(cxlr, p->interleave_ways); - p->state = CXL_CONFIG_ACTIVE; - } - } + p->state = CXL_CONFIG_COMMIT; -out: - up_write(&cxl_region_rwsem); + return 0; +} +static ssize_t commit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + bool commit; + ssize_t rc; + + rc = kstrtobool(buf, &commit); if (rc) return rc; + + if (commit) { + rc = __commit(cxlr); + if (rc) + return rc; + return len; + } + + rc = queue_reset(cxlr); + if (rc) + return rc; + + /* + * Unmap the region and depend on the reset-pending state to ensure + * it does not go active again until after the reset + */ + device_release_driver(&cxlr->dev); + + /* + * With the reset pending, take cxl_rwsem.region unconditionally + * to ensure the reset gets handled before returning. + */ + guard(rwsem_write)(&cxl_rwsem.region); + + /* + * Revalidate that the reset is still pending in case another + * thread already handled this reset. 
+ */ + if (p->state == CXL_CONFIG_RESET_PENDING) { + cxl_region_decode_reset(cxlr, p->interleave_ways); + p->state = CXL_CONFIG_ACTIVE; + } + return len; } @@ -420,13 +446,10 @@ static ssize_t commit_show(struct device *dev, struct device_attribute *attr, struct cxl_region_params *p = &cxlr->params; ssize_t rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) return rc; - rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); } static DEVICE_ATTR_RW(commit); @@ -450,15 +473,12 @@ static ssize_t interleave_ways_show(struct device *dev, { struct cxl_region *cxlr = to_cxl_region(dev); struct cxl_region_params *p = &cxlr->params; - ssize_t rc; + int rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) return rc; - rc = sysfs_emit(buf, "%d\n", p->interleave_ways); - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "%d\n", p->interleave_ways); } static const struct attribute_group *get_cxl_region_target_group(void); @@ -493,23 +513,21 @@ static ssize_t interleave_ways_store(struct device *dev, return -EINVAL; } - rc = down_write_killable(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem))) return rc; - if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { - rc = -EBUSY; - goto out; - } + + if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) + return -EBUSY; save = p->interleave_ways; p->interleave_ways = val; rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); - if (rc) + if (rc) { p->interleave_ways = save; -out: - up_write(&cxl_region_rwsem); - if (rc) return rc; + } + return len; } static DEVICE_ATTR_RW(interleave_ways); @@ -520,15 +538,12 @@ static ssize_t interleave_granularity_show(struct device *dev, { struct cxl_region *cxlr = to_cxl_region(dev); struct cxl_region_params *p = &cxlr->params; - ssize_t rc; + int rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) return rc; - rc = sysfs_emit(buf, "%d\n", p->interleave_granularity); - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "%d\n", p->interleave_granularity); } static ssize_t interleave_granularity_store(struct device *dev, @@ -561,19 +576,15 @@ static ssize_t interleave_granularity_store(struct device *dev, if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity) return -EINVAL; - rc = down_write_killable(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem))) return rc; - if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { - rc = -EBUSY; - goto out; - } + + if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) + return -EBUSY; p->interleave_granularity = val; -out: - up_write(&cxl_region_rwsem); - if (rc) - return rc; + return len; } static DEVICE_ATTR_RW(interleave_granularity); @@ -584,17 +595,15 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr, struct cxl_region *cxlr = to_cxl_region(dev); struct cxl_region_params *p = &cxlr->params; u64 resource = -1ULL; - ssize_t rc; + int rc; - rc = 
down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) return rc; + if (p->res) resource = p->res->start; - rc = sysfs_emit(buf, "%#llx\n", resource); - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "%#llx\n", resource); } static DEVICE_ATTR_RO(resource); @@ -622,7 +631,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size) struct resource *res; u64 remainder = 0; - lockdep_assert_held_write(&cxl_region_rwsem); + lockdep_assert_held_write(&cxl_rwsem.region); /* Nothing to do... */ if (p->res && resource_size(p->res) == size) @@ -664,7 +673,7 @@ static void cxl_region_iomem_release(struct cxl_region *cxlr) struct cxl_region_params *p = &cxlr->params; if (device_is_registered(&cxlr->dev)) - lockdep_assert_held_write(&cxl_region_rwsem); + lockdep_assert_held_write(&cxl_rwsem.region); if (p->res) { /* * Autodiscovered regions may not have been able to insert their @@ -681,7 +690,7 @@ static int free_hpa(struct cxl_region *cxlr) { struct cxl_region_params *p = &cxlr->params; - lockdep_assert_held_write(&cxl_region_rwsem); + lockdep_assert_held_write(&cxl_rwsem.region); if (!p->res) return 0; @@ -705,15 +714,14 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr, if (rc) return rc; - rc = down_write_killable(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem))) return rc; if (val) rc = alloc_hpa(cxlr, val); else rc = free_hpa(cxlr); - up_write(&cxl_region_rwsem); if (rc) return rc; @@ -729,15 +737,12 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr, u64 size = 0; ssize_t rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) return rc; if (p->res) size = resource_size(p->res); - rc = sysfs_emit(buf, "%#llx\n", size); - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "%#llx\n", size); } static DEVICE_ATTR_RW(size); @@ -763,26 +768,20 @@ static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos) struct cxl_endpoint_decoder *cxled; int rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) return rc; if (pos >= p->interleave_ways) { dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, p->interleave_ways); - rc = -ENXIO; - goto out; + return -ENXIO; } cxled = p->targets[pos]; if (!cxled) - rc = sysfs_emit(buf, "\n"); - else - rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); -out: - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); } static int check_commit_order(struct device *dev, void *data) @@ -897,7 +896,7 @@ cxl_port_pick_region_decoder(struct cxl_port *port, /* * This decoder is pinned registered as long as the endpoint decoder is * registered, and endpoint decoder unregistration holds the - * cxl_region_rwsem over unregister events, so no need to hold on to + * cxl_rwsem.region over unregister events, so no need to hold on to * this extra reference. 
*/ put_device(dev); @@ -1088,7 +1087,7 @@ static int cxl_port_attach_region(struct cxl_port *port, unsigned long index; int rc = -EBUSY; - lockdep_assert_held_write(&cxl_region_rwsem); + lockdep_assert_held_write(&cxl_rwsem.region); cxl_rr = cxl_rr_load(port, cxlr); if (cxl_rr) { @@ -1198,7 +1197,7 @@ static void cxl_port_detach_region(struct cxl_port *port, struct cxl_region_ref *cxl_rr; struct cxl_ep *ep = NULL; - lockdep_assert_held_write(&cxl_region_rwsem); + lockdep_assert_held_write(&cxl_rwsem.region); cxl_rr = cxl_rr_load(port, cxlr); if (!cxl_rr) @@ -2094,27 +2093,43 @@ static int cxl_region_attach(struct cxl_region *cxlr, return 0; } -static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) +static struct cxl_region * +__cxl_decoder_detach(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, int pos, + enum cxl_detach_mode mode) { - struct cxl_port *iter, *ep_port = cxled_to_port(cxled); - struct cxl_region *cxlr = cxled->cxld.region; struct cxl_region_params *p; - int rc = 0; - lockdep_assert_held_write(&cxl_region_rwsem); + lockdep_assert_held_write(&cxl_rwsem.region); - if (!cxlr) - return 0; + if (!cxled) { + p = &cxlr->params; - p = &cxlr->params; - get_device(&cxlr->dev); + if (pos >= p->interleave_ways) { + dev_dbg(&cxlr->dev, "position %d out of range %d\n", + pos, p->interleave_ways); + return NULL; + } + + if (!p->targets[pos]) + return NULL; + cxled = p->targets[pos]; + } else { + cxlr = cxled->cxld.region; + if (!cxlr) + return NULL; + p = &cxlr->params; + } + + if (mode == DETACH_INVALIDATE) + cxled->part = -1; if (p->state > CXL_CONFIG_ACTIVE) { cxl_region_decode_reset(cxlr, p->interleave_ways); p->state = CXL_CONFIG_ACTIVE; } - for (iter = ep_port; !is_cxl_root(iter); + for (struct cxl_port *iter = cxled_to_port(cxled); !is_cxl_root(iter); iter = to_cxl_port(iter->dev.parent)) cxl_port_detach_region(iter, cxlr, cxled); @@ -2125,7 +2140,7 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n", dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), cxled->pos); - goto out; + return NULL; } if (p->state == CXL_CONFIG_ACTIVE) { @@ -2139,74 +2154,79 @@ static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) .end = -1, }; - /* notify the region driver that one of its targets has departed */ - up_write(&cxl_region_rwsem); - device_release_driver(&cxlr->dev); - down_write(&cxl_region_rwsem); -out: - put_device(&cxlr->dev); - return rc; + get_device(&cxlr->dev); + return cxlr; } -void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) +/* + * Clean up a decoder's interest in a region. There are two cases to + * handle: removing an unknown @cxled from a known position in a region + * (detach_target()) or removing a known @cxled from an unknown @cxlr + * (cxld_unregister()). + * + * When the detachment finds a region, release the region driver. 
+ */ +int cxl_decoder_detach(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, int pos, + enum cxl_detach_mode mode) { - down_write(&cxl_region_rwsem); - cxled->part = -1; - cxl_region_detach(cxled); - up_write(&cxl_region_rwsem); + struct cxl_region *detach; + + /* when the decoder is being destroyed lock unconditionally */ + if (mode == DETACH_INVALIDATE) { + guard(rwsem_write)(&cxl_rwsem.region); + detach = __cxl_decoder_detach(cxlr, cxled, pos, mode); + } else { + int rc; + + ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem))) + return rc; + detach = __cxl_decoder_detach(cxlr, cxled, pos, mode); + } + + if (detach) { + device_release_driver(&detach->dev); + put_device(&detach->dev); + } + return 0; +} + +static int __attach_target(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, int pos, + unsigned int state) +{ + int rc; + + if (state == TASK_INTERRUPTIBLE) { + ACQUIRE(rwsem_write_kill, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_write_kill, &rwsem))) + return rc; + guard(rwsem_read)(&cxl_rwsem.dpa); + return cxl_region_attach(cxlr, cxled, pos); + } + guard(rwsem_write)(&cxl_rwsem.region); + guard(rwsem_read)(&cxl_rwsem.dpa); + return cxl_region_attach(cxlr, cxled, pos); } static int attach_target(struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled, int pos, unsigned int state) { - int rc = 0; + int rc = __attach_target(cxlr, cxled, pos, state); - if (state == TASK_INTERRUPTIBLE) - rc = down_write_killable(&cxl_region_rwsem); - else - down_write(&cxl_region_rwsem); - if (rc) - return rc; - - down_read(&cxl_dpa_rwsem); - rc = cxl_region_attach(cxlr, cxled, pos); - up_read(&cxl_dpa_rwsem); - up_write(&cxl_region_rwsem); - - if (rc) - dev_warn(cxled->cxld.dev.parent, - "failed to attach %s to %s: %d\n", - dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc); + if (rc == 0) + return 0; + dev_warn(cxled->cxld.dev.parent, "failed to attach %s to %s: %d\n", + dev_name(&cxled->cxld.dev), dev_name(&cxlr->dev), rc); return rc; } static int detach_target(struct cxl_region *cxlr, int pos) { - struct cxl_region_params *p = &cxlr->params; - int rc; - - rc = down_write_killable(&cxl_region_rwsem); - if (rc) - return rc; - - if (pos >= p->interleave_ways) { - dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, - p->interleave_ways); - rc = -ENXIO; - goto out; - } - - if (!p->targets[pos]) { - rc = 0; - goto out; - } - - rc = cxl_region_detach(p->targets[pos]); -out: - up_write(&cxl_region_rwsem); - return rc; + return cxl_decoder_detach(cxlr, NULL, pos, DETACH_ONLY); } static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos, @@ -2460,7 +2480,7 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb, return NOTIFY_DONE; /* - * No need to hold cxl_region_rwsem; region parameters are stable + * No need to hold cxl_rwsem.region; region parameters are stable * within the cxl_region driver. */ region_nid = phys_to_target_node(cxlr->params.res->start); @@ -2483,7 +2503,7 @@ static int cxl_region_calculate_adistance(struct notifier_block *nb, int region_nid; /* - * No need to hold cxl_region_rwsem; region parameters are stable + * No need to hold cxl_rwsem.region; region parameters are stable * within the cxl_region driver. 
*/ region_nid = phys_to_target_node(cxlr->params.res->start); @@ -2632,17 +2652,13 @@ static ssize_t region_show(struct device *dev, struct device_attribute *attr, struct cxl_decoder *cxld = to_cxl_decoder(dev); ssize_t rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) return rc; if (cxld->region) - rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); - else - rc = sysfs_emit(buf, "\n"); - up_read(&cxl_region_rwsem); - - return rc; + return sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); + return sysfs_emit(buf, "\n"); } DEVICE_ATTR_RO(region); @@ -2847,7 +2863,7 @@ static int __cxl_dpa_to_region(struct device *dev, void *arg) if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res)) return 0; - if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) + if (!cxl_resource_contains_addr(cxled->dpa_res, dpa)) return 0; /* @@ -2959,7 +2975,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, if (cxlrd->hpa_to_spa) hpa = cxlrd->hpa_to_spa(cxlrd, hpa); - if (hpa < p->res->start || hpa > p->res->end) { + if (!cxl_resource_contains_addr(p->res, hpa)) { dev_dbg(&cxlr->dev, "Addr trans fail: hpa 0x%llx not in region\n", hpa); return ULLONG_MAX; @@ -2981,7 +2997,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr) struct device *dev; int i; - guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); if (p->state != CXL_CONFIG_COMMIT) return -ENXIO; @@ -2993,7 +3009,7 @@ static int cxl_pmem_region_alloc(struct cxl_region *cxlr) cxlr_pmem->hpa_range.start = p->res->start; cxlr_pmem->hpa_range.end = p->res->end; - /* Snapshot the region configuration underneath the cxl_region_rwsem */ + /* Snapshot the region configuration underneath the cxl_rwsem.region */ cxlr_pmem->nr_mappings = p->nr_targets; for (i = 0; i < p->nr_targets; i++) { struct cxl_endpoint_decoder *cxled = p->targets[i]; @@ -3070,7 +3086,7 @@ static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr) struct cxl_dax_region *cxlr_dax; struct device *dev; - guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); if (p->state != CXL_CONFIG_COMMIT) return ERR_PTR(-ENXIO); @@ -3270,7 +3286,7 @@ static int match_region_by_range(struct device *dev, const void *data) cxlr = to_cxl_region(dev); p = &cxlr->params; - guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_rwsem.region); if (p->res && p->res->start == r->start && p->res->end == r->end) return 1; @@ -3282,15 +3298,10 @@ static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr, { struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); struct cxl_region_params *p = &cxlr->params; - int nid = phys_to_target_node(res->start); resource_size_t size = resource_size(res); resource_size_t cache_size, start; - int rc; - - rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size); - if (rc) - return rc; + cache_size = cxlrd->cache_size; if (!cache_size) return 0; @@ -3330,7 +3341,7 @@ static int __construct_region(struct cxl_region *cxlr, struct resource *res; int rc; - guard(rwsem_write)(&cxl_region_rwsem); + guard(rwsem_write)(&cxl_rwsem.region); p = &cxlr->params; if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { dev_err(cxlmd->dev.parent, @@ -3466,10 +3477,10 @@ int cxl_add_to_region(struct cxl_endpoint_decoder *cxled) attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE); - down_read(&cxl_region_rwsem); - p = 
&cxlr->params; - attach = p->state == CXL_CONFIG_COMMIT; - up_read(&cxl_region_rwsem); + scoped_guard(rwsem_read, &cxl_rwsem.region) { + p = &cxlr->params; + attach = p->state == CXL_CONFIG_COMMIT; + } if (attach) { /* @@ -3494,12 +3505,12 @@ u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa) if (!endpoint) return ~0ULL; - guard(rwsem_write)(&cxl_region_rwsem); + guard(rwsem_write)(&cxl_rwsem.region); xa_for_each(&endpoint->regions, index, iter) { struct cxl_region_params *p = &iter->region->params; - if (p->res->start <= spa && spa <= p->res->end) { + if (cxl_resource_contains_addr(p->res, spa)) { if (!p->cache_size) return ~0ULL; @@ -3531,40 +3542,45 @@ static void shutdown_notifiers(void *_cxlr) unregister_mt_adistance_algorithm(&cxlr->adist_notifier); } -static int cxl_region_probe(struct device *dev) +static int cxl_region_can_probe(struct cxl_region *cxlr) { - struct cxl_region *cxlr = to_cxl_region(dev); struct cxl_region_params *p = &cxlr->params; int rc; - rc = down_read_interruptible(&cxl_region_rwsem); - if (rc) { + ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region); + if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem))) { dev_dbg(&cxlr->dev, "probe interrupted\n"); return rc; } if (p->state < CXL_CONFIG_COMMIT) { dev_dbg(&cxlr->dev, "config state: %d\n", p->state); - rc = -ENXIO; - goto out; + return -ENXIO; } if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) { dev_err(&cxlr->dev, "failed to activate, re-commit region and retry\n"); - rc = -ENXIO; - goto out; + return -ENXIO; } + return 0; +} + +static int cxl_region_probe(struct device *dev) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + int rc; + + rc = cxl_region_can_probe(cxlr); + if (rc) + return rc; + /* * From this point on any path that changes the region's state away from * CXL_CONFIG_COMMIT is also responsible for releasing the driver. 
*/ -out: - up_read(&cxl_region_rwsem); - - if (rc) - return rc; cxlr->node_notifier.notifier_call = cxl_region_perf_attrs_callback; cxlr->node_notifier.priority = CXL_CALLBACK_PRI; diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 25ebfbc1616c..a53ec4798b12 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -214,12 +214,16 @@ TRACE_EVENT(cxl_overflow, #define CXL_EVENT_RECORD_FLAG_PERF_DEGRADED BIT(4) #define CXL_EVENT_RECORD_FLAG_HW_REPLACE BIT(5) #define CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID BIT(6) +#define CXL_EVENT_RECORD_FLAG_LD_ID_VALID BIT(7) +#define CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID BIT(8) #define show_hdr_flags(flags) __print_flags(flags, " | ", \ { CXL_EVENT_RECORD_FLAG_PERMANENT, "PERMANENT_CONDITION" }, \ { CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, "MAINTENANCE_NEEDED" }, \ { CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, "PERFORMANCE_DEGRADED" }, \ { CXL_EVENT_RECORD_FLAG_HW_REPLACE, "HARDWARE_REPLACEMENT_NEEDED" }, \ - { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" } \ + { CXL_EVENT_RECORD_FLAG_MAINT_OP_SUB_CLASS_VALID, "MAINT_OP_SUB_CLASS_VALID" }, \ + { CXL_EVENT_RECORD_FLAG_LD_ID_VALID, "LD_ID_VALID" }, \ + { CXL_EVENT_RECORD_FLAG_HEAD_ID_VALID, "HEAD_ID_VALID" } \ ) /* @@ -247,7 +251,9 @@ TRACE_EVENT(cxl_overflow, __field(u64, hdr_timestamp) \ __field(u8, hdr_length) \ __field(u8, hdr_maint_op_class) \ - __field(u8, hdr_maint_op_sub_class) + __field(u8, hdr_maint_op_sub_class) \ + __field(u16, hdr_ld_id) \ + __field(u8, hdr_head_id) #define CXL_EVT_TP_fast_assign(cxlmd, l, hdr) \ __assign_str(memdev); \ @@ -260,18 +266,22 @@ TRACE_EVENT(cxl_overflow, __entry->hdr_related_handle = le16_to_cpu((hdr).related_handle); \ __entry->hdr_timestamp = le64_to_cpu((hdr).timestamp); \ __entry->hdr_maint_op_class = (hdr).maint_op_class; \ - __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class + __entry->hdr_maint_op_sub_class = (hdr).maint_op_sub_class; \ + __entry->hdr_ld_id = le16_to_cpu((hdr).ld_id); \ + __entry->hdr_head_id = (hdr).head_id #define CXL_EVT_TP_printk(fmt, ...) 
\ TP_printk("memdev=%s host=%s serial=%lld log=%s : time=%llu uuid=%pUb " \ "len=%d flags='%s' handle=%x related_handle=%x " \ - "maint_op_class=%u maint_op_sub_class=%u : " fmt, \ + "maint_op_class=%u maint_op_sub_class=%u " \ + "ld_id=%x head_id=%x : " fmt, \ __get_str(memdev), __get_str(host), __entry->serial, \ cxl_event_log_type_str(__entry->log), \ __entry->hdr_timestamp, &__entry->hdr_uuid, __entry->hdr_length,\ show_hdr_flags(__entry->hdr_flags), __entry->hdr_handle, \ __entry->hdr_related_handle, __entry->hdr_maint_op_class, \ __entry->hdr_maint_op_sub_class, \ + __entry->hdr_ld_id, __entry->hdr_head_id, \ ##__VA_ARGS__) TRACE_EVENT(cxl_generic_event, @@ -496,7 +506,10 @@ TRACE_EVENT(cxl_general_media, uuid_copy(&__entry->region_uuid, &uuid_null); } __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags; - __entry->cme_count = get_unaligned_le24(rec->cme_count); + if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT) + __entry->cme_count = get_unaligned_le24(rec->cme_count); + else + __entry->cme_count = 0; ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \ @@ -648,7 +661,10 @@ TRACE_EVENT(cxl_dram, CXL_EVENT_GEN_MED_COMP_ID_SIZE); __entry->sub_channel = rec->sub_channel; __entry->cme_threshold_ev_flags = rec->cme_threshold_ev_flags; - __entry->cvme_count = get_unaligned_le24(rec->cvme_count); + if (rec->media_hdr.descriptor & CXL_GMER_EVT_DESC_THRESHOLD_EVENT) + __entry->cvme_count = get_unaligned_le24(rec->cvme_count); + else + __entry->cvme_count = 0; ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' sub_type='%s' " \ @@ -871,6 +887,111 @@ TRACE_EVENT(cxl_memory_module, ) ); +/* + * Memory Sparing Event Record - MSER + * + * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60 + */ +#define CXL_MSER_QUERY_RESOURCE_FLAG BIT(0) +#define CXL_MSER_HARD_SPARING_FLAG BIT(1) +#define CXL_MSER_DEV_INITED_FLAG BIT(2) +#define show_mem_sparing_flags(flags) __print_flags(flags, "|", \ + { CXL_MSER_QUERY_RESOURCE_FLAG, "Query Resources" }, \ + { CXL_MSER_HARD_SPARING_FLAG, "Hard Sparing" }, \ + { CXL_MSER_DEV_INITED_FLAG, "Device Initiated Sparing" } \ +) + +#define CXL_MSER_VALID_CHANNEL BIT(0) +#define CXL_MSER_VALID_RANK BIT(1) +#define CXL_MSER_VALID_NIBBLE BIT(2) +#define CXL_MSER_VALID_BANK_GROUP BIT(3) +#define CXL_MSER_VALID_BANK BIT(4) +#define CXL_MSER_VALID_ROW BIT(5) +#define CXL_MSER_VALID_COLUMN BIT(6) +#define CXL_MSER_VALID_COMPONENT_ID BIT(7) +#define CXL_MSER_VALID_COMPONENT_ID_FORMAT BIT(8) +#define CXL_MSER_VALID_SUB_CHANNEL BIT(9) +#define show_mem_sparing_valid_flags(flags) __print_flags(flags, "|", \ + { CXL_MSER_VALID_CHANNEL, "CHANNEL" }, \ + { CXL_MSER_VALID_RANK, "RANK" }, \ + { CXL_MSER_VALID_NIBBLE, "NIBBLE" }, \ + { CXL_MSER_VALID_BANK_GROUP, "BANK GROUP" }, \ + { CXL_MSER_VALID_BANK, "BANK" }, \ + { CXL_MSER_VALID_ROW, "ROW" }, \ + { CXL_MSER_VALID_COLUMN, "COLUMN" }, \ + { CXL_MSER_VALID_COMPONENT_ID, "COMPONENT ID" }, \ + { CXL_MSER_VALID_COMPONENT_ID_FORMAT, "COMPONENT ID PLDM FORMAT" }, \ + { CXL_MSER_VALID_SUB_CHANNEL, "SUB CHANNEL" } \ +) + +TRACE_EVENT(cxl_memory_sparing, + + TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, + struct cxl_event_mem_sparing *rec), + + TP_ARGS(cxlmd, log, rec), + + TP_STRUCT__entry( + CXL_EVT_TP_entry + + /* Memory Sparing Event */ + __field(u8, flags) + __field(u8, result) + __field(u16, validity_flags) + __field(u16, res_avail) + __field(u8, channel) + __field(u8, rank) + __field(u32, nibble_mask) + __field(u8, bank_group) + __field(u8, bank) + __field(u32, row) + 
__field(u16, column) + __field(u8, sub_channel) + __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) + ), + + TP_fast_assign( + CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr); + __entry->hdr_uuid = CXL_EVENT_MEM_SPARING_UUID; + + /* Memory Sparing Event */ + __entry->flags = rec->flags; + __entry->result = rec->result; + __entry->validity_flags = le16_to_cpu(rec->validity_flags); + __entry->res_avail = le16_to_cpu(rec->res_avail); + __entry->channel = rec->channel; + __entry->rank = rec->rank; + __entry->nibble_mask = get_unaligned_le24(rec->nibble_mask); + __entry->bank_group = rec->bank_group; + __entry->bank = rec->bank; + __entry->row = get_unaligned_le24(rec->row); + __entry->column = le16_to_cpu(rec->column); + __entry->sub_channel = rec->sub_channel; + memcpy(__entry->comp_id, &rec->component_id, + CXL_EVENT_GEN_MED_COMP_ID_SIZE); + ), + + CXL_EVT_TP_printk("flags='%s' result=%u validity_flags='%s' " \ + "spare resource avail=%u channel=%u rank=%u " \ + "nibble_mask=%x bank_group=%u bank=%u " \ + "row=%u column=%u sub_channel=%u " \ + "comp_id=%s comp_id_pldm_valid_flags='%s' " \ + "pldm_entity_id=%s pldm_resource_id=%s", + show_mem_sparing_flags(__entry->flags), + __entry->result, + show_mem_sparing_valid_flags(__entry->validity_flags), + __entry->res_avail, __entry->channel, __entry->rank, + __entry->nibble_mask, __entry->bank_group, __entry->bank, + __entry->row, __entry->column, __entry->sub_channel, + __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), + show_comp_id_pldm_flags(__entry->comp_id[0]), + show_pldm_entity_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID, + CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id), + show_pldm_resource_id(__entry->validity_flags, CXL_MSER_VALID_COMPONENT_ID, + CXL_MSER_VALID_COMPONENT_ID_FORMAT, __entry->comp_id) + ) +); + #define show_poison_trace_type(type) \ __print_symbolic(type, \ { CXL_POISON_TRACE_LIST, "List" }, \ diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index ad863572ddb7..847e37be42c4 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -424,6 +424,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa); /** * struct cxl_root_decoder - Static platform CXL address decoder * @res: host / parent resource for region allocations + * @cache_size: extended linear cache size if exists, otherwise zero. * @region_id: region id for next region provisioning event * @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address * @platform_data: platform specific configuration data @@ -433,6 +434,7 @@ typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa); */ struct cxl_root_decoder { struct resource *res; + resource_size_t cache_size; atomic_t region_id; cxl_hpa_to_spa_fn hpa_to_spa; void *platform_data; @@ -470,7 +472,7 @@ enum cxl_config_state { * @nr_targets: number of targets * @cache_size: extended linear cache size if exists, otherwise zero. 
* - * State transitions are protected by the cxl_region_rwsem + * State transitions are protected by cxl_rwsem.region */ struct cxl_region_params { enum cxl_config_state state; @@ -816,7 +818,7 @@ int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds, bool is_cxl_region(struct device *dev); -extern struct bus_type cxl_bus_type; +extern const struct bus_type cxl_bus_type; struct cxl_driver { const char *name; @@ -913,15 +915,4 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port); #endif u16 cxl_gpf_get_dvsec(struct device *dev); - -static inline struct rw_semaphore *rwsem_read_intr_acquire(struct rw_semaphore *rwsem) -{ - if (down_read_interruptible(rwsem)) - return NULL; - - return rwsem; -} - -DEFINE_FREE(rwsem_read_release, struct rw_semaphore *, if (_T) up_read(_T)) - #endif /* __CXL_H__ */ diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 551b0ba2caa1..751478dfc410 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -254,7 +254,7 @@ enum security_cmd_enabled_bits { * @max_errors: Maximum media error records held in device cache * @enabled_cmds: All poison commands enabled in the CEL * @list_out: The poison list payload returned by device - * @lock: Protect reads of the poison list + * @mutex: Protect reads of the poison list * * Reads of the poison list are synchronized to ensure that a reader * does not get an incomplete list because their request overlapped @@ -265,7 +265,7 @@ struct cxl_poison_state { u32 max_errors; DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX); struct cxl_mbox_poison_out *list_out; - struct mutex lock; /* Protect reads of poison list */ + struct mutex mutex; /* Protect reads of poison list */ }; /* @@ -634,6 +634,14 @@ struct cxl_mbox_identify { 0x13, 0xb7, 0x74) /* + * Memory Sparing Event Record UUID + * CXL rev 3.2 section 8.2.10.2.1.4: Table 8-60 + */ +#define CXL_EVENT_MEM_SPARING_UUID \ + UUID_INIT(0xe71f3a40, 0x2d29, 0x4092, 0x8a, 0x39, 0x4d, 0x1c, 0x96, \ + 0x6c, 0x7c, 0x65) + +/* * Get Event Records output payload * CXL rev 3.0 section 8.2.9.2.2; Table 8-50 */ diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 785aa2af5eaa..bd100ac31672 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -379,7 +379,7 @@ static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox, { int rc; - mutex_lock_io(&cxl_mbox->mbox_mutex); + mutex_lock(&cxl_mbox->mbox_mutex); rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd); mutex_unlock(&cxl_mbox->mbox_mutex); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index db87dd2a07f7..05c7c7d9e5a4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -89,7 +89,6 @@ config APPLE_ADMAC tristate "Apple ADMAC support" depends on ARCH_APPLE || COMPILE_TEST select DMA_ENGINE - default ARCH_APPLE help Enable support for Audio DMA Controller found on Apple Silicon SoCs. @@ -111,7 +110,7 @@ config AT_HDMAC config AT_XDMAC tristate "Atmel XDMA support" - depends on ARCH_AT91 + depends on ARCH_MICROCHIP select DMA_ENGINE help Support the Atmel XDMA controller. @@ -572,6 +571,15 @@ config PLX_DMA These are exposed via extra functions on the switch's upstream port. Each function exposes one DMA channel. +config SOPHGO_CV1800B_DMAMUX + tristate "Sophgo CV1800/SG2000 series SoC DMA multiplexer support" + depends on MFD_SYSCON + depends on ARCH_SOPHGO || COMPILE_TEST + help + Support for the DMA multiplexer on Sophgo CV1800/SG2000 + series SoCs. + Say Y here if your board has this SoC. 
+ config STE_DMA40 bool "ST-Ericsson DMA40 support" depends on ARCH_U8500 diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index ba9732644752..a54d7688392b 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o obj-$(CONFIG_RENESAS_DMA) += sh/ obj-$(CONFIG_SF_PDMA) += sf-pdma/ +obj-$(CONFIG_SOPHGO_CV1800B_DMAMUX) += cv1800b-dmamux.o obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o obj-$(CONFIG_SPRD_DMA) += sprd-dma.o obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o diff --git a/drivers/dma/cv1800b-dmamux.c b/drivers/dma/cv1800b-dmamux.c new file mode 100644 index 000000000000..e900d6595617 --- /dev/null +++ b/drivers/dma/cv1800b-dmamux.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025 Inochi Amaoto <inochiama@gmail.com> + */ + +#include <linux/bitops.h> +#include <linux/cleanup.h> +#include <linux/module.h> +#include <linux/of_dma.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/llist.h> +#include <linux/regmap.h> +#include <linux/spinlock.h> +#include <linux/mfd/syscon.h> + +#define REG_DMA_CHANNEL_REMAP0 0x154 +#define REG_DMA_CHANNEL_REMAP1 0x158 +#define REG_DMA_INT_MUX 0x298 + +#define DMAMUX_NCELLS 2 +#define MAX_DMA_MAPPING_ID 42 +#define MAX_DMA_CPU_ID 2 +#define MAX_DMA_CH_ID 7 + +#define DMAMUX_INTMUX_REGISTER_LEN 4 +#define DMAMUX_NR_CH_PER_REGISTER 4 +#define DMAMUX_BIT_PER_CH 8 +#define DMAMUX_CH_MASk GENMASK(5, 0) +#define DMAMUX_INT_BIT_PER_CPU 10 +#define DMAMUX_CH_UPDATE_BIT BIT(31) + +#define DMAMUX_CH_REGPOS(chid) \ + ((chid) / DMAMUX_NR_CH_PER_REGISTER) +#define DMAMUX_CH_REGOFF(chid) \ + ((chid) % DMAMUX_NR_CH_PER_REGISTER) +#define DMAMUX_CH_REG(chid) \ + ((DMAMUX_CH_REGPOS(chid) * sizeof(u32)) + \ + REG_DMA_CHANNEL_REMAP0) +#define DMAMUX_CH_SET(chid, val) \ + (((val) << (DMAMUX_CH_REGOFF(chid) * DMAMUX_BIT_PER_CH)) | \ + DMAMUX_CH_UPDATE_BIT) +#define DMAMUX_CH_MASK(chid) \ + DMAMUX_CH_SET(chid, DMAMUX_CH_MASk) + +#define DMAMUX_INT_BIT(chid, cpuid) \ + BIT((cpuid) * DMAMUX_INT_BIT_PER_CPU + (chid)) +#define DMAMUX_INTEN_BIT(cpuid) \ + DMAMUX_INT_BIT(8, cpuid) +#define DMAMUX_INT_CH_BIT(chid, cpuid) \ + (DMAMUX_INT_BIT(chid, cpuid) | DMAMUX_INTEN_BIT(cpuid)) +#define DMAMUX_INT_MASK(chid) \ + (DMAMUX_INT_BIT(chid, 0) | \ + DMAMUX_INT_BIT(chid, 1) | \ + DMAMUX_INT_BIT(chid, 2)) +#define DMAMUX_INT_CH_MASK(chid, cpuid) \ + (DMAMUX_INT_MASK(chid) | DMAMUX_INTEN_BIT(cpuid)) + +struct cv1800_dmamux_data { + struct dma_router dmarouter; + struct regmap *regmap; + spinlock_t lock; + struct llist_head free_maps; + struct llist_head reserve_maps; + DECLARE_BITMAP(mapped_peripherals, MAX_DMA_MAPPING_ID); +}; + +struct cv1800_dmamux_map { + struct llist_node node; + unsigned int channel; + unsigned int peripheral; + unsigned int cpu; +}; + +static void cv1800_dmamux_free(struct device *dev, void *route_data) +{ + struct cv1800_dmamux_data *dmamux = dev_get_drvdata(dev); + struct cv1800_dmamux_map *map = route_data; + + guard(spinlock_irqsave)(&dmamux->lock); + + regmap_update_bits(dmamux->regmap, + DMAMUX_CH_REG(map->channel), + DMAMUX_CH_MASK(map->channel), + DMAMUX_CH_UPDATE_BIT); + + regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX, + DMAMUX_INT_CH_MASK(map->channel, map->cpu), + DMAMUX_INTEN_BIT(map->cpu)); + + dev_dbg(dev, "free channel %u for req %u (cpu %u)\n", + map->channel, map->peripheral, map->cpu); +} + +static void *cv1800_dmamux_route_allocate(struct 
of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct cv1800_dmamux_data *dmamux = platform_get_drvdata(pdev); + struct cv1800_dmamux_map *map; + struct llist_node *node; + unsigned long flags; + unsigned int chid, devid, cpuid; + int ret; + + if (dma_spec->args_count != DMAMUX_NCELLS) { + dev_err(&pdev->dev, "invalid number of dma mux args\n"); + return ERR_PTR(-EINVAL); + } + + devid = dma_spec->args[0]; + cpuid = dma_spec->args[1]; + dma_spec->args_count = 1; + + if (devid > MAX_DMA_MAPPING_ID) { + dev_err(&pdev->dev, "invalid device id: %u\n", devid); + return ERR_PTR(-EINVAL); + } + + if (cpuid > MAX_DMA_CPU_ID) { + dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid); + return ERR_PTR(-EINVAL); + } + + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); + if (!dma_spec->np) { + dev_err(&pdev->dev, "can't get dma master\n"); + return ERR_PTR(-EINVAL); + } + + spin_lock_irqsave(&dmamux->lock, flags); + + if (test_bit(devid, dmamux->mapped_peripherals)) { + llist_for_each_entry(map, dmamux->reserve_maps.first, node) { + if (map->peripheral == devid && map->cpu == cpuid) + goto found; + } + + ret = -EINVAL; + goto failed; + } else { + node = llist_del_first(&dmamux->free_maps); + if (!node) { + ret = -ENODEV; + goto failed; + } + + map = llist_entry(node, struct cv1800_dmamux_map, node); + llist_add(&map->node, &dmamux->reserve_maps); + set_bit(devid, dmamux->mapped_peripherals); + } + +found: + chid = map->channel; + map->peripheral = devid; + map->cpu = cpuid; + + regmap_set_bits(dmamux->regmap, + DMAMUX_CH_REG(chid), + DMAMUX_CH_SET(chid, devid)); + + regmap_update_bits(dmamux->regmap, REG_DMA_INT_MUX, + DMAMUX_INT_CH_MASK(chid, cpuid), + DMAMUX_INT_CH_BIT(chid, cpuid)); + + spin_unlock_irqrestore(&dmamux->lock, flags); + + dma_spec->args[0] = chid; + + dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n", + chid, devid, cpuid); + + return map; + +failed: + spin_unlock_irqrestore(&dmamux->lock, flags); + of_node_put(dma_spec->np); + dev_err(&pdev->dev, "errno %d\n", ret); + return ERR_PTR(ret); +} + +static int cv1800_dmamux_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *mux_node = dev->of_node; + struct cv1800_dmamux_data *data; + struct cv1800_dmamux_map *tmp; + struct device *parent = dev->parent; + struct regmap *regmap = NULL; + unsigned int i; + + if (!parent) + return -ENODEV; + + regmap = device_node_to_regmap(parent->of_node); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + spin_lock_init(&data->lock); + init_llist_head(&data->free_maps); + init_llist_head(&data->reserve_maps); + + for (i = 0; i <= MAX_DMA_CH_ID; i++) { + tmp = devm_kmalloc(dev, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + /* It is OK if not all channels can be allocated */ + dev_warn(dev, "cannot allocate channel %u\n", i); + continue; + } + + init_llist_node(&tmp->node); + tmp->channel = i; + llist_add(&tmp->node, &data->free_maps); + } + + /* if no channel is allocated, the probe must fail */ + if (llist_empty(&data->free_maps)) + return -ENOMEM; + + data->regmap = regmap; + data->dmarouter.dev = dev; + data->dmarouter.route_free = cv1800_dmamux_free; + + platform_set_drvdata(pdev, data); + + return of_dma_router_register(mux_node, + cv1800_dmamux_route_allocate, + &data->dmarouter); +} + +static void cv1800_dmamux_remove(struct platform_device *pdev) +{ + 
of_dma_controller_free(pdev->dev.of_node); +} + +static const struct of_device_id cv1800_dmamux_ids[] = { + { .compatible = "sophgo,cv1800b-dmamux", }, + { } +}; +MODULE_DEVICE_TABLE(of, cv1800_dmamux_ids); + +static struct platform_driver cv1800_dmamux_driver = { + .probe = cv1800_dmamux_probe, + .remove = cv1800_dmamux_remove, + .driver = { + .name = "cv1800-dmamux", + .of_match_table = cv1800_dmamux_ids, + }, +}; +module_platform_driver(cv1800_dmamux_driver); + +MODULE_AUTHOR("Inochi Amaoto <inochiama@gmail.com>"); +MODULE_DESCRIPTION("Sophgo CV1800/SG2000 Series SoC DMAMUX driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c index c2b88cc99e5d..b43255f914f3 100644 --- a/drivers/dma/dw-edma/dw-edma-core.c +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -24,18 +24,6 @@ #include "../virt-dma.h" static inline -struct device *dchan2dev(struct dma_chan *dchan) -{ - return &dchan->dev->device; -} - -static inline -struct device *chan2dev(struct dw_edma_chan *chan) -{ - return &chan->vc.chan.dev->device; -} - -static inline struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) { return container_of(vd, struct dw_edma_desc, vd); diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c index b4323d243d6d..4be81db24a19 100644 --- a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c +++ b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c @@ -48,11 +48,6 @@ struct dpdmai_cmd_destroy { __le32 dpdmai_id; } __packed; -static inline u64 mc_enc(int lsoffset, int width, u64 val) -{ - return (val & MAKE_UMASK64(width)) << lsoffset; -} - /** * dpdmai_open() - Open a control session for the specified object * @mc_io: Pointer to MC portal's I/O object diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 823f5c6bc2e1..21e13f1207cb 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -148,6 +148,9 @@ * @__reserved1: Reserved field. * @cfg8b_w1: Compound descriptor command queue origin produced * by qDMA and dynamic debug field. + * @__reserved2: Reserved field. + * @cmd: Command for QDMA (see FSL_QDMA_CMD_RWTTYPE and + * others). * @data: Pointer to the memory 40-bit address, describes DMA * source information and DMA destination information. 
*/ diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 80355d03004d..35bdefd3728b 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -1036,7 +1036,6 @@ static void idxd_reset_prepare(struct pci_dev *pdev) const char *idxd_name; int rc; - dev = &idxd->pdev->dev; idxd_name = dev_name(idxd_confdev(idxd)); struct idxd_saved_states *idxd_saved __free(kfree) = diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 006ba206ab1b..9c1c546fe443 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -45,7 +45,7 @@ union gen_cap_reg { u64 rsvd3:32; }; u64 bits; -} __packed; +}; #define IDXD_GENCAP_OFFSET 0x10 union wq_cap_reg { @@ -65,7 +65,7 @@ union wq_cap_reg { u64 rsvd4:8; }; u64 bits; -} __packed; +}; #define IDXD_WQCAP_OFFSET 0x20 #define IDXD_WQCFG_MIN 5 @@ -79,7 +79,7 @@ union group_cap_reg { u64 rsvd:45; }; u64 bits; -} __packed; +}; #define IDXD_GRPCAP_OFFSET 0x30 union engine_cap_reg { @@ -88,7 +88,7 @@ union engine_cap_reg { u64 rsvd:56; }; u64 bits; -} __packed; +}; #define IDXD_ENGCAP_OFFSET 0x38 @@ -114,7 +114,7 @@ union offsets_reg { u64 rsvd:48; }; u64 bits[2]; -} __packed; +}; #define IDXD_TABLE_MULT 0x100 @@ -128,7 +128,7 @@ union gencfg_reg { u32 rsvd2:18; }; u32 bits; -} __packed; +}; #define IDXD_GENCTRL_OFFSET 0x88 union genctrl_reg { @@ -139,7 +139,7 @@ union genctrl_reg { u32 rsvd:29; }; u32 bits; -} __packed; +}; #define IDXD_GENSTATS_OFFSET 0x90 union gensts_reg { @@ -149,7 +149,7 @@ union gensts_reg { u32 rsvd:28; }; u32 bits; -} __packed; +}; enum idxd_device_status_state { IDXD_DEVICE_STATE_DISABLED = 0, @@ -183,7 +183,7 @@ union idxd_command_reg { u32 int_req:1; }; u32 bits; -} __packed; +}; enum idxd_cmd { IDXD_CMD_ENABLE_DEVICE = 1, @@ -213,7 +213,7 @@ union cmdsts_reg { u8 active:1; }; u32 bits; -} __packed; +}; #define IDXD_CMDSTS_ACTIVE 0x80000000 #define IDXD_CMDSTS_ERR_MASK 0xff #define IDXD_CMDSTS_RES_SHIFT 8 @@ -284,7 +284,7 @@ union sw_err_reg { u64 rsvd5; }; u64 bits[4]; -} __packed; +}; union iaa_cap_reg { struct { @@ -303,7 +303,7 @@ union iaa_cap_reg { u64 rsvd:52; }; u64 bits; -} __packed; +}; #define IDXD_IAACAP_OFFSET 0x180 @@ -320,7 +320,7 @@ union evlcfg_reg { u64 rsvd2:28; }; u64 bits[2]; -} __packed; +}; #define IDXD_EVL_SIZE_MIN 0x0040 #define IDXD_EVL_SIZE_MAX 0xffff @@ -334,7 +334,7 @@ union msix_perm { u32 pasid:20; }; u32 bits; -} __packed; +}; union group_flags { struct { @@ -352,13 +352,13 @@ union group_flags { u64 rsvd5:26; }; u64 bits; -} __packed; +}; struct grpcfg { u64 wqs[4]; u64 engines; union group_flags flags; -} __packed; +}; union wqcfg { struct { @@ -410,7 +410,7 @@ union wqcfg { u64 op_config[4]; }; u32 bits[16]; -} __packed; +}; #define WQCFG_PASID_IDX 2 #define WQCFG_PRIVL_IDX 2 @@ -474,7 +474,7 @@ union idxd_perfcap { u64 rsvd3:8; }; u64 bits; -} __packed; +}; #define IDXD_EVNTCAP_OFFSET 0x80 union idxd_evntcap { @@ -483,7 +483,7 @@ union idxd_evntcap { u64 rsvd:36; }; u64 bits; -} __packed; +}; struct idxd_event { union { @@ -493,7 +493,7 @@ struct idxd_event { }; u32 val; }; -} __packed; +}; #define IDXD_CNTRCAP_OFFSET 0x800 struct idxd_cntrcap { @@ -506,7 +506,7 @@ struct idxd_cntrcap { u32 val; }; struct idxd_event events[]; -} __packed; +}; #define IDXD_PERFRST_OFFSET 0x10 union idxd_perfrst { @@ -516,7 +516,7 @@ union idxd_perfrst { u32 rsvd:30; }; u32 val; -} __packed; +}; #define IDXD_OVFSTATUS_OFFSET 0x30 #define IDXD_PERFFRZ_OFFSET 0x20 @@ -533,7 +533,7 @@ union idxd_cntrcfg { u64 rsvd3:4; }; u64 val; -} __packed; +}; #define 
IDXD_FLTCFG_OFFSET 0x300 @@ -543,7 +543,7 @@ union idxd_cntrdata { u64 event_count_value; }; u64 val; -} __packed; +}; union event_cfg { struct { @@ -551,7 +551,7 @@ union event_cfg { u64 event_enc:28; }; u64 val; -} __packed; +}; union filter_cfg { struct { @@ -562,7 +562,7 @@ union filter_cfg { u64 eng:8; }; u64 val; -} __packed; +}; #define IDXD_EVLSTATUS_OFFSET 0xf0 @@ -580,7 +580,7 @@ union evl_status_reg { u32 bits_upper32; }; u64 bits; -} __packed; +}; #define IDXD_MAX_BATCH_IDENT 256 @@ -620,17 +620,17 @@ struct __evl_entry { }; u64 fault_addr; u64 rsvd5; -} __packed; +}; struct dsa_evl_entry { struct __evl_entry e; struct dsa_completion_record cr; -} __packed; +}; struct iax_evl_entry { struct __evl_entry e; u64 rsvd[4]; struct iax_completion_record cr; -} __packed; +}; #endif diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index c8dc504510f1..b7fb843c67a6 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -641,7 +641,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) int chan_num = TDMA_CHANNEL_NUM; struct gen_pool *pool = NULL; - type = (enum mmp_tdma_type)device_get_match_data(&pdev->dev); + type = (kernel_ulong_t)device_get_match_data(&pdev->dev); /* always have couple channels */ tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index fa6e4646fdc2..1fdcb0f5c9e7 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1061,8 +1061,16 @@ mv_xor_channel_add(struct mv_xor_device *xordev, */ mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev, mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr)) + return ERR_PTR(-ENOMEM); + mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev, mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) { + ret = -ENOMEM; + goto err_unmap_src; + } + /* allocate coherent memory for hardware descriptors * note: writecombine gives slightly better performance, but @@ -1071,8 +1079,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, mv_chan->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool, GFP_KERNEL); - if (!mv_chan->dma_desc_pool_virt) - return ERR_PTR(-ENOMEM); + if (!mv_chan->dma_desc_pool_virt) { + ret = -ENOMEM; + goto err_unmap_dst; + } /* discover transaction capabilities from the platform data */ dma_dev->cap_mask = cap_mask; @@ -1155,6 +1165,13 @@ err_free_irq: err_free_dma: dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); +err_unmap_dst: + dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr, + MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); +err_unmap_src: + dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr, + MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); + return ERR_PTR(ret); } diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 7a2488a0d6a3..765462303de0 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -711,6 +711,9 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan) list_add_tail(&ldesc->node, &lhead); ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev, hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE); + if (dma_mapping_error(dchan->device->dev, + ldesc->hwdesc_dma_addr)) + goto unmap_error; dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__, hwdesc, &ldesc->hwdesc_dma_addr); @@ -737,6 +740,16 @@ static int nbpf_desc_page_alloc(struct nbpf_channel *chan) 
spin_unlock_irq(&chan->lock); return ARRAY_SIZE(dpage->desc); + +unmap_error: + while (i--) { + ldesc--; hwdesc--; + + dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, + sizeof(hwdesc), DMA_TO_DEVICE); + } + + return -ENOMEM; } static void nbpf_desc_put(struct nbpf_desc *desc) diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index b1f0001cc99c..8e87738086b2 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -569,17 +569,6 @@ static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val) writel_relaxed(val, addr); } -/* gpi_write_reg_field - write to specific bit field */ -static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr, - u32 mask, u32 shift, u32 val) -{ - u32 tmp = gpi_read_reg(gpii, addr); - - tmp &= ~mask; - val = tmp | ((val << shift) & mask); - gpi_write_reg(gpii, addr, val); -} - static __always_inline void gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val) { diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 6ea5a880b433..8184d475a49a 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -16,7 +16,7 @@ config SH_DMAE_BASE depends on SUPERH || COMPILE_TEST depends on !SUPERH || SH_DMA depends on !SH_DMA_API - default y + default SUPERH || SH_DMA select RENESAS_DMA help Enable support for the Renesas SuperH DMA controllers. diff --git a/drivers/dma/stm32/stm32-dma.c b/drivers/dma/stm32/stm32-dma.c index 917f8e922373..04389936c8a6 100644 --- a/drivers/dma/stm32/stm32-dma.c +++ b/drivers/dma/stm32/stm32-dma.c @@ -613,7 +613,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) reg->dma_scr |= STM32_DMA_SCR_EN; stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr); - dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); } static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) @@ -676,7 +676,7 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan) chan->status = DMA_PAUSED; - dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan); } static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan) @@ -728,7 +728,7 @@ static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan) dma_scr |= STM32_DMA_SCR_EN; stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); - dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: reconfigured after pause/resume\n", &chan->vchan); } static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) @@ -744,7 +744,7 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) /* cyclic while CIRC/DBM disable => post resume reconfiguration needed */ if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))) stm32_dma_post_resume_reconfigure(chan); - else if (scr & STM32_DMA_SCR_DBM) + else if (scr & STM32_DMA_SCR_DBM && chan->desc->num_sgs > 2) stm32_dma_configure_next_sg(chan); } else { chan->busy = false; @@ -820,7 +820,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c) spin_lock_irqsave(&chan->vchan.lock, flags); if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) { - dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); stm32_dma_start_transfer(chan); } @@ -922,7 +922,7 @@ static int stm32_dma_resume(struct 
dma_chan *c) spin_unlock_irqrestore(&chan->vchan.lock, flags); - dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan); return 0; } diff --git a/drivers/dma/stm32/stm32-dma3.c b/drivers/dma/stm32/stm32-dma3.c index 0c6c4258b195..50e7106c5cb7 100644 --- a/drivers/dma/stm32/stm32-dma3.c +++ b/drivers/dma/stm32/stm32-dma3.c @@ -801,7 +801,7 @@ static void stm32_dma3_chan_start(struct stm32_dma3_chan *chan) chan->dma_status = DMA_IN_PROGRESS; - dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); } static int stm32_dma3_chan_suspend(struct stm32_dma3_chan *chan, bool susp) @@ -1452,7 +1452,7 @@ static int stm32_dma3_pause(struct dma_chan *c) chan->dma_status = DMA_PAUSED; - dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: paused\n", &chan->vchan); return 0; } @@ -1465,7 +1465,7 @@ static int stm32_dma3_resume(struct dma_chan *c) chan->dma_status = DMA_IN_PROGRESS; - dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: resumed\n", &chan->vchan); return 0; } @@ -1490,7 +1490,7 @@ static int stm32_dma3_terminate_all(struct dma_chan *c) spin_unlock_irqrestore(&chan->vchan.lock, flags); vchan_dma_desc_free_list(&chan->vchan, &head); - dev_dbg(chan2dev(chan), "vchan %pK: terminated\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: terminated\n", &chan->vchan); return 0; } @@ -1543,7 +1543,7 @@ static void stm32_dma3_issue_pending(struct dma_chan *c) spin_lock_irqsave(&chan->vchan.lock, flags); if (vchan_issue_pending(&chan->vchan) && !chan->swdesc) { - dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); stm32_dma3_chan_start(chan); } diff --git a/drivers/dma/stm32/stm32-mdma.c b/drivers/dma/stm32/stm32-mdma.c index e6d525901de7..080c1c725216 100644 --- a/drivers/dma/stm32/stm32-mdma.c +++ b/drivers/dma/stm32/stm32-mdma.c @@ -1187,7 +1187,7 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan) chan->busy = true; - dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); } static void stm32_mdma_issue_pending(struct dma_chan *c) @@ -1200,7 +1200,7 @@ static void stm32_mdma_issue_pending(struct dma_chan *c) if (!vchan_issue_pending(&chan->vchan)) goto end; - dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); if (!chan->desc && !chan->busy) stm32_mdma_start_transfer(chan); @@ -1220,7 +1220,7 @@ static int stm32_mdma_pause(struct dma_chan *c) spin_unlock_irqrestore(&chan->vchan.lock, flags); if (!ret) - dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan); return ret; } @@ -1261,7 +1261,7 @@ static int stm32_mdma_resume(struct dma_chan *c) spin_unlock_irqrestore(&chan->vchan.lock, flags); - dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan); + dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan); return 0; } diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index 24796aaaddfa..00d2fd38d17f 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -1249,11 +1249,10 @@ static int sun4i_dma_probe(struct platform_device *pdev) if (priv->irq < 0) return priv->irq; - priv->clk = devm_clk_get(&pdev->dev, NULL); - if 
(IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "No clock specified\n"); - return PTR_ERR(priv->clk); - } + priv->clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), + "Couldn't start the clock\n"); if (priv->cfg->has_reset) { priv->rst = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL); @@ -1328,12 +1327,6 @@ static int sun4i_dma_probe(struct platform_device *pdev) vchan_init(&vchan->vc, &priv->slave); } - ret = clk_prepare_enable(priv->clk); - if (ret) { - dev_err(&pdev->dev, "Couldn't enable the clock\n"); - return ret; - } - /* * Make sure the IRQs are all disabled and accounted for. The bootloader * likes to leave these dirty @@ -1343,33 +1336,23 @@ static int sun4i_dma_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt, 0, dev_name(&pdev->dev), priv); - if (ret) { - dev_err(&pdev->dev, "Cannot request IRQ\n"); - goto err_clk_disable; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n"); - ret = dma_async_device_register(&priv->slave); - if (ret) { - dev_warn(&pdev->dev, "Failed to register DMA engine device\n"); - goto err_clk_disable; - } + ret = dmaenginem_async_device_register(&priv->slave); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to register DMA engine device\n"); ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate, priv); - if (ret) { - dev_err(&pdev->dev, "of_dma_controller_register failed\n"); - goto err_dma_unregister; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to register translation function\n"); dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n"); return 0; - -err_dma_unregister: - dma_async_device_unregister(&priv->slave); -err_clk_disable: - clk_disable_unprepare(priv->clk); - return ret; } static void sun4i_dma_remove(struct platform_device *pdev) @@ -1380,9 +1363,6 @@ static void sun4i_dma_remove(struct platform_device *pdev) disable_irq(priv->irq); of_dma_controller_free(pdev->dev.of_node); - dma_async_device_unregister(&priv->slave); - - clk_disable_unprepare(priv->clk); } static struct sun4i_dma_config sun4i_a10_dma_cfg = { diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index 2adc2cca10e9..dbf168146d35 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -17,7 +17,7 @@ config TI_EDMA select DMA_ENGINE select DMA_VIRTUAL_CHANNELS select TI_DMA_CROSSBAR if (ARCH_OMAP || COMPILE_TEST) - default y + default ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE help Enable support for the TI EDMA (Enhanced DMA) controller. This DMA engine is found on TI DaVinci, AM33xx, AM43xx, DRA7xx and Keystone 2 @@ -29,7 +29,7 @@ config DMA_OMAP select DMA_ENGINE select DMA_VIRTUAL_CHANNELS select TI_DMA_CROSSBAR if (SOC_DRA7XX || COMPILE_TEST) - default y + default ARCH_OMAP help Enable support for the TI sDMA (System DMA or DMA4) controller. This DMA engine is found on OMAP and DRA7xx parts. 
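The sun4i-dma changes above are a conversion to managed (devres) resources: devm_clk_get_enabled() folds the clk_get/clk_prepare_enable pair into one call, dmaenginem_async_device_register() removes the manual unregister, and dev_err_probe() unifies the error paths so the err_* labels disappear. A minimal sketch of the resulting probe shape, using hypothetical foo_dma_dev/foo_dma_of_xlate names rather than the driver's own:

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

/* Hypothetical driver state; only the fields this sketch touches. */
struct foo_dma_dev {
	struct clk *clk;
	struct dma_device slave;
};

/* Translation callback elided; any of_dma xlate implementation works. */
static struct dma_chan *foo_dma_of_xlate(struct of_phandle_args *spec,
					 struct of_dma *ofdma);

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma_dev *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Acquire and enable in one step; devres disables it on unbind. */
	priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
				     "Couldn't start the clock\n");

	/* Managed registration: remove() no longer unregisters by hand. */
	ret = dmaenginem_async_device_register(&priv->slave);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "Failed to register DMA engine device\n");

	/* of_dma_controller_free() is the only teardown left for remove(). */
	return of_dma_controller_register(pdev->dev.of_node,
					  foo_dma_of_xlate, priv);
}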
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a1737556a77e..ef3af170dda4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -883,6 +883,7 @@ struct amdgpu_mqd_prop { uint64_t csa_addr; uint64_t fence_address; bool tmz_queue; + bool kernel_queue; }; struct amdgpu_mqd { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c index 15dde1f50328..25252231a68a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c @@ -459,7 +459,7 @@ calc: void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count) { - u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask; + u64 pos, wptr_old, rptr; int rec_cnt_dw = count >> 2; u32 chunk, ent_sz; u8 *s = (u8 *)src; @@ -472,9 +472,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count) return; } + mutex_lock(&ring->adev->cper.ring_lock); + wptr_old = ring->wptr; + rptr = *ring->rptr_cpu_addr & ring->ptr_mask; - mutex_lock(&ring->adev->cper.ring_lock); while (count) { ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr); chunk = umin(ent_sz, count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index a5c3f64cbce6..6379bb25bf5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -719,6 +719,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, prop->eop_gpu_addr = ring->eop_gpu_addr; prop->use_doorbell = ring->use_doorbell; prop->doorbell_index = ring->doorbell_index; + prop->kernel_queue = true; /* map_queues packet doesn't need activate the queue, * so only kiq need set this field. 
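The amdgpu_cper_ring_write() hunk above is a lock-ordering fix: wptr_old and rptr used to be sampled before cper.ring_lock was taken, so a concurrent writer could advance the ring between the snapshot and the copy loop. A minimal sketch of the corrected pattern, with a hypothetical foo_ring type standing in for amdgpu_ring:

#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical ring; mirrors only the fields the sketch needs. */
struct foo_ring {
	struct mutex lock;
	u64 wptr;
	u64 ptr_mask;
	u64 *rptr_cpu_addr;
};

static void foo_ring_write(struct foo_ring *ring, const void *src, int count)
{
	u64 wptr_old, rptr;

	mutex_lock(&ring->lock);
	/*
	 * Snapshot both pointers only while holding the lock; values read
	 * before mutex_lock() may already be stale when the copy starts.
	 */
	wptr_old = ring->wptr;
	rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
	pr_debug("write %p: wptr_old=%llu rptr=%llu count=%d\n",
		 src, wptr_old, rptr, count);

	/* ... copy count dwords, advancing ring->wptr under the lock ... */

	mutex_unlock(&ring->lock);
}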
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index a0b50a8ac9c4..e96f24e9ad57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -32,6 +32,7 @@ static const struct kicker_device kicker_device_list[] = { {0x744B, 0x00}, + {0x7551, 0xC8} }; static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d5c0637d7392..5cacf5717016 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2414,13 +2414,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, - DMA_RESV_USAGE_BOOKKEEP, - true, timeout); + timeout = drm_sched_entity_flush(&vm->immediate, timeout); if (timeout <= 0) return timeout; - return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); + return drm_sched_entity_flush(&vm->delayed, timeout); } static void amdgpu_vm_destroy_task_info(struct kref *kref) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 09bf72237d1d..3e138527d534 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -79,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin"); static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = { @@ -586,7 +587,7 @@ out: static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -613,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); if (!amdgpu_sriov_vf(adev)) { - err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, - AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_rlc.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc.bin", ucode_prefix); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c index df898dbb746e..58cd87db8061 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c @@ -34,12 +34,13 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin"); #define TRANSFER_RAM_MASK 0x001c0000 static int imu_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct imu_firmware_header_v1_0 *imu_hdr; struct amdgpu_firmware_info *info = NULL; @@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, 
&adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; @@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev, static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev) { u32 reg_data, size = 0; - const u32 *data; + const u32 *data = NULL; int r = -EINVAL; WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c index f2ab5001b492..951998454b25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c @@ -37,39 +37,31 @@ static const char *mmhub_client_ids_v4_1_0[][2] = { [0][0] = "VMC", [4][0] = "DCEDMC", - [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = "DBGUNBIO", + [16][0] = "LSDMA", + [17][0] = "JPEG", + [19][0] = "VCNU", + [22][0] = "VSCH", + [23][0] = "HDP", + [32+23][0] = "VCNRD", [3][1] = "DCEDWB", [4][1] = "DCEDMC", - [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", [8][1] = "MPIO", [10][1] = "DBGU0", [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", + [12][1] = "DBGUNBIO", [14][1] = "XDP", [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = "LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [16][1] = "LSDMA", + [17][1] = "JPEG", + [18][1] = "VCNWR", + [19][1] = "VCNU", + [22][1] = "VSCH", + [23][1] = "HDP", }; static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 36ef4a72ad1d..38dfc5c19f2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -34,7 +34,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 096b23ad4845..2a175fc0399c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3398,8 +3398,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); r = dm_dmub_hw_init(adev); - if (r) + if (r) { drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); + return r; + } dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -4983,9 +4985,9 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[aconnector->bl_idx]; if (get_brightness_range(caps, &min, &max)) { if (power_supply_is_system_supplied() > 0) - props.brightness = (max - 
min) * DIV_ROUND_CLOSEST(caps->ac_level, 100); + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100); else - props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps->dc_level, 100); + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100); /* min is zero, so max needs to be adjusted */ props.max_brightness = max - min; drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, @@ -5410,7 +5412,8 @@ fail: static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { - drm_atomic_private_obj_fini(&dm->atomic_obj); + if (dm->atomic_obj.state) + drm_atomic_private_obj_fini(&dm->atomic_obj); } /****************************************************************************** diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index c7d13e743e6c..b726bcd18e29 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3988,7 +3988,7 @@ static int capabilities_show(struct seq_file *m, void *unused) struct hubbub *hubbub = dc->res_pool->hubbub; - if (hubbub->funcs->get_mall_en) + if (hubbub && hubbub->funcs->get_mall_en) hubbub->funcs->get_mall_en(hubbub, &mall_in_use); if (dc->cap_funcs.get_subvp_en) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index f984cb0cb889..ff7b867ae98b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -119,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); - if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) - return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + } ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c31f7f8e409f..28aca7017f0f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -5443,7 +5443,8 @@ bool dc_update_planes_and_stream(struct dc *dc, else ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) + if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_01)) clear_update_flags(srf_updates, surface_count, stream); return ret; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 4e06468a6284..0421b267a0b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -377,10 +377,16 @@ static bool setup_engine( } /** + * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state + * @dce_i2c_hw: Pointer to dce_i2c_hw structure + * * If we boot without an HDMI display, the I2C engine does not get initialized * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after - * acquire, so that after setting SW_DONE_USING_I2C on release, the engine gets + * acquire. After setting SW_DONE_USING_I2C on release, the engine gets * immediately reacquired by SW, preventing DMUB from using it. 
+ * + * This function checks the I2C arbitration status and applies a release + * workaround if necessary. */ static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw) { diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index a454d16e6586..1f53a9f0c0ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -152,7 +152,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing( } /* Forward Declerations */ -static unsigned int get_min_slice_count_for_odm( +static unsigned int get_min_dsc_slice_count_for_odm( const struct display_stream_compressor *dsc, const struct dsc_enc_caps *dsc_enc_caps, const struct dc_crtc_timing *timing); @@ -466,7 +466,7 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; - unsigned int min_slice_count; + unsigned int min_dsc_slice_count; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config = {0}; @@ -478,14 +478,14 @@ bool dc_dsc_compute_bandwidth_range( get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); - min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, link_encoding, min_slice_count, &config); + &options, link_encoding, min_dsc_slice_count, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, @@ -593,14 +593,12 @@ static void build_dsc_enc_caps( struct dc *dc; - memset(&single_dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps) return; dc = dsc->ctx->dc; - if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool) + if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc) return; /* get max DSCCLK from clk_mgr */ @@ -634,7 +632,7 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value) return (value + 9) / 10; } -static unsigned int get_min_slice_count_for_odm( +static unsigned int get_min_dsc_slice_count_for_odm( const struct display_stream_compressor *dsc, const struct dsc_enc_caps *dsc_enc_caps, const struct dc_crtc_timing *timing) @@ -651,6 +649,10 @@ static unsigned int get_min_slice_count_for_odm( } } + /* validate parameters */ + if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0) + return 1; + /* consider minimum odm slices required due to * 1) display pipe throughput (dispclk) * 2) max image width per slice @@ -669,13 +671,12 @@ static void get_dsc_enc_caps( { memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (!dsc) + if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc) return; /* check if reported cap global or only for a single DCN DSC enc */ if (dsc->funcs->dsc_get_enc_caps) { - if (!dsc->ctx->dc->debug.disable_dsc) - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); } else { build_dsc_enc_caps(dsc, dsc_enc_caps); } @@ -1295,10 +1296,10 @@ bool dc_dsc_compute_config( { bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - unsigned int min_slice_count; + unsigned int min_dsc_slice_count; 
get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); - min_slice_count = get_min_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, @@ -1306,7 +1307,7 @@ bool dc_dsc_compute_config( timing, options, link_encoding, - min_slice_count, + min_dsc_slice_count, dsc_cfg); return is_dsc_possible; } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index de708fdc1e80..663c49cce4aa 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -926,6 +926,7 @@ static const struct dc_debug_options debug_defaults_drv = { .seamless_boot_odm_combine = true, .enable_legacy_fast_update = true, .using_dml2 = false, + .disable_dsc_power_gate = true, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 76c1adda83db..f9b0938c57ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32}; MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index e32e680c7197..71c6ccad4b99 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -130,10 +130,10 @@ static void virtio_gpu_remove(struct virtio_device *vdev) static void virtio_gpu_shutdown(struct virtio_device *vdev) { - /* - * drm does its own synchronization on shutdown. - * Do nothing here, opt out of device reset. 
- */ + struct drm_device *dev = vdev->priv; + + /* stop talking to the device */ + drm_dev_unplug(dev); } static void virtio_gpu_config_changed(struct virtio_device *vdev) diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index 8ec1ff1e4e80..e9b46a2d0019 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -267,7 +267,8 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function)); if (!pdev) - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENODEV); + pci_dev_put(pdev); dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 6dc84e4ed281..5bd2f7d7b4ea 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -681,6 +681,7 @@ static void sriov_update_device_info(struct xe_device *xe) /* disable features that are not available/applicable to VFs */ if (IS_SRIOV_VF(xe)) { xe->info.probe_display = 0; + xe->info.has_heci_cscfi = 0; xe->info.has_heci_gscfi = 0; xe->info.skip_guc_pc = 1; xe->info.skip_pcode = 1; diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c index e5fd0cd537bc..bd9015761aa0 100644 --- a/drivers/gpu/drm/xe/xe_device_sysfs.c +++ b/drivers/gpu/drm/xe/xe_device_sysfs.c @@ -160,8 +160,13 @@ static int late_bind_create_files(struct device *dev) ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), &cap, NULL); - if (ret) + if (ret) { + if (ret == -ENXIO) { + drm_dbg(&xe->drm, "Late binding not supported by firmware\n"); + ret = 0; + } goto out; + } if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) { ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr); diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index b6acccfcd351..3f4e6a46ff16 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -95,12 +95,8 @@ struct g2h_fence { static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer) { + memset(g2h_fence, 0, sizeof(*g2h_fence)); g2h_fence->response_buffer = response_buffer; - g2h_fence->response_data = 0; - g2h_fence->response_len = 0; - g2h_fence->fail = false; - g2h_fence->retry = false; - g2h_fence->done = false; g2h_fence->seqno = ~0x0; } diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c index 87a6dcb1b4b5..c926f840c87b 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_group.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c @@ -75,25 +75,18 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt) enum xe_hw_engine_id id; struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs; struct xe_device *xe = gt_to_xe(gt); - int err; group_rcs_ccs = hw_engine_group_alloc(xe); - if (IS_ERR(group_rcs_ccs)) { - err = PTR_ERR(group_rcs_ccs); - goto err_group_rcs_ccs; - } + if (IS_ERR(group_rcs_ccs)) + return PTR_ERR(group_rcs_ccs); group_bcs = hw_engine_group_alloc(xe); - if (IS_ERR(group_bcs)) { - err = PTR_ERR(group_bcs); - goto err_group_bcs; - } + if (IS_ERR(group_bcs)) + return PTR_ERR(group_bcs); group_vcs_vecs = hw_engine_group_alloc(xe); - if (IS_ERR(group_vcs_vecs)) { - err = PTR_ERR(group_vcs_vecs); - goto err_group_vcs_vecs; - } + if (IS_ERR(group_vcs_vecs)) + return PTR_ERR(group_vcs_vecs); for_each_hw_engine(hwe, gt, id) { switch (hwe->class) { @@ -116,15 +109,6 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt) } return 0; - 
-err_group_vcs_vecs: - kfree(group_vcs_vecs); -err_group_bcs: - kfree(group_bcs); -err_group_rcs_ccs: - kfree(group_rcs_ccs); - - return err; } /** diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c index db9c0340be5c..bc7dc2099470 100644 --- a/drivers/gpu/drm/xe/xe_i2c.c +++ b/drivers/gpu/drm/xe/xe_i2c.c @@ -96,8 +96,8 @@ static int xe_i2c_register_adapter(struct xe_i2c *i2c) int ret; fwnode = fwnode_create_software_node(xe_i2c_adapter_properties, NULL); - if (!fwnode) - return -ENOMEM; + if (IS_ERR(fwnode)) + return PTR_ERR(fwnode); /* * Not using platform_device_register_full() here because we don't have @@ -283,6 +283,9 @@ int xe_i2c_probe(struct xe_device *xe) if (xe->info.platform != XE_BATTLEMAGE) return 0; + if (IS_SRIOV_VF(xe)) + return 0; + xe_i2c_read_endpoint(xe_root_tile_mmio(xe), &ep); if (ep.cookie != XE_I2C_EP_COOKIE_DEVICE) return 0; diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index d991fbd90f20..5729e7d3e335 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -1941,7 +1941,7 @@ static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param) /* If not provided, OA unit defaults to OA unit 0 as per uapi */ if (!param->oa_unit) - param->oa_unit = &xe_device_get_gt(oa->xe, 0)->oa.oa_unit[0]; + param->oa_unit = &xe_root_mmio_gt(oa->xe)->oa.oa_unit[0]; /* When we have an exec_q, get hwe from the exec_q */ if (param->exec_q) { diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c index 3e0c3af235f2..465bda355443 100644 --- a/drivers/gpu/drm/xe/xe_uc.c +++ b/drivers/gpu/drm/xe/xe_uc.c @@ -164,7 +164,7 @@ static int vf_uc_load_hw(struct xe_uc *uc) err = xe_guc_opt_in_features_enable(&uc->guc); if (err) - return err; + goto err_out; err = xe_gt_record_default_lrcs(uc_to_gt(uc)); if (err) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 18f2c92beff8..68e45a26e85f 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -437,7 +437,7 @@ find_active_client(struct list_head *head) */ bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { - if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { + if (pci_is_display(pdev)) { /* * apple-gmux is needed on pre-retina MacBook Pro * to probe the panel if pdev is the inactive GPU. 
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 19955e222c2b..9c3ab9d9f69a 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -34,7 +34,7 @@ #define ROOT_SIZE VTD_PAGE_SIZE #define CONTEXT_SIZE VTD_PAGE_SIZE -#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) +#define IS_GFX_DEVICE(pdev) pci_is_display(pdev) #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e) diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index c4e5e2c977be..1c156a3f845e 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -37,6 +37,8 @@ #define COMMAND_READ BIT(3) #define COMMAND_WRITE BIT(4) #define COMMAND_COPY BIT(5) +#define COMMAND_ENABLE_DOORBELL BIT(6) +#define COMMAND_DISABLE_DOORBELL BIT(7) #define PCI_ENDPOINT_TEST_STATUS 0x8 #define STATUS_READ_SUCCESS BIT(0) @@ -48,6 +50,11 @@ #define STATUS_IRQ_RAISED BIT(6) #define STATUS_SRC_ADDR_INVALID BIT(7) #define STATUS_DST_ADDR_INVALID BIT(8) +#define STATUS_DOORBELL_SUCCESS BIT(9) +#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10) +#define STATUS_DOORBELL_ENABLE_FAIL BIT(11) +#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12) +#define STATUS_DOORBELL_DISABLE_FAIL BIT(13) #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10 @@ -62,6 +69,7 @@ #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 #define PCI_ENDPOINT_TEST_FLAGS 0x2c + #define FLAG_USE_DMA BIT(0) #define PCI_ENDPOINT_TEST_CAPS 0x30 @@ -70,6 +78,10 @@ #define CAP_MSIX BIT(2) #define CAP_INTX BIT(3) +#define PCI_ENDPOINT_TEST_DB_BAR 0x34 +#define PCI_ENDPOINT_TEST_DB_OFFSET 0x38 +#define PCI_ENDPOINT_TEST_DB_DATA 0x3c + #define PCI_DEVICE_ID_TI_AM654 0xb00c #define PCI_DEVICE_ID_TI_J7200 0xb00f #define PCI_DEVICE_ID_TI_AM64 0xb010 @@ -100,6 +112,7 @@ enum pci_barno { BAR_3, BAR_4, BAR_5, + NO_BAR = -1, }; struct pci_endpoint_test { @@ -841,6 +854,73 @@ static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test, return 0; } +static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test) +{ + struct pci_dev *pdev = test->pdev; + struct device *dev = &pdev->dev; + int irq_type = test->irq_type; + enum pci_barno bar; + u32 data, status; + u32 addr; + int left; + + if (irq_type < PCITEST_IRQ_TYPE_INTX || + irq_type > PCITEST_IRQ_TYPE_MSIX) { + dev_err(dev, "Invalid IRQ type\n"); + return -EINVAL; + } + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_ENABLE_DOORBELL); + + left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); + + status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) { + dev_err(dev, "Failed to enable doorbell\n"); + return -EINVAL; + } + + data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA); + addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET); + bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type); + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0); + + bar = pci_endpoint_test_readl(test, 
PCI_ENDPOINT_TEST_DB_BAR); + + writel(data, test->bar[bar] + addr); + + left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); + + status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + + if (!left || !(status & STATUS_DOORBELL_SUCCESS)) + dev_err(dev, "Failed to trigger doorbell in endpoint\n"); + + pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, + COMMAND_DISABLE_DOORBELL); + + wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000)); + + status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS); + + if (status & STATUS_DOORBELL_DISABLE_FAIL) { + dev_err(dev, "Failed to disable doorbell\n"); + return -EINVAL; + } + + if (!(status & STATUS_DOORBELL_SUCCESS)) + return -EINVAL; + + return 0; +} + static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -891,6 +971,9 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, case PCITEST_CLEAR_IRQ: ret = pci_endpoint_test_clear_irq(test); break; + case PCITEST_DOORBELL: + ret = pci_endpoint_test_doorbell(test); + break; } ret: diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 69048869ef1c..b77fd30bbfd9 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -341,7 +341,6 @@ void pci_bus_add_device(struct pci_dev *dev) { struct device_node *dn = dev->dev.of_node; struct platform_device *pdev; - int retval; /* * Can not put in pci_device_add yet because resources @@ -372,9 +371,7 @@ void pci_bus_add_device(struct pci_dev *dev) if (!dn || of_device_is_available(dn)) pci_dev_allow_binding(dev); - retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) - pci_warn(dev, "device attach failed (%d)\n", retval); + device_initial_probe(&dev->dev); pci_dev_assign_added(dev); } diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 886f6f43a895..41748d083b93 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -13,6 +13,7 @@ config PCI_AARDVARK depends on OF depends on PCI_MSI select PCI_BRIDGE_EMUL + select IRQ_MSI_LIB help Add support for Aardvark 64bit PCIe Host Controller. This controller is part of the South Bridge of the Marvel Armada @@ -29,6 +30,7 @@ config PCIE_ALTERA_MSI tristate "Altera PCIe MSI feature" depends on PCIE_ALTERA depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want PCIe MSI support for the Altera FPGA. This MSI driver supports Altera MSI to GIC controller IP. @@ -62,6 +64,7 @@ config PCIE_BRCMSTB BMIPS_GENERIC || COMPILE_TEST depends on OF depends on PCI_MSI + select IRQ_MSI_LIB default ARCH_BRCMSTB || BMIPS_GENERIC help Say Y here to enable PCIe host controller support for @@ -98,6 +101,7 @@ config PCIE_IPROC_MSI bool "Broadcom iProc PCIe MSI support" depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA depends on PCI_MSI + select IRQ_MSI_LIB default ARCH_BCM_IPROC help Say Y here if you want to enable MSI support for Broadcom's iProc @@ -152,6 +156,7 @@ config PCI_IXP4XX config VMD depends on PCI_MSI && X86_64 && !UML tristate "Intel Volume Management Device Driver" + select IRQ_MSI_LIB help Adds support for the Intel Volume Management Device (VMD). VMD is a secondary PCI host bridge that allows PCI Express root ports, @@ -191,6 +196,7 @@ config PCIE_MEDIATEK depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST depends on OF depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want to enable PCIe controller support on MediaTek SoCs. 
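The doorbell exerciser added to pci_endpoint_test above is driven from userspace through the new PCITEST_DOORBELL ioctl. A minimal sketch of that invocation, modeled on tools/pci/pcitest.c; it assumes the ioctl is exported via the uapi header and that the default device node name applies, both of which may differ on a given system:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pcitest.h>

int main(void)
{
	const char *dev = "/dev/pci-endpoint-test.0";	/* default node; may vary */
	int fd, ret;

	fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror(dev);
		return 1;
	}

	/*
	 * The kernel side sends COMMAND_ENABLE_DOORBELL to the endpoint,
	 * reads back the advertised BAR/offset/data, rings the doorbell by
	 * writing the data into that BAR, waits for STATUS_DOORBELL_SUCCESS,
	 * then disables the doorbell again.
	 */
	ret = ioctl(fd, PCITEST_DOORBELL);
	printf("DOORBELL:\t%s\n", ret ? "NOT OKAY" : "OKAY");

	close(fd);
	return ret ? 1 : 0;
}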
@@ -199,6 +205,7 @@ config PCIE_MEDIATEK_GEN3 tristate "MediaTek Gen3 PCIe controller" depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Adds support for PCIe Gen3 MAC controller for MediaTek SoCs. This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed, @@ -237,6 +244,7 @@ config PCIE_RCAR_HOST bool "Renesas R-Car PCIe controller (host mode)" depends on ARCH_RENESAS || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want PCIe controller support on R-Car SoCs in host mode. @@ -315,6 +323,7 @@ config PCIE_XILINX bool "Xilinx AXI PCIe controller" depends on OF depends on PCI_MSI + select IRQ_MSI_LIB help Say 'Y' here if you want kernel to support the Xilinx AXI PCIe Host Bridge driver. @@ -324,6 +333,7 @@ config PCIE_XILINX_DMA_PL depends on ARCH_ZYNQMP || COMPILE_TEST depends on PCI_MSI select PCI_HOST_COMMON + select IRQ_MSI_LIB help Say 'Y' here if you want kernel support for the Xilinx PL DMA PCIe host bridge. The controller is a Soft IP which can act as @@ -334,6 +344,7 @@ config PCIE_XILINX_NWL bool "Xilinx NWL PCIe controller" depends on ARCH_ZYNQMP || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Say 'Y' here if you want kernel support for Xilinx NWL PCIe controller. The controller can act as Root Port diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 8ab6cf70c18e..77c5a19b2ab1 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -353,7 +353,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, } spin_unlock_irqrestore(&ep->lock, flags); - offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | + offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) | CDNS_PCIE_NORMAL_MSG_CODE(msg_code); writel(0, ep->irq_cpu_addr + offset); } diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index a149845d341a..1d81c4bf6c6d 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -250,26 +250,6 @@ struct cdns_pcie_rp_ib_bar { struct cdns_pcie; -enum cdns_pcie_msg_routing { - /* Route to Root Complex */ - MSG_ROUTING_TO_RC, - - /* Use Address Routing */ - MSG_ROUTING_BY_ADDR, - - /* Use ID Routing */ - MSG_ROUTING_BY_ID, - - /* Route as Broadcast Message from Root Complex */ - MSG_ROUTING_BCAST, - - /* Local message; terminate at receiver (INTx messages) */ - MSG_ROUTING_LOCAL, - - /* Gather & route to Root Complex (PME_TO_Ack message) */ - MSG_ROUTING_GATHER, -}; - struct cdns_pcie_ops { int (*start_link)(struct cdns_pcie *pcie); void (*stop_link)(struct cdns_pcie *pcie); diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index d9f0386396ed..ff6b6d9e18ec 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -19,6 +19,7 @@ config PCIE_DW_DEBUGFS config PCIE_DW_HOST bool select PCIE_DW + select IRQ_MSI_LIB config PCIE_DW_EP bool @@ -296,6 +297,7 @@ config PCIE_QCOM select PCIE_DW_HOST select CRC8 select PCIE_QCOM_COMMON + select PCI_HOST_COMMON help Say Y here to enable PCIe controller support on Qualcomm SoCs. The PCIe controller uses the DesignWare core plus Qualcomm-specific @@ -402,6 +404,16 @@ config PCIE_UNIPHIER_EP Say Y here if you want PCIe endpoint controller support on UniPhier SoCs. This driver supports Pro5 SoC. 
+config PCIE_SOPHGO_DW + bool "Sophgo DesignWare PCIe controller (host mode)" + depends on ARCH_SOPHGO || COMPILE_TEST + depends on PCI_MSI + depends on OF + select PCIE_DW_HOST + help + Say Y here if you want PCIe host controller support on + Sophgo SoCs. + config PCIE_SPEAR13XX bool "STMicroelectronics SPEAr PCIe controller" depends on ARCH_SPEAR13XX || COMPILE_TEST diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 908cb7f345db..6919d27798d1 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o +obj-$(CONFIG_PCIE_SOPHGO_DW) += pcie-sophgo.o obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 5a38cfaf989b..80e48746bbaf 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -860,7 +860,6 @@ static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) { reset_control_assert(imx_pcie->pciephy_reset); - reset_control_assert(imx_pcie->apps_reset); if (imx_pcie->drvdata->core_reset) imx_pcie->drvdata->core_reset(imx_pcie, true); @@ -872,7 +871,6 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie) { reset_control_deassert(imx_pcie->pciephy_reset); - reset_control_deassert(imx_pcie->apps_reset); if (imx_pcie->drvdata->core_reset) imx_pcie->drvdata->core_reset(imx_pcie, false); @@ -1063,7 +1061,10 @@ static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid) data1 |= IMX95_PE0_LUT_VLD; regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1); - data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ + if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) + data2 = 0x7; /* In the EP mode, only 'Device ID' is required */ + else + data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid); regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2); @@ -1096,18 +1097,14 @@ static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid) } } -static int imx_pcie_enable_device(struct pci_host_bridge *bridge, - struct pci_dev *pdev) +static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid) { - struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); - u32 sid_i, sid_m, rid = pci_dev_id(pdev); + struct device *dev = imx_pcie->pci->dev; struct device_node *target; - struct device *dev; + u32 sid_i, sid_m; int err_i, err_m; u32 sid = 0; - dev = imx_pcie->pci->dev; - target = NULL; err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask", &target, &sid_i); @@ -1182,6 +1179,13 @@ static int imx_pcie_enable_device(struct pci_host_bridge *bridge, return imx_pcie_add_lut(imx_pcie, rid, sid); } +static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) +{ + struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); + + return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev)); +} + static void imx_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) { @@ -1247,6 +1251,9 @@ 
static int imx_pcie_host_init(struct dw_pcie_rp *pp) } } + /* Make sure that PCIe LTSSM is cleared */ + imx_pcie_ltssm_disable(dev); + ret = imx_pcie_deassert_core_reset(imx_pcie); if (ret < 0) { dev_err(dev, "pcie deassert core reset failed: %d\n", ret); @@ -1385,6 +1392,8 @@ static const struct pci_epc_features imx8m_pcie_epc_features = { .msix_capable = false, .bar[BAR_1] = { .type = BAR_RESERVED, }, .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, .align = SZ_64K, }; @@ -1465,9 +1474,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, pci_epc_init_notify(ep->epc); - /* Start LTSSM. */ - imx_pcie_ltssm_enable(dev); - return 0; } @@ -1764,6 +1770,12 @@ static int imx_pcie_probe(struct platform_device *pdev) ret = imx_add_pcie_ep(imx_pcie, pdev); if (ret < 0) return ret; + + /* + * FIXME: Only single Device (EPF) is supported due to the + * Endpoint framework limitation. + */ + imx_pcie_add_lut_by_rid(imx_pcie, 0); } else { pci->pp.use_atu_msg = true; ret = dw_pcie_host_init(&pci->pp); @@ -1912,7 +1924,7 @@ static const struct imx_pcie_drvdata drvdata[] = { .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .mode_off[1] = IOMUXC_GPR12, .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, - .epc_features = &imx8m_pcie_epc_features, + .epc_features = &imx8q_pcie_epc_features, .init_phy = imx8mq_pcie_init_phy, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c index c67601096c48..0fbf86c0b97e 100644 --- a/drivers/pci/controller/dwc/pcie-designware-debugfs.c +++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c @@ -814,14 +814,14 @@ static bool dw_pcie_ptm_context_update_visible(void *drvdata) { struct dw_pcie *pci = drvdata; - return (pci->mode == DW_PCIE_EP_TYPE) ? true : false; + return pci->mode == DW_PCIE_EP_TYPE; } static bool dw_pcie_ptm_context_valid_visible(void *drvdata) { struct dw_pcie *pci = drvdata; - return (pci->mode == DW_PCIE_RC_TYPE) ? true : false; + return pci->mode == DW_PCIE_RC_TYPE; } static bool dw_pcie_ptm_local_clock_visible(void *drvdata) @@ -834,38 +834,38 @@ static bool dw_pcie_ptm_master_clock_visible(void *drvdata) { struct dw_pcie *pci = drvdata; - return (pci->mode == DW_PCIE_EP_TYPE) ? true : false; + return pci->mode == DW_PCIE_EP_TYPE; } static bool dw_pcie_ptm_t1_visible(void *drvdata) { struct dw_pcie *pci = drvdata; - return (pci->mode == DW_PCIE_EP_TYPE) ? true : false; + return pci->mode == DW_PCIE_EP_TYPE; } static bool dw_pcie_ptm_t2_visible(void *drvdata) { struct dw_pcie *pci = drvdata; - return (pci->mode == DW_PCIE_RC_TYPE) ? true : false; + return pci->mode == DW_PCIE_RC_TYPE; } static bool dw_pcie_ptm_t3_visible(void *drvdata) { struct dw_pcie *pci = drvdata; - return (pci->mode == DW_PCIE_RC_TYPE) ? true : false; + return pci->mode == DW_PCIE_RC_TYPE; } static bool dw_pcie_ptm_t4_visible(void *drvdata) { struct dw_pcie *pci = drvdata; - return (pci->mode == DW_PCIE_EP_TYPE) ? 
true : false; + return pci->mode == DW_PCIE_EP_TYPE; } -const struct pcie_ptm_ops dw_pcie_ptm_ops = { +static const struct pcie_ptm_ops dw_pcie_ptm_ops = { .check_capability = dw_pcie_ptm_check_capability, .context_update_write = dw_pcie_ptm_context_update_write, .context_update_read = dw_pcie_ptm_context_update_read, diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 906277f9ffaf..952f8594b501 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -10,6 +10,7 @@ #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/of_address.h> @@ -23,35 +24,21 @@ static struct pci_ops dw_pcie_ops; static struct pci_ops dw_child_pcie_ops; -static void dw_msi_ack_irq(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void dw_msi_mask_irq(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void dw_msi_unmask_irq(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip dw_pcie_msi_irq_chip = { - .name = "PCI-MSI", - .irq_ack = dw_msi_ack_irq, - .irq_mask = dw_msi_mask_irq, - .irq_unmask = dw_msi_unmask_irq, -}; - -static struct msi_domain_info dw_pcie_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX | - MSI_FLAG_MULTI_PCI_MSI, - .chip = &dw_pcie_msi_irq_chip, +#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) +#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \ + MSI_FLAG_PCI_MSIX | \ + MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops dw_pcie_msi_parent_ops = { + .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED, + .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "DW-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; /* MSI int handler */ @@ -227,30 +214,23 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = { int dw_pcie_allocate_domains(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node); - - pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, - &dw_pcie_msi_domain_ops, pp); + struct irq_domain_info info = { + .fwnode = dev_fwnode(pci->dev), + .ops = &dw_pcie_msi_domain_ops, + .size = pp->num_vectors, + .host_data = pp, + }; + + pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops); if (!pp->irq_domain) { dev_err(pci->dev, "Failed to create IRQ domain\n"); return -ENOMEM; } - irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); - - pp->msi_domain = pci_msi_create_irq_domain(fwnode, - &dw_pcie_msi_domain_info, - pp->irq_domain); - if (!pp->msi_domain) { - dev_err(pci->dev, "Failed to create MSI domain\n"); - irq_domain_remove(pp->irq_domain); - return -ENOMEM; - } - return 0; } -static void dw_pcie_free_msi(struct dw_pcie_rp *pp) +void dw_pcie_free_msi(struct dw_pcie_rp *pp) { u32 ctrl; @@ -260,22 +240,36 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp) NULL, NULL); } - irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); } +EXPORT_SYMBOL_GPL(dw_pcie_free_msi); -static void dw_pcie_msi_init(struct 
dw_pcie_rp *pp) +void dw_pcie_msi_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target = (u64)pp->msi_data; + u32 ctrl, num_ctrls; if (!pci_msi_enabled() || !pp->has_msi_ctrl) return; + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + /* Initialize IRQ Status array */ + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + pp->irq_mask[ctrl]); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + ~0); + } + /* Program the msi_data */ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target)); dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target)); } +EXPORT_SYMBOL_GPL(dw_pcie_msi_init); static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp) { @@ -317,7 +311,7 @@ static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp) return 0; } -static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) +int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; @@ -391,6 +385,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) return 0; } +EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init); static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp) { @@ -909,7 +904,7 @@ static void dw_pcie_config_presets(struct dw_pcie_rp *pp) int dw_pcie_setup_rc(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - u32 val, ctrl, num_ctrls; + u32 val; int ret; /* @@ -920,20 +915,6 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp) dw_pcie_setup(pci); - if (pp->has_msi_ctrl) { - num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; - - /* Initialize IRQ Status array */ - for (ctrl = 0; ctrl < num_ctrls; ctrl++) { - dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + - (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - pp->irq_mask[ctrl]); - dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + - (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - ~0); - } - } - dw_pcie_msi_init(pp); /* Setup RC BARs */ diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 4d794964fa0f..89aad5a08928 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -702,18 +702,26 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) int retries; /* Check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) { if (dw_pcie_link_up(pci)) break; - msleep(LINK_WAIT_SLEEP_MS); + msleep(PCIE_LINK_WAIT_SLEEP_MS); } - if (retries >= LINK_WAIT_MAX_RETRIES) { + if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) { dev_info(pci->dev, "Phy link never came up\n"); return -ETIMEDOUT; } + /* + * As per PCIe r6.0, sec 6.6.1, a Downstream Port that supports Link + * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms + * after Link training completes before sending a Configuration Request. 
+ */ + if (pci->max_link_speed > 2) + msleep(PCIE_RESET_CONFIG_WAIT_MS); + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index ce9e18554e42..00f52d472dcd 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -62,10 +62,6 @@ #define dw_pcie_cap_set(_pci, _cap) \ set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps) -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_SLEEP_MS 90 - /* Parameters for the waiting for iATU enabled routine */ #define LINK_WAIT_MAX_IATU_RETRIES 5 #define LINK_WAIT_IATU 9 @@ -417,7 +413,6 @@ struct dw_pcie_rp { const struct dw_pcie_host_ops *ops; int msi_irq[MAX_MSI_CTRLS]; struct irq_domain *irq_domain; - struct irq_domain *msi_domain; dma_addr_t msi_data; struct irq_chip *msi_irq_chip; u32 num_vectors; @@ -759,6 +754,9 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci) int dw_pcie_suspend_noirq(struct dw_pcie *pci); int dw_pcie_resume_noirq(struct dw_pcie *pci); irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp); +void dw_pcie_msi_init(struct dw_pcie_rp *pp); +int dw_pcie_msi_host_init(struct dw_pcie_rp *pp); +void dw_pcie_free_msi(struct dw_pcie_rp *pp); int dw_pcie_setup_rc(struct dw_pcie_rp *pp); int dw_pcie_host_init(struct dw_pcie_rp *pp); void dw_pcie_host_deinit(struct dw_pcie_rp *pp); @@ -781,6 +779,17 @@ static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp) return IRQ_NONE; } +static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp) +{ } + +static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) +{ + return -ENODEV; +} + +static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp) +{ } + static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp) { return 0; diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index 93171a392879..b5f5eee5a50e 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -58,6 +58,8 @@ /* Hot Reset Control Register */ #define PCIE_CLIENT_HOT_RESET_CTRL 0x180 +#define PCIE_LTSSM_APP_DLY2_EN BIT(1) +#define PCIE_LTSSM_APP_DLY2_DONE BIT(3) #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) /* LTSSM Status Register */ @@ -458,6 +460,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg) if (reg & PCIE_RDLH_LINK_UP_CHGED) { if (rockchip_pcie_link_up(pci)) { + msleep(PCIE_RESET_CONFIG_WAIT_MS); dev_dbg(dev, "Received Link up event. Starting enumeration!\n"); /* Rescan the bus to enumerate endpoint devices */ pci_lock_rescan_remove(); @@ -474,7 +477,7 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) struct rockchip_pcie *rockchip = arg; struct dw_pcie *pci = &rockchip->pci; struct device *dev = pci->dev; - u32 reg; + u32 reg, val; reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC); rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC); @@ -485,6 +488,10 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) if (reg & PCIE_LINK_REQ_RST_NOT_INT) { dev_dbg(dev, "hot reset or link-down reset\n"); dw_pcie_ep_linkdown(&pci->ep); + /* Stop delaying link training. 
*/ + val = HIWORD_UPDATE_BIT(PCIE_LTSSM_APP_DLY2_DONE); + rockchip_pcie_writel_apb(rockchip, val, + PCIE_CLIENT_HOT_RESET_CTRL); } if (reg & PCIE_RDLH_LINK_UP_CHGED) { @@ -566,8 +573,11 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev, return ret; } - /* LTSSM enable control mode */ - val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); + /* + * LTSSM enable control mode, and automatically delay link training on + * hot reset/link-down reset. + */ + val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN); rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE, diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index c789e3f85655..294babe1816e 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -21,7 +21,9 @@ #include <linux/limits.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/of_pci.h> #include <linux/pci.h> +#include <linux/pci-ecam.h> #include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> @@ -34,6 +36,7 @@ #include <linux/units.h> #include "../../pci.h" +#include "../pci-host-common.h" #include "pcie-designware.h" #include "pcie-qcom-common.h" @@ -255,13 +258,21 @@ struct qcom_pcie_ops { * @ops: qcom PCIe ops structure * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache * snooping + * @firmware_managed: Set if the Root Complex is firmware managed */ struct qcom_pcie_cfg { const struct qcom_pcie_ops *ops; bool override_no_snoop; + bool firmware_managed; bool no_l0s; }; +struct qcom_pcie_port { + struct list_head list; + struct gpio_desc *reset; + struct phy *phy; +}; + struct qcom_pcie { struct dw_pcie *pci; void __iomem *parf; /* DT parf */ @@ -274,24 +285,37 @@ struct qcom_pcie { struct icc_path *icc_cpu; const struct qcom_pcie_cfg *cfg; struct dentry *debugfs; + struct list_head ports; bool suspended; bool use_pm_opp; }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) -static void qcom_ep_reset_assert(struct qcom_pcie *pcie) +static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert) { - gpiod_set_value_cansleep(pcie->reset, 1); + struct qcom_pcie_port *port; + int val = assert ? 
1 : 0; + + if (list_empty(&pcie->ports)) + gpiod_set_value_cansleep(pcie->reset, val); + else + list_for_each_entry(port, &pcie->ports, list) + gpiod_set_value_cansleep(port->reset, val); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } +static void qcom_ep_reset_assert(struct qcom_pcie *pcie) +{ + qcom_perst_assert(pcie, true); +} + static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) { /* Ensure that PERST has been asserted for at least 100 ms */ msleep(PCIE_T_PVPERL_MS); - gpiod_set_value_cansleep(pcie->reset, 0); - usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); + qcom_perst_assert(pcie, false); } static int qcom_pcie_start_link(struct dw_pcie *pci) @@ -1229,6 +1253,59 @@ static bool qcom_pcie_link_up(struct dw_pcie *pci) return val & PCI_EXP_LNKSTA_DLLLA; } +static void qcom_pcie_phy_exit(struct qcom_pcie *pcie) +{ + struct qcom_pcie_port *port; + + if (list_empty(&pcie->ports)) + phy_exit(pcie->phy); + else + list_for_each_entry(port, &pcie->ports, list) + phy_exit(port->phy); +} + +static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie) +{ + struct qcom_pcie_port *port; + + if (list_empty(&pcie->ports)) { + phy_power_off(pcie->phy); + } else { + list_for_each_entry(port, &pcie->ports, list) + phy_power_off(port->phy); + } +} + +static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie) +{ + struct qcom_pcie_port *port; + int ret = 0; + + if (list_empty(&pcie->ports)) { + ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + if (ret) + return ret; + + ret = phy_power_on(pcie->phy); + if (ret) + return ret; + } else { + list_for_each_entry(port, &pcie->ports, list) { + ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + if (ret) + return ret; + + ret = phy_power_on(port->phy); + if (ret) { + qcom_pcie_phy_power_off(pcie); + return ret; + } + } + } + + return ret; +} + static int qcom_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); @@ -1241,11 +1318,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) if (ret) return ret; - ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); - if (ret) - goto err_deinit; - - ret = phy_power_on(pcie->phy); + ret = qcom_pcie_phy_power_on(pcie); if (ret) goto err_deinit; @@ -1268,7 +1341,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) err_assert_reset: qcom_ep_reset_assert(pcie); err_disable_phy: - phy_power_off(pcie->phy); + qcom_pcie_phy_power_off(pcie); err_deinit: pcie->cfg->ops->deinit(pcie); @@ -1281,7 +1354,7 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp) struct qcom_pcie *pcie = to_qcom_pcie(pci); qcom_ep_reset_assert(pcie); - phy_power_off(pcie->phy); + qcom_pcie_phy_power_off(pcie); pcie->cfg->ops->deinit(pcie); } @@ -1426,6 +1499,10 @@ static const struct qcom_pcie_cfg cfg_sc8280xp = { .no_l0s = true, }; +static const struct qcom_pcie_cfg cfg_fw_managed = { + .firmware_managed = true, +}; + static const struct dw_pcie_ops dw_pcie_ops = { .link_up = qcom_pcie_link_up, .start_link = qcom_pcie_start_link, @@ -1564,6 +1641,7 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR); if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) { + msleep(PCIE_RESET_CONFIG_WAIT_MS); dev_dbg(dev, "Received Link up event. 
Starting enumeration!\n"); /* Rescan the bus to enumerate endpoint devices */ pci_lock_rescan_remove(); @@ -1579,10 +1657,128 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) return IRQ_HANDLED; } +static void qcom_pci_free_msi(void *ptr) +{ + struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr; + + if (pp && pp->has_msi_ctrl) + dw_pcie_free_msi(pp); +} + +static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct dw_pcie_rp *pp; + struct dw_pcie *pci; + int ret; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pp = &pci->pp; + pci->dbi_base = cfg->win; + pp->num_vectors = MSI_DEF_NUM_VECTORS; + + ret = dw_pcie_msi_host_init(pp); + if (ret) + return ret; + + pp->has_msi_ctrl = true; + dw_pcie_msi_init(pp); + + return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp); +} + +static const struct pci_ecam_ops pci_qcom_ecam_ops = { + .init = qcom_pcie_ecam_host_init, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; + +static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node) +{ + struct device *dev = pcie->pci->dev; + struct qcom_pcie_port *port; + struct gpio_desc *reset; + struct phy *phy; + int ret; + + reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node), + "reset", GPIOD_OUT_HIGH, "PERST#"); + if (IS_ERR(reset)) + return PTR_ERR(reset); + + phy = devm_of_phy_get(dev, node, NULL); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + ret = phy_init(phy); + if (ret) + return ret; + + port->reset = reset; + port->phy = phy; + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &pcie->ports); + + return 0; +} + +static int qcom_pcie_parse_ports(struct qcom_pcie *pcie) +{ + struct device *dev = pcie->pci->dev; + struct qcom_pcie_port *port, *tmp; + int ret = -ENOENT; + + for_each_available_child_of_node_scoped(dev->of_node, of_port) { + ret = qcom_pcie_parse_port(pcie, of_port); + if (ret) + goto err_port_del; + } + + return ret; + +err_port_del: + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + list_del(&port->list); + + return ret; +} + +static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie) +{ + struct device *dev = pcie->pci->dev; + int ret; + + pcie->phy = devm_phy_optional_get(dev, "pciephy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); + if (IS_ERR(pcie->reset)) + return PTR_ERR(pcie->reset); + + ret = phy_init(pcie->phy); + if (ret) + return ret; + + return 0; +} + static int qcom_pcie_probe(struct platform_device *pdev) { const struct qcom_pcie_cfg *pcie_cfg; unsigned long max_freq = ULONG_MAX; + struct qcom_pcie_port *port, *tmp; struct device *dev = &pdev->dev; struct dev_pm_opp *opp; struct qcom_pcie *pcie; @@ -1593,24 +1789,64 @@ static int qcom_pcie_probe(struct platform_device *pdev) char *name; pcie_cfg = of_device_get_match_data(dev); - if (!pcie_cfg || !pcie_cfg->ops) { - dev_err(dev, "Invalid platform data\n"); - return -EINVAL; + if (!pcie_cfg) { + dev_err(dev, "No platform data\n"); + return -ENODATA; } - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; + if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) { + dev_err(dev, "No 
platform ops\n"); + return -ENODATA; + } pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_pm_runtime_put; + if (pcie_cfg->firmware_managed) { + struct pci_host_bridge *bridge; + struct pci_config_window *cfg; + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) { + ret = -ENOMEM; + goto err_pm_runtime_put; + } + + /* Parse and map our ECAM configuration space area */ + cfg = pci_host_common_ecam_create(dev, bridge, + &pci_qcom_ecam_ops); + if (IS_ERR(cfg)) { + ret = PTR_ERR(cfg); + goto err_pm_runtime_put; + } + + bridge->sysdata = cfg; + bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops; + bridge->msi_domain = true; + + ret = pci_host_probe(bridge); + if (ret) + goto err_pm_runtime_put; + + return 0; + } + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) { + ret = -ENOMEM; + goto err_pm_runtime_put; + } + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) { + ret = -ENOMEM; + goto err_pm_runtime_put; + } + + INIT_LIST_HEAD(&pcie->ports); + pci->dev = dev; pci->ops = &dw_pcie_ops; pp = &pci->pp; @@ -1619,12 +1855,6 @@ static int qcom_pcie_probe(struct platform_device *pdev) pcie->cfg = pcie_cfg; - pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); - if (IS_ERR(pcie->reset)) { - ret = PTR_ERR(pcie->reset); - goto err_pm_runtime_put; - } - pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); if (IS_ERR(pcie->parf)) { ret = PTR_ERR(pcie->parf); @@ -1647,12 +1877,6 @@ static int qcom_pcie_probe(struct platform_device *pdev) } } - pcie->phy = devm_phy_optional_get(dev, "pciephy"); - if (IS_ERR(pcie->phy)) { - ret = PTR_ERR(pcie->phy); - goto err_pm_runtime_put; - } - /* OPP table is optional */ ret = devm_pm_opp_of_add_table(dev); if (ret && ret != -ENODEV) { @@ -1699,9 +1923,23 @@ static int qcom_pcie_probe(struct platform_device *pdev) pp->ops = &qcom_pcie_dw_ops; - ret = phy_init(pcie->phy); - if (ret) - goto err_pm_runtime_put; + ret = qcom_pcie_parse_ports(pcie); + if (ret) { + if (ret != -ENOENT) { + dev_err_probe(pci->dev, ret, + "Failed to parse Root Port: %d\n", ret); + goto err_pm_runtime_put; + } + + /* + * In the case of properties not populated in Root Port node, + * fallback to the legacy method of parsing the Host Bridge + * node. This is to maintain DT backwards compatibility. + */ + ret = qcom_pcie_parse_legacy_binding(pcie); + if (ret) + goto err_pm_runtime_put; + } platform_set_drvdata(pdev, pcie); @@ -1746,7 +1984,9 @@ static int qcom_pcie_probe(struct platform_device *pdev) err_host_deinit: dw_pcie_host_deinit(pp); err_phy_exit: - phy_exit(pcie->phy); + qcom_pcie_phy_exit(pcie); + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + list_del(&port->list); err_pm_runtime_put: pm_runtime_put(dev); pm_runtime_disable(dev); @@ -1756,9 +1996,13 @@ err_pm_runtime_put: static int qcom_pcie_suspend_noirq(struct device *dev) { - struct qcom_pcie *pcie = dev_get_drvdata(dev); + struct qcom_pcie *pcie; int ret = 0; + pcie = dev_get_drvdata(dev); + if (!pcie) + return 0; + /* * Set minimum bandwidth required to keep data path functional during * suspend. 
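The MSI rework running through this series (DesignWare earlier, Mobiveil and Aardvark later) replaces each driver's private MSI irq_chip plus pci_msi_create_irq_domain() pair with a single parent domain from the generic MSI library, which is why the Kconfig hunks add IRQ_MSI_LIB selects. A condensed sketch of the shared pattern, using only structures and calls that appear in those conversions; names prefixed example_ are placeholders:

#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/property.h>

#define EXAMPLE_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS | \
					 MSI_FLAG_USE_DEF_CHIP_OPS | \
					 MSI_FLAG_NO_AFFINITY)
#define EXAMPLE_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK | \
					 MSI_FLAG_PCI_MSIX)

static const struct msi_parent_ops example_msi_parent_ops = {
	.required_flags		= EXAMPLE_MSI_FLAGS_REQUIRED,
	.supported_flags	= EXAMPLE_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.prefix			= "example-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

/* One parent domain replaces the old inner domain + PCI/MSI domain pair */
static int example_allocate_domain(struct device *dev,
				   const struct irq_domain_ops *ops,
				   void *host_data, unsigned int nvec,
				   struct irq_domain **out)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(dev),
		.ops		= ops,
		.host_data	= host_data,
		.size		= nvec,
	};

	*out = msi_create_parent_irq_domain(&info, &example_msi_parent_ops);
	return *out ? 0 : -ENOMEM;
}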
@@ -1812,9 +2056,13 @@ static int qcom_pcie_suspend_noirq(struct device *dev) static int qcom_pcie_resume_noirq(struct device *dev) { - struct qcom_pcie *pcie = dev_get_drvdata(dev); + struct qcom_pcie *pcie; int ret; + pcie = dev_get_drvdata(dev); + if (!pcie) + return 0; + if (pm_suspend_target_state != PM_SUSPEND_MEM) { ret = icc_enable(pcie->icc_cpu); if (ret) { @@ -1849,6 +2097,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, + { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed }, { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0}, { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, diff --git a/drivers/pci/controller/dwc/pcie-sophgo.c b/drivers/pci/controller/dwc/pcie-sophgo.c new file mode 100644 index 000000000000..ad4baaa34ffa --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-sophgo.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo DesignWare based PCIe host controller driver + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/property.h> +#include <linux/platform_device.h> + +#include "pcie-designware.h" + +#define to_sophgo_pcie(x) dev_get_drvdata((x)->dev) + +#define PCIE_INT_SIGNAL 0xc48 +#define PCIE_INT_EN 0xca0 + +#define PCIE_INT_SIGNAL_INTX GENMASK(8, 5) + +#define PCIE_INT_EN_INTX GENMASK(4, 1) +#define PCIE_INT_EN_INT_MSI BIT(5) + +struct sophgo_pcie { + struct dw_pcie pci; + void __iomem *app_base; + struct clk_bulk_data *clks; + unsigned int clk_cnt; + struct irq_domain *irq_domain; +}; + +static int sophgo_pcie_readl_app(struct sophgo_pcie *sophgo, u32 reg) +{ + return readl_relaxed(sophgo->app_base + reg); +} + +static void sophgo_pcie_writel_app(struct sophgo_pcie *sophgo, u32 val, u32 reg) +{ + writel_relaxed(val, sophgo->app_base + reg); +} + +static void sophgo_pcie_intx_handler(struct irq_desc *desc) +{ + struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long hwirq, reg; + + chained_irq_enter(chip, desc); + + reg = sophgo_pcie_readl_app(sophgo, PCIE_INT_SIGNAL); + reg = FIELD_GET(PCIE_INT_SIGNAL_INTX, reg); + + for_each_set_bit(hwirq, ®, PCI_NUM_INTX) + generic_handle_domain_irq(sophgo->irq_domain, hwirq); + + chained_irq_exit(chip, desc); +} + +static void sophgo_intx_irq_mask(struct irq_data *d) +{ + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&pp->lock, flags); + + val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN); + val &= ~FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq)); + sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +}; + +static void sophgo_intx_irq_unmask(struct irq_data *d) +{ + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&pp->lock, flags); + + val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN); + 
val |= FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq)); + sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +}; + +static struct irq_chip sophgo_intx_irq_chip = { + .name = "INTx", + .irq_mask = sophgo_intx_irq_mask, + .irq_unmask = sophgo_intx_irq_unmask, +}; + +static int sophgo_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &sophgo_intx_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = sophgo_pcie_intx_map, +}; + +static int sophgo_pcie_init_irq_domain(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + struct device *dev = sophgo->pci.dev; + struct fwnode_handle *intc; + int irq; + + intc = device_get_named_child_node(dev, "interrupt-controller"); + if (!intc) { + dev_err(dev, "missing child interrupt-controller node\n"); + return -ENODEV; + } + + irq = fwnode_irq_get(intc, 0); + if (irq < 0) { + dev_err(dev, "failed to get INTx irq number\n"); + fwnode_handle_put(intc); + return irq; + } + + sophgo->irq_domain = irq_domain_create_linear(intc, PCI_NUM_INTX, + &intx_domain_ops, pp); + fwnode_handle_put(intc); + if (!sophgo->irq_domain) { + dev_err(dev, "failed to get a INTx irq domain\n"); + return -EINVAL; + } + + return irq; +} + +static void sophgo_pcie_msi_enable(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&pp->lock, flags); + + val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN); + val |= PCIE_INT_EN_INT_MSI; + sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static int sophgo_pcie_host_init(struct dw_pcie_rp *pp) +{ + int irq; + + irq = sophgo_pcie_init_irq_domain(pp); + if (irq < 0) + return irq; + + irq_set_chained_handler_and_data(irq, sophgo_pcie_intx_handler, pp); + + sophgo_pcie_msi_enable(pp); + + return 0; +} + +static const struct dw_pcie_host_ops sophgo_pcie_host_ops = { + .init = sophgo_pcie_host_init, +}; + +static int sophgo_pcie_clk_init(struct sophgo_pcie *sophgo) +{ + struct device *dev = sophgo->pci.dev; + int ret; + + ret = devm_clk_bulk_get_all_enabled(dev, &sophgo->clks); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get clocks\n"); + + sophgo->clk_cnt = ret; + + return 0; +} + +static int sophgo_pcie_resource_get(struct platform_device *pdev, + struct sophgo_pcie *sophgo) +{ + sophgo->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); + if (IS_ERR(sophgo->app_base)) + return dev_err_probe(&pdev->dev, PTR_ERR(sophgo->app_base), + "failed to map app registers\n"); + + return 0; +} + +static int sophgo_pcie_configure_rc(struct sophgo_pcie *sophgo) +{ + struct dw_pcie_rp *pp; + + pp = &sophgo->pci.pp; + pp->ops = &sophgo_pcie_host_ops; + + return dw_pcie_host_init(pp); +} + +static int sophgo_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sophgo_pcie *sophgo; + int ret; + + sophgo = devm_kzalloc(dev, sizeof(*sophgo), GFP_KERNEL); + if (!sophgo) + return -ENOMEM; + + platform_set_drvdata(pdev, sophgo); + + sophgo->pci.dev = dev; + + ret = sophgo_pcie_resource_get(pdev, sophgo); + if (ret) + return ret; + + ret = sophgo_pcie_clk_init(sophgo); + if (ret) + return ret; + + return sophgo_pcie_configure_rc(sophgo); 
+} + +static const struct of_device_id sophgo_pcie_of_match[] = { + { .compatible = "sophgo,sg2044-pcie" }, + { } +}; +MODULE_DEVICE_TABLE(of, sophgo_pcie_of_match); + +static struct platform_driver sophgo_pcie_driver = { + .driver = { + .name = "sophgo-pcie", + .of_match_table = sophgo_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = sophgo_pcie_probe, +}; +builtin_platform_driver(sophgo_pcie_driver); diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig index 58ce034f701a..c50c4625937f 100644 --- a/drivers/pci/controller/mobiveil/Kconfig +++ b/drivers/pci/controller/mobiveil/Kconfig @@ -9,6 +9,7 @@ config PCIE_MOBIVEIL config PCIE_MOBIVEIL_HOST bool depends on PCI_MSI + select IRQ_MSI_LIB select PCIE_MOBIVEIL config PCIE_LAYERSCAPE_GEN4 diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index a600f46ee3c3..dbc72c73fd0a 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> @@ -353,16 +354,19 @@ static const struct irq_domain_ops intx_domain_ops = { .map = mobiveil_pcie_intx_map, }; -static struct irq_chip mobiveil_msi_irq_chip = { - .name = "Mobiveil PCIe MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define MOBIVEIL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +#define MOBIVEIL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) -static struct msi_domain_info mobiveil_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &mobiveil_msi_irq_chip, +static const struct msi_parent_ops mobiveil_msi_parent_ops = { + .required_flags = MOBIVEIL_MSI_FLAGS_REQUIRED, + .supported_flags = MOBIVEIL_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "Mobiveil-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -435,23 +439,20 @@ static const struct irq_domain_ops msi_domain_ops = { static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node); struct mobiveil_msi *msi = &pcie->rp.msi; mutex_init(&msi->lock); - msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors, - &msi_domain_ops, pcie); - if (!msi->dev_domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &mobiveil_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) { + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &msi_domain_ops, + .host_data = pcie, + .size = msi->num_of_vectors, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &mobiveil_msi_parent_ops); + if (!msi->dev_domain) { dev_err(dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->dev_domain); return -ENOMEM; } @@ -464,9 +465,8 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) struct mobiveil_root_port *rp = &pcie->rp; /* setup INTx */ - rp->intx_domain = 
irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX, - &intx_domain_ops, pcie); - + rp->intx_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops, + pcie); if (!rp->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h index 662f17f9bf65..7246de6a7176 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h @@ -135,7 +135,6 @@ struct mobiveil_msi { /* MSI information */ struct mutex lock; /* protect bitmap variable */ - struct irq_domain *msi_domain; struct irq_domain *dev_domain; phys_addr_t msi_pages_phys; int num_of_vectors; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 7bac64533b14..e34bea1ff0ac 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -13,6 +13,7 @@ #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> @@ -278,7 +279,6 @@ struct advk_pcie { struct irq_domain *irq_domain; struct irq_chip irq_chip; raw_spinlock_t irq_lock; - struct irq_domain *msi_domain; struct irq_domain *msi_inner_domain; raw_spinlock_t msi_irq_lock; DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); @@ -1332,18 +1332,6 @@ static void advk_msi_irq_unmask(struct irq_data *d) raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); } -static void advk_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void advk_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - static struct irq_chip advk_msi_bottom_irq_chip = { .name = "MSI", .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, @@ -1436,17 +1424,20 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; -static struct irq_chip advk_msi_irq_chip = { - .name = "advk-MSI", - .irq_mask = advk_msi_top_irq_mask, - .irq_unmask = advk_msi_top_irq_unmask, -}; - -static struct msi_domain_info advk_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI | - MSI_FLAG_PCI_MSIX, - .chip = &advk_msi_irq_chip, +#define ADVK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT | \ + MSI_FLAG_NO_AFFINITY) +#define ADVK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops advk_msi_parent_ops = { + .required_flags = ADVK_MSI_FLAGS_REQUIRED, + .supported_flags = ADVK_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "advk-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) @@ -1456,26 +1447,22 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) raw_spin_lock_init(&pcie->msi_irq_lock); mutex_init(&pcie->msi_used_lock); - pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM, - &advk_msi_domain_ops, pcie); - if (!pcie->msi_inner_domain) - return -ENOMEM; + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &advk_msi_domain_ops, + .host_data = pcie, + .size = MSI_IRQ_NUM, + }; - pcie->msi_domain = - 
pci_msi_create_irq_domain(dev_fwnode(dev), - &advk_msi_domain_info, - pcie->msi_inner_domain); - if (!pcie->msi_domain) { - irq_domain_remove(pcie->msi_inner_domain); + pcie->msi_inner_domain = msi_create_parent_irq_domain(&info, &advk_msi_parent_ops); + if (!pcie->msi_inner_domain) return -ENOMEM; - } return 0; } static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) { - irq_domain_remove(pcie->msi_domain); irq_domain_remove(pcie->msi_inner_domain); } diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index b37052863847..810d1c8de24e 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -22,7 +22,7 @@ static void gen_pci_unmap_cfg(void *ptr) pci_ecam_free((struct pci_config_window *)ptr); } -static struct pci_config_window *gen_pci_init(struct device *dev, +struct pci_config_window *pci_host_common_ecam_create(struct device *dev, struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops) { int err; @@ -50,6 +50,7 @@ static struct pci_config_window *gen_pci_init(struct device *dev, return cfg; } +EXPORT_SYMBOL_GPL(pci_host_common_ecam_create); int pci_host_common_init(struct platform_device *pdev, const struct pci_ecam_ops *ops) @@ -67,7 +68,7 @@ int pci_host_common_init(struct platform_device *pdev, platform_set_drvdata(pdev, bridge); /* Parse and map our Configuration Space windows */ - cfg = gen_pci_init(dev, bridge, ops); + cfg = pci_host_common_ecam_create(dev, bridge, ops); if (IS_ERR(cfg)) return PTR_ERR(cfg); diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h index 65bd9e032353..51c35ec0cf37 100644 --- a/drivers/pci/controller/pci-host-common.h +++ b/drivers/pci/controller/pci-host-common.h @@ -17,4 +17,6 @@ int pci_host_common_init(struct platform_device *pdev, const struct pci_ecam_ops *ops); void pci_host_common_remove(struct platform_device *pdev); +struct pci_config_window *pci_host_common_ecam_create(struct device *dev, + struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops); #endif diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index a4a2bac4f4b2..755651f33811 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1353,11 +1353,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, goto skip; } - ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port); - if (ret < 0) { - clk_put(port->clk); + ret = devm_add_action_or_reset(dev, mvebu_pcie_port_clk_put, port); + if (ret < 0) goto err; - } return 1; diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index b05ec8b0bb93..0a37a3f1809c 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -6,6 +6,7 @@ * Author: Tanmay Inamdar <tinamdar@apm.com> * Duc Dang <dhdang@apm.com> */ +#include <linux/bitfield.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> @@ -22,31 +23,49 @@ #define IDX_PER_GROUP 8 #define IRQS_PER_IDX 16 #define NR_HW_IRQS 16 -#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) +#define NR_MSI_BITS (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) +#define NR_MSI_VEC (NR_MSI_BITS / num_possible_cpus()) -struct xgene_msi_group { - struct xgene_msi *msi; - int gic_irq; - u32 msi_grp; -}; +#define MSI_GROUP_MASK GENMASK(22, 19) +#define MSI_INDEX_MASK GENMASK(18, 16) +#define MSI_INTR_MASK GENMASK(19, 16) + +#define MSInRx_HWIRQ_MASK GENMASK(6, 
4) +#define DATA_HWIRQ_MASK GENMASK(3, 0) struct xgene_msi { - struct device_node *node; struct irq_domain *inner_domain; u64 msi_addr; void __iomem *msi_regs; unsigned long *bitmap; struct mutex bitmap_lock; - struct xgene_msi_group *msi_groups; - int num_cpus; + unsigned int gic_irq[NR_HW_IRQS]; }; /* Global data */ -static struct xgene_msi xgene_msi_ctrl; +static struct xgene_msi *xgene_msi_ctrl; /* - * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where - * n is group number (0..F), x is index of registers in each group (0..7) + * X-Gene v1 has 16 frames of MSI termination registers MSInIRx, where n is + * frame number (0..15), x is index of registers in each frame (0..7). Each + * 32b register is at the beginning of a 64kB region, each frame occupying + * 512kB (and the whole thing 8MB of PA space). + * + * Each register supports 16 MSI vectors (0..15) to generate interrupts. A + * write to the MSInIRx from the PCI side generates an interrupt. A read + * from the MSInRx on the CPU side returns a bitmap of the pending MSIs in + * the lower 16 bits. A side effect of this read is that all pending + * interrupts are acknowledged and cleared. + * + * Additionally, each MSI termination frame has 1 MSIINTn register (n is + * 0..15) to indicate the MSI pending status caused by any of its 8 + * termination registers, reported as a bitmap in the lower 8 bits. Each 32b + * register is at the beginning of a 64kB region (and overall occupying an + * extra 1MB). + * + * There is one GIC IRQ assigned for each MSI termination frame, 16 in + * total. + * * The register layout is as follows: * MSI0IR0 base_addr * MSI0IR1 base_addr + 0x10000 @@ -67,107 +86,74 @@ static struct xgene_msi xgene_msi_ctrl; * MSIINT1 base_addr + 0x810000 * ... ... * MSIINTF base_addr + 0x8F0000 - * - * Each index register supports 16 MSI vectors (0..15) to generate interrupt. - * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination - * registers. - * - * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate - * the MSI pending status caused by 1 of its 8 index registers. */ /* MSInIRx read helper */ -static u32 xgene_msi_ir_read(struct xgene_msi *msi, - u32 msi_grp, u32 msir_idx) +static u32 xgene_msi_ir_read(struct xgene_msi *msi, u32 msi_grp, u32 msir_idx) { return readl_relaxed(msi->msi_regs + MSI_IR0 + - (msi_grp << 19) + (msir_idx << 16)); + (FIELD_PREP(MSI_GROUP_MASK, msi_grp) | + FIELD_PREP(MSI_INDEX_MASK, msir_idx))); } /* MSIINTn read helper */ static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) { - return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); + return readl_relaxed(msi->msi_regs + MSI_INT0 + + FIELD_PREP(MSI_INTR_MASK, msi_grp)); } /* - * With 2048 MSI vectors supported, the MSI message can be constructed using - * following scheme: - * - Divide into 8 256-vector groups - * Group 0: 0-255 - * Group 1: 256-511 - * Group 2: 512-767 - * ... - * Group 7: 1792-2047 - * - Each 256-vector group is divided into 16 16-vector groups - * As an example: 16 16-vector groups for 256-vector group 0-255 is - * Group 0: 0-15 - * Group 1: 16-32 - * ...
- * Group 15: 240-255 - * - The termination address of MSI vector in 256-vector group n and 16-vector - * group x is the address of MSIxIRn - * - The data for MSI vector in 16-vector group x is x + * In order to allow an MSI to be moved from one CPU to another without + * having to repaint both the address and the data (which cannot be done + * atomically), we statically partition the MSI frames between CPUs. Given + * that XGene-1 has 8 CPUs, each CPU gets two frames assigned to it. + * + * We adopt the convention that when an MSI is moved, it is configured to + * target the same register number in the congruent frame assigned to the + * new target CPU. This reserves a given MSI across all CPUs, and reduces + * the MSI capacity from 2048 to 256. + * + * Effectively, this amounts to: + * - hwirq[7]::cpu[2:0] is the target frame number (n in MSInIRx) + * - hwirq[6:4] is the register index in any given frame (x in MSInIRx) + * - hwirq[3:0] is the MSI data */ -static u32 hwirq_to_reg_set(unsigned long hwirq) -{ - return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); -} - -static u32 hwirq_to_group(unsigned long hwirq) -{ - return (hwirq % NR_HW_IRQS); -} - -static u32 hwirq_to_msi_data(unsigned long hwirq) +static irq_hw_number_t compute_hwirq(u8 frame, u8 index, u8 data) { - return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); + return (FIELD_PREP(BIT(7), FIELD_GET(BIT(3), frame)) | + FIELD_PREP(MSInRx_HWIRQ_MASK, index) | + FIELD_PREP(DATA_HWIRQ_MASK, data)); } static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct xgene_msi *msi = irq_data_get_irq_chip_data(data); - u32 reg_set = hwirq_to_reg_set(data->hwirq); - u32 group = hwirq_to_group(data->hwirq); - u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); + u64 target_addr; + u32 frame, msir; + int cpu; - msg->address_hi = upper_32_bits(target_addr); - msg->address_lo = lower_32_bits(target_addr); - msg->data = hwirq_to_msi_data(data->hwirq); -} + cpu = cpumask_first(irq_data_get_effective_affinity_mask(data)); + msir = FIELD_GET(MSInRx_HWIRQ_MASK, data->hwirq); + frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), data->hwirq)) | cpu; -/* - * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain - * the expected behaviour of .set_affinity for each MSI interrupt, the 16 - * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs - * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another - * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a - * consequence, the total MSI vectors that X-Gene v1 supports will be - * reduced to 256 (2048/8) vectors.
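To make the new hwirq layout concrete, the following standalone sketch (plain C; the MSI_HWIRQ_* macros are illustrative stand-ins, not defines from the driver) mirrors the encoding described above: bit 7 of the hwirq carries bit 3 of the frame number, bits 6:4 the register index, bits 3:0 the MSI data, and the low three frame bits come from the target CPU.

/*
 * Illustrative stand-ins for the GENMASK()-based fields used by
 * compute_hwirq(): MSInRx_HWIRQ_MASK covers bits 6:4, DATA_HWIRQ_MASK
 * bits 3:0, and bit 7 carries bit 3 of the frame number.
 */
#include <stdint.h>
#include <stdio.h>

#define MSI_HWIRQ_INDEX(h)   (((h) >> 4) & 0x7)  /* x in MSInIRx */
#define MSI_HWIRQ_DATA(h)    ((h) & 0xf)         /* MSI message data */
#define MSI_HWIRQ_FRAMEHI(h) (((h) >> 7) & 0x1)  /* bit 3 of frame */

/*
 * frame = hwirq[7] << 3 | cpu[2:0]: moving an MSI to another CPU only
 * changes the low frame bits; register index and data stay fixed.
 */
static uint8_t hwirq_to_frame(uint8_t hwirq, uint8_t cpu)
{
	return (uint8_t)((MSI_HWIRQ_FRAMEHI(hwirq) << 3) | (cpu & 0x7));
}

int main(void)
{
	uint8_t hwirq = 0xb5;	/* frame-hi = 1, index = 3, data = 5 */

	for (uint8_t cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> MSI%xIR%u, data %u\n", cpu,
		       hwirq_to_frame(hwirq, cpu),
		       MSI_HWIRQ_INDEX(hwirq), MSI_HWIRQ_DATA(hwirq));
	return 0;
}

For cpu 2 this prints "cpu 2 -> MSIaIR3, data 5": the same register index and data in the congruent frame, exactly the convention the comment describes.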
- */ -static int hwirq_to_cpu(unsigned long hwirq) -{ - return (hwirq % xgene_msi_ctrl.num_cpus); -} + target_addr = msi->msi_addr; + target_addr += (FIELD_PREP(MSI_GROUP_MASK, frame) | + FIELD_PREP(MSI_INTR_MASK, msir)); -static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) -{ - return (hwirq - hwirq_to_cpu(hwirq)); + msg->address_hi = upper_32_bits(target_addr); + msg->address_lo = lower_32_bits(target_addr); + msg->data = FIELD_GET(DATA_HWIRQ_MASK, data->hwirq); } static int xgene_msi_set_affinity(struct irq_data *irqdata, const struct cpumask *mask, bool force) { int target_cpu = cpumask_first(mask); - int curr_cpu; - - curr_cpu = hwirq_to_cpu(irqdata->hwirq); - if (curr_cpu == target_cpu) - return IRQ_SET_MASK_OK_DONE; - /* Update MSI number to target the new CPU */ - irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; + irq_data_update_effective_affinity(irqdata, cpumask_of(target_cpu)); + /* Force the core code to regenerate the message */ return IRQ_SET_MASK_OK; } @@ -181,25 +167,23 @@ static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct xgene_msi *msi = domain->host_data; - int msi_irq; + irq_hw_number_t hwirq; mutex_lock(&msi->bitmap_lock); - msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, - msi->num_cpus, 0); - if (msi_irq < NR_MSI_VEC) - bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); - else - msi_irq = -ENOSPC; + hwirq = find_first_zero_bit(msi->bitmap, NR_MSI_VEC); + if (hwirq < NR_MSI_VEC) + set_bit(hwirq, msi->bitmap); mutex_unlock(&msi->bitmap_lock); - if (msi_irq < 0) - return msi_irq; + if (hwirq >= NR_MSI_VEC) + return -ENOSPC; - irq_domain_set_info(domain, virq, msi_irq, + irq_domain_set_info(domain, virq, hwirq, &xgene_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq)); return 0; } @@ -209,12 +193,10 @@ static void xgene_irq_domain_free(struct irq_domain *domain, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct xgene_msi *msi = irq_data_get_irq_chip_data(d); - u32 hwirq; mutex_lock(&msi->bitmap_lock); - hwirq = hwirq_to_canonical_hwirq(d->hwirq); - bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); + clear_bit(d->hwirq, msi->bitmap); mutex_unlock(&msi->bitmap_lock); @@ -235,10 +217,11 @@ static const struct msi_parent_ops xgene_msi_parent_ops = { .init_dev_msi_info = msi_lib_init_dev_msi_info, }; -static int xgene_allocate_domains(struct xgene_msi *msi) +static int xgene_allocate_domains(struct device_node *node, + struct xgene_msi *msi) { struct irq_domain_info info = { - .fwnode = of_fwnode_handle(msi->node), + .fwnode = of_fwnode_handle(node), .ops = &xgene_msi_domain_ops, .size = NR_MSI_VEC, .host_data = msi, @@ -248,166 +231,111 @@ static int xgene_allocate_domains(struct xgene_msi *msi) return msi->inner_domain ? 
0 : -ENOMEM; } -static void xgene_free_domains(struct xgene_msi *msi) +static int xgene_msi_init_allocator(struct device *dev) { - if (msi->inner_domain) - irq_domain_remove(msi->inner_domain); -} - -static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) -{ - xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL); - if (!xgene_msi->bitmap) + xgene_msi_ctrl->bitmap = devm_bitmap_zalloc(dev, NR_MSI_VEC, GFP_KERNEL); + if (!xgene_msi_ctrl->bitmap) return -ENOMEM; - mutex_init(&xgene_msi->bitmap_lock); - - xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, - sizeof(struct xgene_msi_group), - GFP_KERNEL); - if (!xgene_msi->msi_groups) - return -ENOMEM; + mutex_init(&xgene_msi_ctrl->bitmap_lock); return 0; } static void xgene_msi_isr(struct irq_desc *desc) { + unsigned int *irqp = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - struct xgene_msi_group *msi_groups; - struct xgene_msi *xgene_msi; - int msir_index, msir_val, hw_irq, ret; - u32 intr_index, grp_select, msi_grp; + struct xgene_msi *xgene_msi = xgene_msi_ctrl; + unsigned long grp_pending; + int msir_idx; + u32 msi_grp; chained_irq_enter(chip, desc); - msi_groups = irq_desc_get_handler_data(desc); - xgene_msi = msi_groups->msi; - msi_grp = msi_groups->msi_grp; - - /* - * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt - * If bit x of this register is set (x is 0..7), one or more interrupts - * corresponding to MSInIRx is set. - */ - grp_select = xgene_msi_int_read(xgene_msi, msi_grp); - while (grp_select) { - msir_index = ffs(grp_select) - 1; - /* - * Calculate MSInIRx address to read to check for interrupts - * (refer to termination address and data assignment - * described in xgene_compose_msi_msg() ) - */ - msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); - while (msir_val) { - intr_index = ffs(msir_val) - 1; - /* - * Calculate MSI vector number (refer to the termination - * address and data assignment described in - * xgene_compose_msi_msg function) - */ - hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * - NR_HW_IRQS) + msi_grp; - /* - * As we have multiple hw_irq that maps to single MSI, - * always look up the virq using the hw_irq as seen from - * CPU0 - */ - hw_irq = hwirq_to_canonical_hwirq(hw_irq); - ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq); + msi_grp = irqp - xgene_msi->gic_irq; + + grp_pending = xgene_msi_int_read(xgene_msi, msi_grp); + + for_each_set_bit(msir_idx, &grp_pending, IDX_PER_GROUP) { + unsigned long msir; + int intr_idx; + + msir = xgene_msi_ir_read(xgene_msi, msi_grp, msir_idx); + + for_each_set_bit(intr_idx, &msir, IRQS_PER_IDX) { + irq_hw_number_t hwirq; + int ret; + + hwirq = compute_hwirq(msi_grp, msir_idx, intr_idx); + ret = generic_handle_domain_irq(xgene_msi->inner_domain, + hwirq); WARN_ON_ONCE(ret); - msir_val &= ~(1 << intr_index); - } - grp_select &= ~(1 << msir_index); - - if (!grp_select) { - /* - * We handled all interrupts happened in this group, - * resample this group MSI_INTx register in case - * something else has been made pending in the meantime - */ - grp_select = xgene_msi_int_read(xgene_msi, msi_grp); } } chained_irq_exit(chip, desc); } -static enum cpuhp_state pci_xgene_online; - static void xgene_msi_remove(struct platform_device *pdev) { - struct xgene_msi *msi = platform_get_drvdata(pdev); - - if (pci_xgene_online) - cpuhp_remove_state(pci_xgene_online); - cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD); - - kfree(msi->msi_groups); - - bitmap_free(msi->bitmap); - msi->bitmap = NULL; + for 
(int i = 0; i < NR_HW_IRQS; i++) { + unsigned int irq = xgene_msi_ctrl->gic_irq[i]; + if (!irq) + continue; + irq_set_chained_handler_and_data(irq, NULL, NULL); + } - xgene_free_domains(msi); + if (xgene_msi_ctrl->inner_domain) + irq_domain_remove(xgene_msi_ctrl->inner_domain); } -static int xgene_msi_hwirq_alloc(unsigned int cpu) +static int xgene_msi_handler_setup(struct platform_device *pdev) { - struct xgene_msi *msi = &xgene_msi_ctrl; - struct xgene_msi_group *msi_group; - cpumask_var_t mask; + struct xgene_msi *xgene_msi = xgene_msi_ctrl; int i; - int err; - for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { - msi_group = &msi->msi_groups[i]; - if (!msi_group->gic_irq) - continue; + for (i = 0; i < NR_HW_IRQS; i++) { + u32 msi_val; + int irq, err; - irq_set_chained_handler_and_data(msi_group->gic_irq, - xgene_msi_isr, msi_group); + /* + * MSInIRx registers are read-to-clear; before registering + * interrupt handlers, read all of them to clear spurious + * interrupts that may occur before the driver is probed. + */ + for (int msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) + xgene_msi_ir_read(xgene_msi, i, msi_idx); + + /* Read MSIINTn to confirm */ + msi_val = xgene_msi_int_read(xgene_msi, i); + if (msi_val) { + dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, i); + if (irq < 0) + return irq; + + xgene_msi->gic_irq[i] = irq; /* * Statically allocate MSI GIC IRQs to each CPU core. * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated * to each core. */ - if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - cpumask_clear(mask); - cpumask_set_cpu(cpu, mask); - err = irq_set_affinity(msi_group->gic_irq, mask); - if (err) - pr_err("failed to set affinity for GIC IRQ"); - free_cpumask_var(mask); - } else { - pr_err("failed to alloc CPU mask for affinity\n"); - err = -EINVAL; - } - + irq_set_status_flags(irq, IRQ_NO_BALANCING); + err = irq_set_affinity(irq, cpumask_of(i % num_possible_cpus())); if (err) { - irq_set_chained_handler_and_data(msi_group->gic_irq, - NULL, NULL); + pr_err("failed to set affinity for GIC IRQ\n"); return err; } - } - - return 0; -} - -static int xgene_msi_hwirq_free(unsigned int cpu) -{ - struct xgene_msi *msi = &xgene_msi_ctrl; - struct xgene_msi_group *msi_group; - int i; - - for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { - msi_group = &msi->msi_groups[i]; - if (!msi_group->gic_irq) - continue; - irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, - NULL); + irq_set_chained_handler_and_data(irq, xgene_msi_isr, + &xgene_msi_ctrl->gic_irq[i]); } + return 0; } @@ -419,14 +347,15 @@ static const struct of_device_id xgene_msi_match_table[] = { static int xgene_msi_probe(struct platform_device *pdev) { struct resource *res; - int rc, irq_index; struct xgene_msi *xgene_msi; - int virt_msir; - u32 msi_val, msi_idx; + int rc; - xgene_msi = &xgene_msi_ctrl; + xgene_msi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*xgene_msi_ctrl), + GFP_KERNEL); + if (!xgene_msi_ctrl) + return -ENOMEM; - platform_set_drvdata(pdev, xgene_msi); + xgene_msi = xgene_msi_ctrl; xgene_msi->msi_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(xgene_msi->msi_regs)) { @@ -434,66 +363,26 @@ static int xgene_msi_probe(struct platform_device *pdev) goto error; } xgene_msi->msi_addr = res->start; - xgene_msi->node = pdev->dev.of_node; - xgene_msi->num_cpus = num_possible_cpus(); - rc = xgene_msi_init_allocator(xgene_msi); + rc = xgene_msi_init_allocator(&pdev->dev); if (rc) { dev_err(&pdev->dev, "Error allocating MSI
bitmap\n"); goto error; } - rc = xgene_allocate_domains(xgene_msi); + rc = xgene_allocate_domains(dev_of_node(&pdev->dev), xgene_msi); if (rc) { dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); goto error; } - for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { - virt_msir = platform_get_irq(pdev, irq_index); - if (virt_msir < 0) { - rc = virt_msir; - goto error; - } - xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; - xgene_msi->msi_groups[irq_index].msi_grp = irq_index; - xgene_msi->msi_groups[irq_index].msi = xgene_msi; - } - - /* - * MSInIRx registers are read-to-clear; before registering - * interrupt handlers, read all of them to clear spurious - * interrupts that may occur before the driver is probed. - */ - for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { - for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) - xgene_msi_ir_read(xgene_msi, irq_index, msi_idx); - - /* Read MSIINTn to confirm */ - msi_val = xgene_msi_int_read(xgene_msi, irq_index); - if (msi_val) { - dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); - rc = -EINVAL; - goto error; - } - } - - rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", - xgene_msi_hwirq_alloc, NULL); - if (rc < 0) - goto err_cpuhp; - pci_xgene_online = rc; - rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, - xgene_msi_hwirq_free); + rc = xgene_msi_handler_setup(pdev); if (rc) - goto err_cpuhp; + goto error; dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); return 0; - -err_cpuhp: - dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); error: xgene_msi_remove(pdev); return rc; @@ -507,9 +396,4 @@ static struct platform_driver xgene_msi_driver = { .probe = xgene_msi_probe, .remove = xgene_msi_remove, }; - -static int __init xgene_pcie_msi_init(void) -{ - return platform_driver_register(&xgene_msi_driver); -} -subsys_initcall(xgene_pcie_msi_init); +builtin_platform_driver(xgene_msi_driver); diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 1e2ebbfa36d1..b95afa35201d 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -12,6 +12,7 @@ #include <linux/jiffies.h> #include <linux/memblock.h> #include <linux/init.h> +#include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -53,11 +54,9 @@ #define XGENE_V1_PCI_EXP_CAP 0x40 /* PCIe IP version */ -#define XGENE_PCIE_IP_VER_UNKN 0 #define XGENE_PCIE_IP_VER_1 1 #define XGENE_PCIE_IP_VER_2 2 -#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) struct xgene_pcie { struct device_node *node; struct device *dev; @@ -188,7 +187,6 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; } -#endif #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) static int xgene_get_csr_resource(struct acpi_device *adev, @@ -279,7 +277,6 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { }; #endif -#if defined(CONFIG_PCI_XGENE) static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr, u32 flags, u64 size) { @@ -594,6 +591,24 @@ static struct pci_ops xgene_pcie_ops = { .write = pci_generic_config_write32, }; +static bool xgene_check_pcie_msi_ready(void) +{ + struct device_node *np; + struct irq_domain *d; + + if (!IS_ENABLED(CONFIG_PCI_XGENE_MSI)) + return true; + + np = of_find_compatible_node(NULL, NULL, "apm,xgene1-msi"); + if (!np) + return true; + + d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI); + 
of_node_put(np); + + return d && irq_domain_is_msi_parent(d); +} + static int xgene_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -602,6 +617,10 @@ static int xgene_pcie_probe(struct platform_device *pdev) struct pci_host_bridge *bridge; int ret; + if (!xgene_check_pcie_msi_ready()) + return dev_err_probe(&pdev->dev, -EPROBE_DEFER, + "MSI driver not ready\n"); + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); if (!bridge) return -ENOMEM; @@ -610,10 +629,7 @@ static int xgene_pcie_probe(struct platform_device *pdev) port->node = of_node_get(dn); port->dev = dev; - - port->version = XGENE_PCIE_IP_VER_UNKN; - if (of_device_is_compatible(port->node, "apm,xgene-pcie")) - port->version = XGENE_PCIE_IP_VER_1; + port->version = XGENE_PCIE_IP_VER_1; ret = xgene_pcie_map_reg(port, pdev); if (ret) @@ -647,4 +663,3 @@ static struct platform_driver xgene_pcie_driver = { .probe = xgene_pcie_probe, }; builtin_platform_driver(xgene_pcie_driver); -#endif diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index a43f21eb8fbb..ea2ca2e70f20 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/init.h> #include <linux/module.h> @@ -29,7 +30,6 @@ struct altera_msi { DECLARE_BITMAP(used, MAX_MSI_VECTORS); struct mutex lock; /* protect "used" bitmap */ struct platform_device *pdev; - struct irq_domain *msi_domain; struct irq_domain *inner_domain; void __iomem *csr_base; void __iomem *vector_base; @@ -74,18 +74,20 @@ static void altera_msi_isr(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static struct irq_chip altera_msi_irq_chip = { - .name = "Altera PCIe MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define ALTERA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info altera_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &altera_msi_irq_chip, -}; +#define ALTERA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) +static const struct msi_parent_ops altera_msi_parent_ops = { + .required_flags = ALTERA_MSI_FLAGS_REQUIRED, + .supported_flags = ALTERA_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "Altera-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct altera_msi *msi = irq_data_get_irq_chip_data(data); @@ -164,20 +166,16 @@ static const struct irq_domain_ops msi_domain_ops = { static int altera_allocate_domains(struct altera_msi *msi) { - struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node); - - msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors, - &msi_domain_ops, msi); + struct irq_domain_info info = { + .fwnode = dev_fwnode(&msi->pdev->dev), + .ops = &msi_domain_ops, + .host_data = msi, + .size = msi->num_of_vectors, + }; + + msi->inner_domain = msi_create_parent_irq_domain(&info, &altera_msi_parent_ops); if (!msi->inner_domain) { - dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &altera_msi_domain_info, msi->inner_domain); - if 
(!msi->msi_domain) { dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->inner_domain); return -ENOMEM; } @@ -186,7 +184,6 @@ static int altera_allocate_domains(struct altera_msi *msi) static void altera_free_domains(struct altera_msi *msi) { - irq_domain_remove(msi->msi_domain); irq_domain_remove(msi->inner_domain); } diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 0fc77176a52e..3dbb7adc421c 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -852,10 +852,9 @@ static void aglx_isr(struct irq_desc *desc) static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; /* Setup INTx */ - pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX, + pcie->irq_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 92887b394eb4..9afbd02ded35 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -12,6 +12,7 @@ #include <linux/iopoll.h> #include <linux/ioport.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/list.h> @@ -46,6 +47,7 @@ #define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc +#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0 #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 #define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8 @@ -55,6 +57,9 @@ #define PCIE_RC_DL_MDIO_WR_DATA 0x1104 #define PCIE_RC_DL_MDIO_RD_DATA 0x1108 +#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804 +#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8 + #define PCIE_RC_PL_PHY_CTL_15 0x184c #define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000 #define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff @@ -265,7 +270,6 @@ struct brcm_msi { struct device *dev; void __iomem *base; struct device_node *np; - struct irq_domain *msi_domain; struct irq_domain *inner_domain; struct mutex lock; /* guards the alloc/free operations */ u64 target_addr; @@ -465,17 +469,20 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie, writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win)); } -static struct irq_chip brcm_msi_irq_chip = { - .name = "BRCM STB PCIe MSI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define BRCM_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info brcm_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &brcm_msi_irq_chip, +#define BRCM_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops brcm_msi_parent_ops = { + .required_flags = BRCM_MSI_FLAGS_REQUIRED, + .supported_flags = BRCM_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "BRCM-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static void brcm_pcie_msi_isr(struct irq_desc *desc) @@ -581,21 +588,18 @@ static const struct 
irq_domain_ops msi_domain_ops = { static int brcm_allocate_domains(struct brcm_msi *msi) { - struct fwnode_handle *fwnode = of_fwnode_handle(msi->np); struct device *dev = msi->dev; - msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi); - if (!msi->inner_domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(msi->np), + .ops = &msi_domain_ops, + .host_data = msi, + .size = msi->nr, + }; - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &brcm_msi_domain_info, - msi->inner_domain); - if (!msi->msi_domain) { + msi->inner_domain = msi_create_parent_irq_domain(&info, &brcm_msi_parent_ops); + if (!msi->inner_domain) { dev_err(dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->inner_domain); return -ENOMEM; } @@ -604,7 +608,6 @@ static int brcm_allocate_domains(struct brcm_msi *msi) static void brcm_free_domains(struct brcm_msi *msi) { - irq_domain_remove(msi->msi_domain); irq_domain_remove(msi->inner_domain); } @@ -970,7 +973,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * * The PCIe host controller by design must set the inbound viewport to * be a contiguous arrangement of all of the system's memory. In - * addition, its size mut be a power of two. To further complicate + * addition, its size must be a power of two. To further complicate * matters, the viewport must start on a pcie-address that is aligned * on a multiple of its size. If a portion of the viewport does not * represent system memory -- e.g. 3GB of memory requires a 4GB @@ -1072,7 +1075,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) void __iomem *base = pcie->base; struct pci_host_bridge *bridge; struct resource_entry *entry; - u32 tmp, burst, aspm_support; + u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap; u8 num_out_wins = 0; int num_inbound_wins = 0; int memc, ret; @@ -1180,6 +1183,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); + /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */ + num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK); + num_lanes = 0; + + /* + * Use hardware negotiated Max Link Width value by default. If the + * "num-lanes" DT property is present, assume that the chip's default + * link width capability information is incorrect/undesired and use the + * specified value instead. + */ + if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) && + num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) { + u32p_replace_bits(&tmp, num_lanes, + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK); + writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); + tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1); + u32p_replace_bits(&tmp, 1, + PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK); + writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1); + } + /* * For config space accesses on the RC, show the right class for * a PCIe-PCIe bridge (the default setting is to be EP mode). @@ -1333,11 +1357,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) if (ret) return ret; - /* - * Wait for 100ms after PERST# deassertion; see PCIe CEM specification - * sections 2.2, PCIe r5.0, 6.6.1. 
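The num-lanes override above is a read-modify-write of a 5-bit field. Below is a minimal userspace stand-in for what u32_get_bits()/u32p_replace_bits() do with the 0x1f0 mask (bits 8:4 of the link-capability word); the helper name is local to this example, not a kernel API.

/*
 * Plain shift/mask equivalent of u32p_replace_bits(&tmp, lanes,
 * PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK).
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_LINK_WIDTH_MASK 0x1f0u	/* bits 8:4, per the new #define */

static uint32_t replace_link_width(uint32_t lnkcap, uint32_t lanes)
{
	lnkcap &= ~MAX_LINK_WIDTH_MASK;		/* clear the field */
	lnkcap |= (lanes << 4) & MAX_LINK_WIDTH_MASK; /* insert new width */
	return lnkcap;
}

int main(void)
{
	uint32_t cap = 0x00000040;	/* bits 8:4 = 4, i.e. a x4 default */
	uint32_t n = (cap & MAX_LINK_WIDTH_MASK) >> 4;

	printf("default x%u -> x%u after \"num-lanes = <1>\"\n", n,
	       (replace_link_width(cap, 1) & MAX_LINK_WIDTH_MASK) >> 4);
	return 0;
}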
- */ - msleep(100); + msleep(PCIE_RESET_CONFIG_WAIT_MS); /* * Give the RC/EP even more time to wake up, before trying to diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index d2cb4c4f821a..9ba242ab9596 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -5,6 +5,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/of_irq.h> @@ -81,7 +82,6 @@ struct iproc_msi_grp { * @bitmap_lock: lock to protect access to the MSI bitmap * @nr_msi_vecs: total number of MSI vectors * @inner_domain: inner IRQ domain - * @msi_domain: MSI IRQ domain * @nr_eq_region: required number of 4K aligned memory region for MSI event * queues * @nr_msi_region: required number of 4K aligned address region for MSI posted @@ -101,7 +101,6 @@ struct iproc_msi { struct mutex bitmap_lock; unsigned int nr_msi_vecs; struct irq_domain *inner_domain; - struct irq_domain *msi_domain; unsigned int nr_eq_region; unsigned int nr_msi_region; void *eq_cpu; @@ -165,16 +164,18 @@ static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) return eq * EQ_LEN * sizeof(u32); } -static struct irq_chip iproc_msi_irq_chip = { - .name = "iProc-MSI", +#define IPROC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) +#define IPROC_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) + +static struct msi_parent_ops iproc_msi_parent_ops = { + .required_flags = IPROC_MSI_FLAGS_REQUIRED, + .supported_flags = IPROC_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "iProc-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; - -static struct msi_domain_info iproc_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX, - .chip = &iproc_msi_irq_chip, -}; - /* * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a * dedicated event queue. Each MSI group can support up to 64 MSI vectors. 
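The iProc conversion below follows the same shape as the Aardvark, Altera and Broadcom hunks above. As a reference, here is a condensed sketch of that recurring pattern; the foo_* names and FOO_NUM_MSI_VECTORS are placeholders, not any one driver's code.

/*
 * Sketch of the msi_parent_ops migration: the driver keeps only its
 * inner (hardware) domain, marks it as an MSI parent, and lets the MSI
 * core create the per-device PCI/MSI domain on demand.
 */
static const struct msi_parent_ops foo_msi_parent_ops = {
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS,
	.supported_flags	= MSI_GENERIC_FLAGS_MASK |
				  MSI_FLAG_PCI_MSIX,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.prefix			= "foo-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static int foo_allocate_domain(struct foo_pcie *pcie)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(pcie->dev),
		.ops		= &foo_msi_domain_ops,	/* inner alloc/free ops */
		.host_data	= pcie,
		.size		= FOO_NUM_MSI_VECTORS,
	};

	/*
	 * One call replaces the irq_domain_create_linear() +
	 * pci_msi_create_irq_domain() pair; teardown becomes a single
	 * irq_domain_remove() on the returned domain.
	 */
	pcie->inner_domain = msi_create_parent_irq_domain(&info,
							  &foo_msi_parent_ops);
	return pcie->inner_domain ? 0 : -ENOMEM;
}

Because the per-device domain is now created by the MSI core, each converted driver also drops its msi_domain pointer, its top-level irq_chip, and the second irq_domain_remove() on teardown, which is exactly what these hunks delete.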
@@ -446,27 +447,22 @@ static void iproc_msi_disable(struct iproc_msi *msi) static int iproc_msi_alloc_domains(struct device_node *node, struct iproc_msi *msi) { - msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs, - &msi_domain_ops, msi); + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &msi_domain_ops, + .host_data = msi, + .size = msi->nr_msi_vecs, + }; + + msi->inner_domain = msi_create_parent_irq_domain(&info, &iproc_msi_parent_ops); if (!msi->inner_domain) return -ENOMEM; - msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node), - &iproc_msi_domain_info, - msi->inner_domain); - if (!msi->msi_domain) { - irq_domain_remove(msi->inner_domain); - return -ENOMEM; - } - return 0; } static void iproc_msi_free_domains(struct iproc_msi *msi) { - if (msi->msi_domain) - irq_domain_remove(msi->msi_domain); - if (msi->inner_domain) irq_domain_remove(msi->inner_domain); } @@ -542,7 +538,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) msi->nr_cpus = num_possible_cpus(); if (msi->nr_cpus == 1) - iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI; + iproc_msi_parent_ops.supported_flags |= MSI_FLAG_MULTI_PCI_MSI; msi->nr_irqs = of_irq_count(node); if (!msi->nr_irqs) { diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index b55f5973414c..97147f43e41c 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> @@ -187,7 +188,6 @@ struct mtk_msi_set { * @saved_irq_state: IRQ enable state saved at suspend time * @irq_lock: lock protecting IRQ register access * @intx_domain: legacy INTx IRQ domain - * @msi_domain: MSI IRQ domain * @msi_bottom_domain: MSI IRQ bottom domain * @msi_sets: MSI sets information * @lock: lock protecting IRQ bit map @@ -210,7 +210,6 @@ struct mtk_gen3_pcie { u32 saved_irq_state; raw_spinlock_t irq_lock; struct irq_domain *intx_domain; - struct irq_domain *msi_domain; struct irq_domain *msi_bottom_domain; struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM]; struct mutex lock; @@ -526,30 +525,22 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) return 0; } -static void mtk_pcie_msi_irq_mask(struct irq_data *data) -{ - pci_msi_mask_irq(data); - irq_chip_mask_parent(data); -} - -static void mtk_pcie_msi_irq_unmask(struct irq_data *data) -{ - pci_msi_unmask_irq(data); - irq_chip_unmask_parent(data); -} - -static struct irq_chip mtk_msi_irq_chip = { - .irq_ack = irq_chip_ack_parent, - .irq_mask = mtk_pcie_msi_irq_mask, - .irq_unmask = mtk_pcie_msi_irq_unmask, - .name = "MSI", -}; - -static struct msi_domain_info mtk_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX | - MSI_FLAG_MULTI_PCI_MSI, - .chip = &mtk_msi_irq_chip, +#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops mtk_msi_parent_ops = { + .required_flags = MTK_MSI_FLAGS_REQUIRED, + .supported_flags = MTK_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = 
MSI_CHIP_FLAG_SET_ACK, + .prefix = "MTK3-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -756,29 +747,23 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) /* Setup MSI */ mutex_init(&pcie->lock); - pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node), - PCIE_MSI_IRQS_NUM, - &mtk_msi_bottom_domain_ops, pcie); + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &mtk_msi_bottom_domain_ops, + .host_data = pcie, + .size = PCIE_MSI_IRQS_NUM, + }; + + pcie->msi_bottom_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops); if (!pcie->msi_bottom_domain) { dev_err(dev, "failed to create MSI bottom domain\n"); ret = -ENODEV; goto err_msi_bottom_domain; } - pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode, - &mtk_msi_domain_info, - pcie->msi_bottom_domain); - if (!pcie->msi_domain) { - dev_err(dev, "failed to create MSI domain\n"); - ret = -ENODEV; - goto err_msi_domain; - } - of_node_put(intc_node); return 0; -err_msi_domain: - irq_domain_remove(pcie->msi_bottom_domain); err_msi_bottom_domain: irq_domain_remove(pcie->intx_domain); out_put_node: @@ -793,9 +778,6 @@ static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie) if (pcie->intx_domain) irq_domain_remove(pcie->intx_domain); - if (pcie->msi_domain) - irq_domain_remove(pcie->msi_domain); - if (pcie->msi_bottom_domain) irq_domain_remove(pcie->msi_bottom_domain); diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index e1934aa06c8d..24cc30a2ab6c 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -12,6 +12,7 @@ #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> @@ -180,7 +181,6 @@ struct mtk_pcie_soc { * @irq: GIC irq * @irq_domain: legacy INTx IRQ domain * @inner_domain: inner IRQ domain - * @msi_domain: MSI IRQ domain * @lock: protect the msi_irq_in_use bitmap * @msi_irq_in_use: bit map for assigned MSI IRQ */ @@ -200,7 +200,6 @@ struct mtk_pcie_port { int irq; struct irq_domain *irq_domain; struct irq_domain *inner_domain; - struct irq_domain *msi_domain; struct mutex lock; DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); }; @@ -470,40 +469,39 @@ static const struct irq_domain_ops msi_domain_ops = { .free = mtk_pcie_irq_domain_free, }; -static struct irq_chip mtk_msi_irq_chip = { - .name = "MTK PCIe MSI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) -static struct msi_domain_info mtk_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &mtk_msi_irq_chip, +static const struct msi_parent_ops mtk_msi_parent_ops = { + .required_flags = MTK_MSI_FLAGS_REQUIRED, + .supported_flags = MTK_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "MTK-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) { - struct fwnode_handle *fwnode = 
of_fwnode_handle(port->pcie->dev->of_node); - mutex_init(&port->lock); - port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM, - &msi_domain_ops, port); + struct irq_domain_info info = { + .fwnode = dev_fwnode(port->pcie->dev), + .ops = &msi_domain_ops, + .host_data = port, + .size = MTK_MSI_IRQS_NUM, + }; + + port->inner_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops); if (!port->inner_domain) { dev_err(port->pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } - port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info, - port->inner_domain); - if (!port->msi_domain) { - dev_err(port->pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(port->inner_domain); - return -ENOMEM; - } - return 0; } @@ -532,8 +530,6 @@ static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie) irq_domain_remove(port->irq_domain); if (IS_ENABLED(CONFIG_PCI_MSI)) { - if (port->msi_domain) - irq_domain_remove(port->msi_domain); if (port->inner_domain) irq_domain_remove(port->inner_domain); } diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index c32b803a47c7..fe288fd770c4 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -17,6 +17,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -597,30 +598,6 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) return IRQ_HANDLED; } -static void rcar_msi_top_irq_ack(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void rcar_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void rcar_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip rcar_msi_top_chip = { - .name = "PCIe MSI", - .irq_ack = rcar_msi_top_irq_ack, - .irq_mask = rcar_msi_top_irq_mask, - .irq_unmask = rcar_msi_top_irq_unmask, -}; - static void rcar_msi_irq_ack(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); @@ -718,30 +695,36 @@ static const struct irq_domain_ops rcar_msi_domain_ops = { .free = rcar_msi_domain_free, }; -static struct msi_domain_info rcar_msi_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &rcar_msi_top_chip, +#define RCAR_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT | \ + MSI_FLAG_NO_AFFINITY) + +#define RCAR_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops rcar_msi_parent_ops = { + .required_flags = RCAR_MSI_FLAGS_REQUIRED, + .supported_flags = RCAR_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "RCAR-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int rcar_allocate_domains(struct rcar_msi *msi) { struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); - struct irq_domain *parent; - - parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, - &rcar_msi_domain_ops, msi); - if (!parent) { - dev_err(pcie->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); - - msi->domain = 
pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent); + struct irq_domain_info info = { + .fwnode = dev_fwnode(pcie->dev), + .ops = &rcar_msi_domain_ops, + .host_data = msi, + .size = INT_PCI_MSI_NR, + }; + + msi->domain = msi_create_parent_irq_domain(&info, &rcar_msi_parent_ops); if (!msi->domain) { - dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); + dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } @@ -750,10 +733,7 @@ static int rcar_allocate_domains(struct rcar_msi *msi) static void rcar_free_domains(struct rcar_msi *msi) { - struct irq_domain *parent = msi->domain->parent; - irq_domain_remove(msi->domain); - irq_domain_remove(parent); } static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 55416b8311dd..300cd85fa035 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -518,9 +518,9 @@ static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip) { u32 status; - status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RL; - rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL); } static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip) diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index b9e7a8710cf0..ee1822ca01db 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -11,27 +11,19 @@ * ARM PCI Host generic driver. 
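The Rockchip hunks below replace hard-coded register aliases with the PCIe capability base at 0xc0 plus the standard offsets from pci_regs.h. A quick arithmetic check (runnable C; the PCI_EXP_* values are the standard capability offsets) shows the new expressions land on the same offsets, relative to PCIE_RC_CONFIG_BASE, as the old defines:

#include <stdio.h>

#define PCIE_RC_CONFIG_CR 0xc0	/* capability base, per the new #define */
#define PCI_EXP_DEVCAP  4	/* standard offsets from pci_regs.h */
#define PCI_EXP_DEVCTL  8
#define PCI_EXP_LNKCAP 12
#define PCI_EXP_LNKCTL 16

int main(void)
{
	printf("DEVCAP at 0x%x (was PCIE_RC_CONFIG_DCR, 0xc4)\n",
	       PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
	printf("DEVCTL at 0x%x (was PCIE_RC_CONFIG_DCSR, 0xc8)\n",
	       PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
	printf("LNKCAP at 0x%x (was PCIE_RC_CONFIG_LINK_CAP, 0xcc)\n",
	       PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
	printf("LNKCTL at 0x%x (was PCIE_RC_CONFIG_LCS, 0xd0)\n",
	       PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
	return 0;
}

Every line prints the offset of the old alias being removed in pcie-rockchip.h, so the rework changes notation only, not behavior, while letting the driver use the standard PCI_EXP_* field macros.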
*/ +#include <linux/bitfield.h> #include <linux/bitrev.h> -#include <linux/clk.h> -#include <linux/delay.h> #include <linux/gpio/consumer.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> -#include <linux/kernel.h> -#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_pci.h> -#include <linux/pci.h> -#include <linux/pci_ids.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> -#include <linux/reset.h> -#include <linux/regmap.h> #include "../pci.h" #include "pcie-rockchip.h" @@ -40,18 +32,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) { u32 status; - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); } static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) { u32 status; - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); } static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) @@ -269,7 +261,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) scale = 3; /* 0.001x */ curr = curr / 1000; /* convert to mA */ power = (curr * 3300) / 1000; /* milliwatt */ - while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { + while (power > FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) { if (!scale) { dev_warn(rockchip->dev, "invalid power supply\n"); return; @@ -278,10 +270,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) power = power / 10; } - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); - status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | - (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); + status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power); + status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); } /** @@ -309,14 +301,14 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_set_power_limit(rockchip); /* Set RC's clock architecture as common clock */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKSTA_SLC << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); /* Set RC's RCB to 128 */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RCB; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); /* Enable Gen1 training */ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, @@ -325,7 +317,7 @@ static int 
rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) msleep(PCIE_T_PVPERL_MS); gpiod_set_value_cansleep(rockchip->perst_gpio, 1); - msleep(PCIE_T_RRS_READY_MS); + msleep(PCIE_RESET_CONFIG_WAIT_MS); /* 500ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, @@ -341,9 +333,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) * Enable retrain for gen2. This should be configured only after * gen1 finished. */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); + status &= ~PCI_EXP_LNKCTL2_TLS; + status |= PCI_EXP_LNKCTL2_TLS_5_0GT; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RL; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, status, PCIE_LINK_IS_GEN2(status), 20, @@ -380,15 +376,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) /* Clear L0s from RC's link cap */ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); - status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); + status &= ~PCI_EXP_LNKCAP_ASPM_L0S; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); } - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); - status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; - status |= PCIE_RC_CONFIG_DCSR_MPS_256; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL); + status &= ~PCI_EXP_DEVCTL_PAYLOAD; + status |= PCI_EXP_DEVCTL_PAYLOAD_256B; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL); return 0; err_power_off_phy: @@ -439,7 +435,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) dev_dbg(dev, "malformed TLP received from the link\n"); if (sub_reg & PCIE_CORE_INT_UCR) - dev_dbg(dev, "malformed TLP received from the link\n"); + dev_dbg(dev, "Unexpected Completion received from the link\n"); if (sub_reg & PCIE_CORE_INT_FCE) dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); @@ -489,7 +485,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) dev_dbg(dev, "fatal error interrupt received\n"); if (reg & PCIE_CLIENT_INT_NFATAL_ERR) - dev_dbg(dev, "no fatal error interrupt received\n"); + dev_dbg(dev, "non fatal error interrupt received\n"); if (reg & PCIE_CLIENT_INT_CORR_ERR) dev_dbg(dev, "correctable error interrupt received\n"); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 5864a20323f2..72a2c045f6fe 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -155,17 +155,7 @@ #define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00) #define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0) #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) -#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 -#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 
0xff -#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 -#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) -#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) -#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) -#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) -#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) -#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) -#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0) +#define PCIE_RC_CONFIG_CR (PCIE_RC_CONFIG_BASE + 0xc0) #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) #define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) @@ -215,20 +205,6 @@ #define RC_REGION_0_TYPE_MASK GENMASK(3, 0) #define MAX_AXI_WRAPPER_REGION_NUM 33 -#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0 -#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1 -#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2 -#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3 -#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4 -#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27 #define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5) #define ROCKCHIP_PCIE_MSG_ROUTING(route) \ (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK) diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c index dc9690a535e1..b037c8f315e4 100644 --- a/drivers/pci/controller/pcie-xilinx-dma-pl.c +++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c @@ -7,6 +7,7 @@ #include <linux/bitfield.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> @@ -90,7 +91,6 @@ struct xilinx_pl_dma_variant { }; struct xilinx_msi { - struct irq_domain *msi_domain; unsigned long *bitmap; struct irq_domain *dev_domain; struct mutex lock; /* Protect bitmap variable */ @@ -373,20 +373,20 @@ static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static struct irq_chip xilinx_msi_irq_chip = { - .name = "pl_dma:PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info xilinx_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &xilinx_msi_irq_chip, -}; +#define XILINX_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) +static const struct msi_parent_ops xilinx_msi_parent_ops = { + .required_flags = XILINX_MSI_FLAGS_REQUIRED, + .supported_flags = XILINX_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "pl_dma-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data); @@ -458,11 +458,6 @@ static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port) 
irq_domain_remove(msi->dev_domain); msi->dev_domain = NULL; } - - if (msi->msi_domain) { - irq_domain_remove(msi->msi_domain); - msi->msi_domain = NULL; - } } static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port) @@ -470,19 +465,17 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port) struct device *dev = port->dev; struct xilinx_msi *msi = &port->msi; int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long); - struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node); - - msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS, - &dev_msi_domain_ops, port); + struct irq_domain_info info = { + .fwnode = dev_fwnode(port->dev), + .ops = &dev_msi_domain_ops, + .host_data = port, + .size = XILINX_NUM_MSI_IRQS, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops); if (!msi->dev_domain) goto out; - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &xilinx_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) - goto out; - mutex_init(&msi->lock); msi->bitmap = kzalloc(size, GFP_KERNEL); if (!msi->bitmap) diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index c8b05477b719..05b8c205493c 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -10,6 +10,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -145,7 +146,6 @@ #define LINK_WAIT_USLEEP_MAX 100000 struct nwl_msi { /* MSI information */ - struct irq_domain *msi_domain; DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR); struct irq_domain *dev_domain; struct mutex lock; /* protect bitmap variable */ @@ -418,19 +418,22 @@ static const struct irq_domain_ops intx_domain_ops = { }; #ifdef CONFIG_PCI_MSI -static struct irq_chip nwl_msi_irq_chip = { - .name = "nwl_pcie:msi", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; -static struct msi_domain_info nwl_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &nwl_msi_irq_chip, +#define NWL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +#define NWL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops nwl_msi_parent_ops = { + .required_flags = NWL_MSI_FLAGS_REQUIRED, + .supported_flags = NWL_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "nwl-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; + #endif static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -495,22 +498,19 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) { #ifdef CONFIG_PCI_MSI struct device *dev = pcie->dev; - struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node); struct nwl_msi *msi = &pcie->msi; - - msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie); + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &dev_msi_domain_ops, + .host_data = pcie, + .size = INT_PCI_MSI_NR, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &nwl_msi_parent_ops); if (!msi->dev_domain) { dev_err(dev, "failed to create dev IRQ domain\n"); return -ENOMEM; } - 
msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &nwl_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) { - dev_err(dev, "failed to create msi IRQ domain\n"); - irq_domain_remove(msi->dev_domain); - return -ENOMEM; - } #endif return 0; } diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index e36aa874bae9..f121836c3cf4 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -203,11 +204,6 @@ static void xilinx_msi_top_irq_ack(struct irq_data *d) */ } -static struct irq_chip xilinx_msi_top_chip = { - .name = "PCIe MSI", - .irq_ack = xilinx_msi_top_irq_ack, -}; - static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data); @@ -264,29 +260,42 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = { .free = xilinx_msi_domain_free, }; -static struct msi_domain_info xilinx_msi_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY, - .chip = &xilinx_msi_top_chip, +static bool xilinx_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + chip->irq_ack = xilinx_msi_top_irq_ack; + return true; +} + +#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +static const struct msi_parent_ops xilinx_msi_parent_ops = { + .required_flags = XILINX_MSI_FLAGS_REQUIRED, + .supported_flags = MSI_GENERIC_FLAGS_MASK, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "xilinx-", + .init_dev_msi_info = xilinx_init_dev_msi_info, }; static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie) { - struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); - struct irq_domain *parent; - - parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS, - &xilinx_msi_domain_ops, pcie); - if (!parent) { - dev_err(pcie->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); - - pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent); + struct irq_domain_info info = { + .fwnode = dev_fwnode(pcie->dev), + .ops = &xilinx_msi_domain_ops, + .host_data = pcie, + .size = XILINX_NUM_MSI_IRQS, + }; + + pcie->msi_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops); if (!pcie->msi_domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); return -ENOMEM; } @@ -295,10 +304,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie) static void xilinx_free_msi_domains(struct xilinx_pcie *pcie) { - struct irq_domain *parent = pcie->msi_domain->parent; - irq_domain_remove(pcie->msi_domain); - irq_domain_remove(parent); } /* INTx Functions */ diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig index c0e14146d7e4..62120101139c 100644 --- a/drivers/pci/controller/plda/Kconfig +++ b/drivers/pci/controller/plda/Kconfig @@ -5,6 +5,7 @@ menu "PLDA-based PCIe controllers" config PCIE_PLDA_HOST bool + select IRQ_MSI_LIB config PCIE_MICROCHIP_HOST tristate "Microchip AXI PCIe controller" diff 
--git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c index 3abedf723215..8e2db2e5b64b 100644 --- a/drivers/pci/controller/plda/pcie-plda-host.c +++ b/drivers/pci/controller/plda/pcie-plda-host.c @@ -11,6 +11,7 @@ #include <linux/align.h> #include <linux/bitfield.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/pci_regs.h> @@ -134,42 +135,41 @@ static const struct irq_domain_ops msi_domain_ops = { .free = plda_irq_msi_domain_free, }; -static struct irq_chip plda_msi_irq_chip = { - .name = "PLDA PCIe MSI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info plda_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &plda_msi_irq_chip, +#define PLDA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) +#define PLDA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) + +static const struct msi_parent_ops plda_msi_parent_ops = { + .required_flags = PLDA_MSI_FLAGS_REQUIRED, + .supported_flags = PLDA_MSI_FLAGS_SUPPORTED, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "PLDA-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int plda_allocate_msi_domains(struct plda_pcie_rp *port) { struct device *dev = port->dev; - struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node); struct plda_msi *msi = &port->msi; mutex_init(&port->msi.lock); - msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port); + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &msi_domain_ops, + .host_data = port, + .size = msi->num_vectors, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops); if (!msi->dev_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &plda_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) { - dev_err(dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->dev_domain); - return -ENOMEM; - } - return 0; } @@ -563,7 +563,6 @@ static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie) irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL); irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL); - irq_domain_remove(pcie->msi.msi_domain); irq_domain_remove(pcie->msi.dev_domain); irq_domain_remove(pcie->intx_domain); diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h index 61ece26065ea..6b8665df7bf0 100644 --- a/drivers/pci/controller/plda/pcie-plda.h +++ b/drivers/pci/controller/plda/pcie-plda.h @@ -164,7 +164,6 @@ struct plda_pcie_host_ops { struct plda_msi { struct mutex lock; /* Protect used bitmap */ - struct irq_domain *msi_domain; struct irq_domain *dev_domain; u32 num_vectors; u64 vector_phy; diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c index e73c1b7bc8ef..3caf53c6c082 100644 --- a/drivers/pci/controller/plda/pcie-starfive.c +++ b/drivers/pci/controller/plda/pcie-starfive.c @@ -368,7 +368,7 @@ static int starfive_pcie_host_init(struct plda_pcie_rp *plda) * of 100ms following exit from a conventional reset before * sending a configuration request to the device. 
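 * This is the delay that PCIE_RESET_CONFIG_WAIT_MS, used below, encodes.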
*/ - msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS); + msleep(PCIE_RESET_CONFIG_WAIT_MS); if (starfive_pcie_host_wait_for_link(pcie)) dev_info(dev, "port link down\n"); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 8df064b62a2f..9bbb0ff4cc15 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -7,6 +7,7 @@ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> @@ -174,58 +175,52 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq); } -/* - * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. - */ static void vmd_irq_enable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; - unsigned long flags; - raw_spin_lock_irqsave(&list_lock, flags); - WARN_ON(vmdirq->enabled); - list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); - vmdirq->enabled = true; - raw_spin_unlock_irqrestore(&list_lock, flags); + scoped_guard(raw_spinlock_irqsave, &list_lock) { + WARN_ON(vmdirq->enabled); + list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); + vmdirq->enabled = true; + } +} +static void vmd_pci_msi_enable(struct irq_data *data) +{ + vmd_irq_enable(data->parent_data); data->chip->irq_unmask(data); } static void vmd_irq_disable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; - unsigned long flags; - - data->chip->irq_mask(data); - raw_spin_lock_irqsave(&list_lock, flags); - if (vmdirq->enabled) { - list_del_rcu(&vmdirq->node); - vmdirq->enabled = false; + scoped_guard(raw_spinlock_irqsave, &list_lock) { + if (vmdirq->enabled) { + list_del_rcu(&vmdirq->node); + vmdirq->enabled = false; + } } - raw_spin_unlock_irqrestore(&list_lock, flags); +} + +static void vmd_pci_msi_disable(struct irq_data *data) +{ + data->chip->irq_mask(data); + vmd_irq_disable(data->parent_data); } static struct irq_chip vmd_msi_controller = { .name = "VMD-MSI", - .irq_enable = vmd_irq_enable, - .irq_disable = vmd_irq_disable, .irq_compose_msi_msg = vmd_compose_msi_msg, }; -static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, - msi_alloc_info_t *arg) -{ - return 0; -} - /* * XXX: We can be even smarter selecting the best IRQ once we solve the * affinity problem. 
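 * Until then, vmd_next_irq() below simply hands out the eligible vector
 * with the lowest use count.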
*/ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) { - unsigned long flags; int i, best; if (vmd->msix_count == 1 + vmd->first_vec) @@ -242,113 +237,129 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d return &vmd->irqs[vmd->first_vec]; } - raw_spin_lock_irqsave(&list_lock, flags); - best = vmd->first_vec + 1; - for (i = best; i < vmd->msix_count; i++) - if (vmd->irqs[i].count < vmd->irqs[best].count) - best = i; - vmd->irqs[best].count++; - raw_spin_unlock_irqrestore(&list_lock, flags); + scoped_guard(raw_spinlock_irq, &list_lock) { + best = vmd->first_vec + 1; + for (i = best; i < vmd->msix_count; i++) + if (vmd->irqs[i].count < vmd->irqs[best].count) + best = i; + vmd->irqs[best].count++; + } return &vmd->irqs[best]; } -static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, - unsigned int virq, irq_hw_number_t hwirq, - msi_alloc_info_t *arg) +static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs); + +static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) { - struct msi_desc *desc = arg->desc; - struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); - struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); + struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc; + struct vmd_dev *vmd = domain->host_data; + struct vmd_irq *vmdirq; - if (!vmdirq) - return -ENOMEM; + for (int i = 0; i < nr_irqs; ++i) { + vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); + if (!vmdirq) { + vmd_msi_free(domain, virq, i); + return -ENOMEM; + } - INIT_LIST_HEAD(&vmdirq->node); - vmdirq->irq = vmd_next_irq(vmd, desc); - vmdirq->virq = virq; + INIT_LIST_HEAD(&vmdirq->node); + vmdirq->irq = vmd_next_irq(vmd, desc); + vmdirq->virq = virq + i; + + irq_domain_set_info(domain, virq + i, vmdirq->irq->virq, + &vmd_msi_controller, vmdirq, + handle_untracked_irq, vmd, NULL); + } - irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq, - handle_untracked_irq, vmd, NULL); return 0; } -static void vmd_msi_free(struct irq_domain *domain, - struct msi_domain_info *info, unsigned int virq) +static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) { - struct vmd_irq *vmdirq = irq_get_chip_data(virq); - unsigned long flags; + struct vmd_irq *vmdirq; + + for (int i = 0; i < nr_irqs; ++i) { + vmdirq = irq_get_chip_data(virq + i); - synchronize_srcu(&vmdirq->irq->srcu); + synchronize_srcu(&vmdirq->irq->srcu); - /* XXX: Potential optimization to rebalance */ - raw_spin_lock_irqsave(&list_lock, flags); - vmdirq->irq->count--; - raw_spin_unlock_irqrestore(&list_lock, flags); + /* XXX: Potential optimization to rebalance */ + scoped_guard(raw_spinlock_irq, &list_lock) + vmdirq->irq->count--; - kfree(vmdirq); + kfree(vmdirq); + } } -static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, - int nvec, msi_alloc_info_t *arg) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct vmd_dev *vmd = vmd_from_bus(pdev->bus); +static const struct irq_domain_ops vmd_msi_domain_ops = { + .alloc = vmd_msi_alloc, + .free = vmd_msi_free, +}; - if (nvec > vmd->msix_count) - return vmd->msix_count; +static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, + struct msi_domain_info *info) +{ + if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX)) + return false; - memset(arg, 0, sizeof(*arg)); - return 0; -} + if 
(!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; -static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) -{ - arg->desc = desc; + info->chip->irq_enable = vmd_pci_msi_enable; + info->chip->irq_disable = vmd_pci_msi_disable; + return true; } -static struct msi_domain_ops vmd_msi_domain_ops = { - .get_hwirq = vmd_get_hwirq, - .msi_init = vmd_msi_init, - .msi_free = vmd_msi_free, - .msi_prepare = vmd_msi_prepare, - .set_desc = vmd_set_desc, -}; +#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX) +#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info vmd_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .ops = &vmd_msi_domain_ops, - .chip = &vmd_msi_controller, +static const struct msi_parent_ops vmd_msi_parent_ops = { + .supported_flags = VMD_MSI_FLAGS_SUPPORTED, + .required_flags = VMD_MSI_FLAGS_REQUIRED, + .bus_select_token = DOMAIN_BUS_VMD_MSI, + .bus_select_mask = MATCH_PCI_MSI, + .prefix = "VMD-", + .init_dev_msi_info = vmd_init_dev_msi_info, }; -static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable) -{ - u16 reg; - - pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg); - reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) : - (reg | VMCONFIG_MSI_REMAP); - pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg); -} - static int vmd_create_irq_domain(struct vmd_dev *vmd) { - struct fwnode_handle *fn; + struct irq_domain_info info = { + .size = vmd->msix_count, + .ops = &vmd_msi_domain_ops, + .host_data = vmd, + }; - fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); - if (!fn) + info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI", + vmd->sysdata.domain); + if (!info.fwnode) return -ENODEV; - vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL); + vmd->irq_domain = msi_create_parent_irq_domain(&info, + &vmd_msi_parent_ops); if (!vmd->irq_domain) { - irq_domain_free_fwnode(fn); + irq_domain_free_fwnode(info.fwnode); return -ENODEV; } return 0; } +static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable) +{ + u16 reg; + + pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg); + reg = enable ? 
(reg & ~VMCONFIG_MSI_REMAP) : + (reg | VMCONFIG_MSI_REMAP); + pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg); +} + static void vmd_remove_irq_domain(struct vmd_dev *vmd) { /* @@ -387,29 +398,24 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, { struct vmd_dev *vmd = vmd_from_bus(bus); void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); - unsigned long flags; - int ret = 0; if (!addr) return -EFAULT; - raw_spin_lock_irqsave(&vmd->cfg_lock, flags); + guard(raw_spinlock_irqsave)(&vmd->cfg_lock); switch (len) { case 1: *value = readb(addr); - break; + return 0; case 2: *value = readw(addr); - break; + return 0; case 4: *value = readl(addr); - break; + return 0; default: - ret = -EINVAL; - break; + return -EINVAL; } - raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); - return ret; } /* @@ -422,32 +428,27 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, { struct vmd_dev *vmd = vmd_from_bus(bus); void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); - unsigned long flags; - int ret = 0; if (!addr) return -EFAULT; - raw_spin_lock_irqsave(&vmd->cfg_lock, flags); + guard(raw_spinlock_irqsave)(&vmd->cfg_lock); switch (len) { case 1: writeb(value, addr); readb(addr); - break; + return 0; case 2: writew(value, addr); readw(addr); - break; + return 0; case 4: writel(value, addr); readl(addr); - break; + return 0; default: - ret = -EINVAL; - break; + return -EINVAL; } - raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); - return ret; } static struct pci_ops vmd_ops = { @@ -889,12 +890,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) ret = vmd_create_irq_domain(vmd); if (ret) return ret; - - /* - * Override the IRQ domain bus token so the domain can be - * distinguished from a regular PCI/MSI domain. - */ - irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI); } else { vmd_set_msi_remapping(vmd, false); } @@ -1129,6 +1124,8 @@ static const struct pci_device_id vmd_ids[] = { .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, 0xb06f), .driver_data = VMD_FEATS_CLIENT,}, + {PCI_VDEVICE(INTEL, 0xb07f), + .driver_data = VMD_FEATS_CLIENT,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig index 1c5d82eb57d4..8dad291be8b8 100644 --- a/drivers/pci/endpoint/Kconfig +++ b/drivers/pci/endpoint/Kconfig @@ -28,6 +28,14 @@ config PCI_ENDPOINT_CONFIGFS configure the endpoint function and used to bind the function with an endpoint controller. +config PCI_ENDPOINT_MSI_DOORBELL + bool "PCI Endpoint MSI Doorbell Support" + depends on PCI_ENDPOINT && GENERIC_MSI_IRQ + help + This enables the EP's MSI interrupt controller to function as a + doorbell. The RC can trigger a doorbell in the EP by writing data to a + dedicated BAR, which the EP maps to the controller's message address. 
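As a sketch of what this option enables, ringing the doorbell from the host (RC) side is a single MMIO write. The helper below is hypothetical host-side test code, not part of this series; the bar/offset/data parameters are assumed to come from the doorbell_bar, doorbell_offset and doorbell_data registers that pci-epf-test exposes further below:

#include <linux/io.h>
#include <linux/pci.h>

/* Host-side sketch: ring the endpoint's doorbell (hypothetical helper). */
static void ring_ep_doorbell(struct pci_dev *pdev, int bar, u32 offset, u32 data)
{
	void __iomem *db = pci_iomap(pdev, bar, 0);

	if (!db)
		return;
	/*
	 * The write lands on the EP MSI controller's message address and
	 * raises the doorbell IRQ on the endpoint.
	 */
	writel(data, db + offset);
	pci_iounmap(pdev, db);
}

The endpoint-side counterpart is the request_irq() of the doorbell virq in pci_epf_test_enable_doorbell() below.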
+ source "drivers/pci/endpoint/functions/Kconfig" endmenu diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile index 95b2fe47e3b0..b4869d52053a 100644 --- a/drivers/pci/endpoint/Makefile +++ b/drivers/pci/endpoint/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\ pci-epc-mem.o functions/ +obj-$(CONFIG_PCI_ENDPOINT_MSI_DOORBELL) += pci-ep-msi.o diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 50eb4106369f..e091193bd8a8 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -11,12 +11,14 @@ #include <linux/dmaengine.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/msi.h> #include <linux/slab.h> #include <linux/pci_ids.h> #include <linux/random.h> #include <linux/pci-epc.h> #include <linux/pci-epf.h> +#include <linux/pci-ep-msi.h> #include <linux/pci_regs.h> #define IRQ_TYPE_INTX 0 @@ -29,6 +31,8 @@ #define COMMAND_READ BIT(3) #define COMMAND_WRITE BIT(4) #define COMMAND_COPY BIT(5) +#define COMMAND_ENABLE_DOORBELL BIT(6) +#define COMMAND_DISABLE_DOORBELL BIT(7) #define STATUS_READ_SUCCESS BIT(0) #define STATUS_READ_FAIL BIT(1) @@ -39,6 +43,11 @@ #define STATUS_IRQ_RAISED BIT(6) #define STATUS_SRC_ADDR_INVALID BIT(7) #define STATUS_DST_ADDR_INVALID BIT(8) +#define STATUS_DOORBELL_SUCCESS BIT(9) +#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10) +#define STATUS_DOORBELL_ENABLE_FAIL BIT(11) +#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12) +#define STATUS_DOORBELL_DISABLE_FAIL BIT(13) #define FLAG_USE_DMA BIT(0) @@ -66,6 +75,7 @@ struct pci_epf_test { bool dma_supported; bool dma_private; const struct pci_epc_features *epc_features; + struct pci_epf_bar db_bar; }; struct pci_epf_test_reg { @@ -80,6 +90,9 @@ struct pci_epf_test_reg { __le32 irq_number; __le32 flags; __le32 caps; + __le32 doorbell_bar; + __le32 doorbell_offset; + __le32 doorbell_data; } __packed; static struct pci_epf_header test_header = { @@ -667,6 +680,115 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, } } +static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data) +{ + struct pci_epf_test *epf_test = data; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + u32 status = le32_to_cpu(reg->status); + + status |= STATUS_DOORBELL_SUCCESS; + reg->status = cpu_to_le32(status); + pci_epf_test_raise_irq(epf_test, reg); + + return IRQ_HANDLED; +} + +static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test) +{ + struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar]; + struct pci_epf *epf = epf_test->epf; + + free_irq(epf->db_msg[0].virq, epf_test); + reg->doorbell_bar = cpu_to_le32(NO_BAR); + + pci_epf_free_doorbell(epf); +} + +static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test, + struct pci_epf_test_reg *reg) +{ + u32 status = le32_to_cpu(reg->status); + struct pci_epf *epf = epf_test->epf; + struct pci_epc *epc = epf->epc; + struct msi_msg *msg; + enum pci_barno bar; + size_t offset; + int ret; + + ret = pci_epf_alloc_doorbell(epf, 1); + if (ret) + goto set_status_err; + + msg = &epf->db_msg[0].msg; + bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1); + if (bar < BAR_0) + goto err_doorbell_cleanup; + + ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0, + "pci-ep-test-doorbell", epf_test); 
+ if (ret) { + dev_err(&epf->dev, + "Failed to request doorbell IRQ: %d\n", + epf->db_msg[0].virq); + goto err_doorbell_cleanup; + } + + reg->doorbell_data = cpu_to_le32(msg->data); + reg->doorbell_bar = cpu_to_le32(bar); + + msg = &epf->db_msg[0].msg; + ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo, + &epf_test->db_bar.phys_addr, &offset); + + if (ret) + goto err_doorbell_cleanup; + + reg->doorbell_offset = cpu_to_le32(offset); + + epf_test->db_bar.barno = bar; + epf_test->db_bar.size = epf->bar[bar].size; + epf_test->db_bar.flags = epf->bar[bar].flags; + + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar); + if (ret) + goto err_doorbell_cleanup; + + status |= STATUS_DOORBELL_ENABLE_SUCCESS; + reg->status = cpu_to_le32(status); + return; + +err_doorbell_cleanup: + pci_epf_test_doorbell_cleanup(epf_test); +set_status_err: + status |= STATUS_DOORBELL_ENABLE_FAIL; + reg->status = cpu_to_le32(status); +} + +static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test, + struct pci_epf_test_reg *reg) +{ + enum pci_barno bar = le32_to_cpu(reg->doorbell_bar); + u32 status = le32_to_cpu(reg->status); + struct pci_epf *epf = epf_test->epf; + struct pci_epc *epc = epf->epc; + + if (bar < BAR_0) + goto set_status_err; + + pci_epf_test_doorbell_cleanup(epf_test); + pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar); + + status |= STATUS_DOORBELL_DISABLE_SUCCESS; + reg->status = cpu_to_le32(status); + + return; + +set_status_err: + status |= STATUS_DOORBELL_DISABLE_FAIL; + reg->status = cpu_to_le32(status); +} + static void pci_epf_test_cmd_handler(struct work_struct *work) { u32 command; @@ -714,6 +836,14 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) pci_epf_test_copy(epf_test, reg); pci_epf_test_raise_irq(epf_test, reg); break; + case COMMAND_ENABLE_DOORBELL: + pci_epf_test_enable_doorbell(epf_test, reg); + pci_epf_test_raise_irq(epf_test, reg); + break; + case COMMAND_DISABLE_DOORBELL: + pci_epf_test_disable_doorbell(epf_test, reg); + pci_epf_test_raise_irq(epf_test, reg); + break; default: dev_err(dev, "Invalid command 0x%x\n", command); break; diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c index e4da3fdb0007..83e9ab10f9c4 100644 --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -70,9 +70,11 @@ static struct workqueue_struct *kpcintb_workqueue; enum epf_ntb_bar { BAR_CONFIG, BAR_DB, - BAR_MW0, BAR_MW1, BAR_MW2, + BAR_MW3, + BAR_MW4, + VNTB_BAR_NUM, }; /* @@ -132,7 +134,7 @@ struct epf_ntb { bool linkup; u32 spad_size; - enum pci_barno epf_ntb_bar[6]; + enum pci_barno epf_ntb_bar[VNTB_BAR_NUM]; struct epf_ntb_ctrl *reg; @@ -510,7 +512,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb) struct device *dev = &ntb->epf->dev; int ret; struct pci_epf_bar *epf_bar; - void __iomem *mw_addr; + void *mw_addr; enum pci_barno barno; size_t size = sizeof(u32) * ntb->db_count; @@ -576,7 +578,7 @@ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb) for (i = 0; i < ntb->num_mws; i++) { size = ntb->mws_size[i]; - barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + barno = ntb->epf_ntb_bar[BAR_MW1 + i]; ntb->epf->bar[barno].barno = barno; ntb->epf->bar[barno].size = size; @@ -629,7 +631,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws) int i; for (i = 0; i < num_mws; i++) { - barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + barno = ntb->epf_ntb_bar[BAR_MW1 + i]; 
pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, @@ -654,6 +656,63 @@ static void epf_ntb_epc_destroy(struct epf_ntb *ntb) pci_epc_put(ntb->epf->epc); } + +/** + * epf_ntb_is_bar_used() - Check if a bar is used in the ntb configuration + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @barno: Checked bar number + * + * Returns: true if used, false if free. + */ +static bool epf_ntb_is_bar_used(struct epf_ntb *ntb, + enum pci_barno barno) +{ + int i; + + for (i = 0; i < VNTB_BAR_NUM; i++) { + if (ntb->epf_ntb_bar[i] == barno) + return true; + } + + return false; +} + +/** + * epf_ntb_find_bar() - Assign BAR number when no configuration is provided + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @epc_features: The features provided by the EPC specific to this EPF + * @bar: NTB BAR index + * @barno: Bar start index + * + * When the BAR configuration was not provided through the userspace + * configuration, automatically assign BAR as it has been historically + * done by this endpoint function. + * + * Returns: the BAR number found, if any. -1 otherwise + */ +static int epf_ntb_find_bar(struct epf_ntb *ntb, + const struct pci_epc_features *epc_features, + enum epf_ntb_bar bar, + enum pci_barno barno) +{ + while (ntb->epf_ntb_bar[bar] < 0) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) + break; /* No more BAR available */ + + /* + * Verify if the BAR found is not already assigned + * through the provided configuration + */ + if (!epf_ntb_is_bar_used(ntb, barno)) + ntb->epf_ntb_bar[bar] = barno; + + barno += 1; + } + + return barno; +} + /** * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB * constructs (scratchpad region, doorbell, memorywindow) @@ -676,23 +735,21 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); /* These are required BARs which are mandatory for NTB functionality */ - for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) { - barno = pci_epc_get_next_free_bar(epc_features, barno); + for (bar = BAR_CONFIG; bar <= BAR_MW1; bar++) { + barno = epf_ntb_find_bar(ntb, epc_features, bar, barno); if (barno < 0) { dev_err(dev, "Fail to get NTB function BAR\n"); - return barno; + return -ENOENT; } - ntb->epf_ntb_bar[bar] = barno; } /* These are optional BARs which don't impact NTB functionality */ - for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) { - barno = pci_epc_get_next_free_bar(epc_features, barno); + for (bar = BAR_MW1, i = 1; i < num_mws; bar++, i++) { + barno = epf_ntb_find_bar(ntb, epc_features, bar, barno); if (barno < 0) { ntb->num_mws = i; dev_dbg(dev, "BAR not available for > MW%d\n", i + 1); } - ntb->epf_ntb_bar[bar] = barno; } return 0; @@ -860,6 +917,37 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ return len; \ } +#define EPF_NTB_BAR_R(_name, _id) \ + static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ + { \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + \ + return sprintf(page, "%d\n", ntb->epf_ntb_bar[_id]); \ + } + +#define EPF_NTB_BAR_W(_name, _id) \ + static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ + { \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + int val; \ + int ret; \ + \ + ret = kstrtoint(page, 0, 
&val); \ + if (ret) \ + return ret; \ + \ + if (val < NO_BAR || val > BAR_5) \ + return -EINVAL; \ + \ + ntb->epf_ntb_bar[_id] = val; \ + \ + return len; \ + } + static ssize_t epf_ntb_num_mws_store(struct config_item *item, const char *page, size_t len) { @@ -899,6 +987,18 @@ EPF_NTB_MW_R(mw3) EPF_NTB_MW_W(mw3) EPF_NTB_MW_R(mw4) EPF_NTB_MW_W(mw4) +EPF_NTB_BAR_R(ctrl_bar, BAR_CONFIG) +EPF_NTB_BAR_W(ctrl_bar, BAR_CONFIG) +EPF_NTB_BAR_R(db_bar, BAR_DB) +EPF_NTB_BAR_W(db_bar, BAR_DB) +EPF_NTB_BAR_R(mw1_bar, BAR_MW1) +EPF_NTB_BAR_W(mw1_bar, BAR_MW1) +EPF_NTB_BAR_R(mw2_bar, BAR_MW2) +EPF_NTB_BAR_W(mw2_bar, BAR_MW2) +EPF_NTB_BAR_R(mw3_bar, BAR_MW3) +EPF_NTB_BAR_W(mw3_bar, BAR_MW3) +EPF_NTB_BAR_R(mw4_bar, BAR_MW4) +EPF_NTB_BAR_W(mw4_bar, BAR_MW4) CONFIGFS_ATTR(epf_ntb_, spad_count); CONFIGFS_ATTR(epf_ntb_, db_count); @@ -910,6 +1010,12 @@ CONFIGFS_ATTR(epf_ntb_, mw4); CONFIGFS_ATTR(epf_ntb_, vbus_number); CONFIGFS_ATTR(epf_ntb_, vntb_pid); CONFIGFS_ATTR(epf_ntb_, vntb_vid); +CONFIGFS_ATTR(epf_ntb_, ctrl_bar); +CONFIGFS_ATTR(epf_ntb_, db_bar); +CONFIGFS_ATTR(epf_ntb_, mw1_bar); +CONFIGFS_ATTR(epf_ntb_, mw2_bar); +CONFIGFS_ATTR(epf_ntb_, mw3_bar); +CONFIGFS_ATTR(epf_ntb_, mw4_bar); static struct configfs_attribute *epf_ntb_attrs[] = { &epf_ntb_attr_spad_count, @@ -922,6 +1028,12 @@ static struct configfs_attribute *epf_ntb_attrs[] = { &epf_ntb_attr_vbus_number, &epf_ntb_attr_vntb_pid, &epf_ntb_attr_vntb_vid, + &epf_ntb_attr_ctrl_bar, + &epf_ntb_attr_db_bar, + &epf_ntb_attr_mw1_bar, + &epf_ntb_attr_mw2_bar, + &epf_ntb_attr_mw3_bar, + &epf_ntb_attr_mw4_bar, NULL, }; @@ -1048,7 +1160,7 @@ static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx, struct device *dev; dev = &ntb->ntb.dev; - barno = ntb->epf_ntb_bar[BAR_MW0 + idx]; + barno = ntb->epf_ntb_bar[BAR_MW1 + idx]; epf_bar = &ntb->epf->bar[barno]; epf_bar->phys_addr = addr; epf_bar->barno = barno; @@ -1379,6 +1491,7 @@ static int epf_ntb_probe(struct pci_epf *epf, { struct epf_ntb *ntb; struct device *dev; + int i; dev = &epf->dev; @@ -1389,6 +1502,11 @@ static int epf_ntb_probe(struct pci_epf *epf, epf->header = &epf_ntb_header; ntb->epf = epf; ntb->vbus_number = 0xff; + + /* Initially, no bar is assigned */ + for (i = 0; i < VNTB_BAR_NUM; i++) + ntb->epf_ntb_bar[i] = NO_BAR; + epf_set_drvdata(epf, ntb); dev_info(dev, "pci-ep epf driver loaded\n"); diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index d712c7a866d2..ef50c82e647f 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -691,6 +691,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group) if (IS_ERR_OR_NULL(group)) return; + list_del(&group->group_entry); configfs_unregister_default_group(group); } EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group); diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c new file mode 100644 index 000000000000..9ca89cbfec15 --- /dev/null +++ b/drivers/pci/endpoint/pci-ep-msi.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Endpoint *Controller* (EPC) MSI library + * + * Copyright (C) 2025 NXP + * Author: Frank Li <Frank.Li@nxp.com> + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci-ep-cfs.h> +#include <linux/pci-ep-msi.h> +#include <linux/slab.h> + +static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ 
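+	/*
+	 * irq_write_msi_msg() callback wired up via
+	 * platform_device_msi_init_and_alloc_irqs() below: cache the composed
+	 * message so the EPF can expose the address/data pair through its
+	 * doorbell registers.
+	 */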
+ struct pci_epc *epc; + struct pci_epf *epf; + + epc = pci_epc_get(dev_name(msi_desc_to_dev(desc))); + if (!epc) + return; + + epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list); + + if (epf && epf->db_msg && desc->msi_index < epf->num_db) + memcpy(&epf->db_msg[desc->msi_index].msg, msg, sizeof(*msg)); + + pci_epc_put(epc); +} + +int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db) +{ + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + struct irq_domain *domain; + void *msg; + int ret; + int i; + + /* TODO: Multi-EPF support */ + if (list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list) != epf) { + dev_err(dev, "MSI doorbell doesn't support multiple EPFs\n"); + return -EINVAL; + } + + domain = of_msi_map_get_device_domain(epc->dev.parent, 0, + DOMAIN_BUS_PLATFORM_MSI); + if (!domain) { + dev_err(dev, "Can't find MSI domain for EPC\n"); + return -ENODEV; + } + + if (!irq_domain_is_msi_parent(domain)) + return -ENODEV; + + if (!irq_domain_is_msi_immutable(domain)) { + dev_err(dev, "Mutable MSI controller not supported\n"); + return -ENODEV; + } + + dev_set_msi_domain(epc->dev.parent, domain); + + msg = kcalloc(num_db, sizeof(struct pci_epf_doorbell_msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + epf->num_db = num_db; + epf->db_msg = msg; + + ret = platform_device_msi_init_and_alloc_irqs(epc->dev.parent, num_db, + pci_epf_write_msi_msg); + if (ret) { + dev_err(dev, "Failed to allocate MSI\n"); + kfree(msg); + return ret; + } + + for (i = 0; i < num_db; i++) + epf->db_msg[i].virq = msi_get_virq(epc->dev.parent, i); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell); + +void pci_epf_free_doorbell(struct pci_epf *epf) +{ + platform_device_msi_free_irqs_all(epf->epc->dev.parent); + + kfree(epf->db_msg); + epf->db_msg = NULL; + epf->num_db = 0; +} +EXPORT_SYMBOL_GPL(pci_epf_free_doorbell); diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 577a9e490115..d54e18872aef 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -338,7 +338,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver) mutex_lock(&pci_epf_mutex); list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) pci_ep_cfs_remove_epf_group(group); - list_del(&driver->epf_group); + WARN_ON(!list_empty(&driver->epf_group)); mutex_unlock(&pci_epf_mutex); } @@ -477,6 +477,44 @@ struct pci_epf *pci_epf_create(const char *name) } EXPORT_SYMBOL_GPL(pci_epf_create); +/** + * pci_epf_align_inbound_addr() - Align the given address based on the BAR + * alignment requirement + * @epf: the EPF device + * @bar: the BAR number corresponding to the given addr + * @addr: inbound address to be aligned + * @base: base address matching the @bar alignment requirement + * @off: offset to be added to the @base address + * + * Helper function to align input @addr based on BAR's alignment requirement. + * The aligned base address and offset are returned via @base and @off. + * + * NOTE: The pci_epf_alloc_space() function already accounts for alignment. + * This API is primarily intended for use with other memory regions not + * allocated by pci_epf_alloc_space(), such as peripheral register spaces or + * the message address of a platform MSI controller. + * + * Return: 0 on success, errno otherwise. 
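+ *
+ * For example (hypothetical values): with a 1 MiB BAR, i.e. a 0x100000-byte
+ * alignment, and @addr == 0x40021234, the helper yields @base == 0x40000000
+ * and @off == 0x21234.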
+ */ +int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar, + u64 addr, dma_addr_t *base, size_t *off) +{ + /* + * Most EP controllers require the BAR start address to be aligned to + * the BAR size, because they mask off the lower bits. + * + * Alignment to BAR size also works for controllers that support + * unaligned addresses. + */ + u64 align = epf->bar[bar].size; + + *base = round_down(addr, align); + *off = addr & (align - 1); + + return 0; +} +EXPORT_SYMBOL_GPL(pci_epf_align_inbound_addr); + static void pci_epf_dev_release(struct device *dev) { struct pci_epf *epf = to_pci_epf(dev); diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO index 92e6e20e8595..7397374af171 100644 --- a/drivers/pci/hotplug/TODO +++ b/drivers/pci/hotplug/TODO @@ -2,10 +2,6 @@ Contributions are solicited in particular to remedy the following issues: cpcihp: -* There are no implementations of the ->hardware_test, ->get_power and - ->set_power callbacks in struct cpci_hp_controller_ops. Why were they - introduced? Can they be removed from the struct? - * Returned code from pci_hp_add_bridge() is not checked. cpqphp: diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 91d2d92717d9..bcc51b26d03d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -995,7 +995,7 @@ static inline int pcie_hotplug_depth(struct pci_dev *dev) while (bus->parent) { bus = bus->parent; - if (bus->self && bus->self->is_hotplug_bridge) + if (bus->self && bus->self->is_pciehp) depth++; } diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 10693b5d7eb6..ac4375954c94 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -7,11 +7,16 @@ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com> */ +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/log2.h> #include <linux/pci.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/string.h> #include <linux/delay.h> +#include <asm/div64.h> #include "pci.h" #define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */ @@ -150,7 +155,28 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) if (!dev->is_physfn) return 0; - return dev->sriov->barsz[resno - PCI_IOV_RESOURCES]; + return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)]; +} + +void pci_iov_resource_set_size(struct pci_dev *dev, int resno, + resource_size_t size) +{ + if (!pci_resource_is_iov(resno)) { + pci_warn(dev, "%s is not an IOV resource\n", + pci_resource_name(dev, resno)); + return; + } + + dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)] = size; +} + +bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev) +{ + u16 cmd; + + pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd); + + return cmd & PCI_SRIOV_CTRL_MSE; } static void pci_read_vf_config_common(struct pci_dev *virtfn) @@ -341,12 +367,14 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) virtfn->multifunction = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; + int idx = pci_resource_num_from_vf_bar(i); + + res = &dev->resource[idx]; if (!res->parent) continue; virtfn->resource[i].name = pci_name(virtfn); virtfn->resource[i].flags = res->flags; - size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); + size = pci_iov_resource_size(dev, idx); resource_set_range(&virtfn->resource[i], res->start + size * id, size); rc = request_resource(res, &virtfn->resource[i]); @@ -643,8 +671,13 @@ static int 
sriov_enable(struct pci_dev *dev, int nr_virtfn) nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - bars |= (1 << (i + PCI_IOV_RESOURCES)); - res = &dev->resource[i + PCI_IOV_RESOURCES]; + int idx = pci_resource_num_from_vf_bar(i); + resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx); + + bars |= (1 << idx); + res = &dev->resource[idx]; + if (vf_bar_sz * nr_virtfn > resource_size(res)) + continue; if (res->parent) nres++; } @@ -810,8 +843,10 @@ found: nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; - res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES); + int idx = pci_resource_num_from_vf_bar(i); + + res = &dev->resource[idx]; + res_name = pci_resource_name(dev, idx); /* * If it is already FIXED, don't change it, something @@ -850,6 +885,7 @@ found: pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link); + iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR); if (pdev) iov->dev = pci_dev_get(pdev); @@ -869,7 +905,7 @@ fail_max_buses: dev->is_physfn = 0; failed: for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; + res = &dev->resource[pci_resource_num_from_vf_bar(i)]; res->flags = 0; } @@ -888,6 +924,30 @@ static void sriov_release(struct pci_dev *dev) dev->sriov = NULL; } +static void sriov_restore_vf_rebar_state(struct pci_dev *dev) +{ + unsigned int pos, nbars, i; + u32 ctrl; + + pos = pci_iov_vf_rebar_cap(dev); + if (!pos) + return; + + pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl); + nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl); + + for (i = 0; i < nbars; i++, pos += 8) { + int bar_idx, size; + + pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl); + bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl); + size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]); + ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE; + ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size); + pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl); + } +} + static void sriov_restore_state(struct pci_dev *dev) { int i; @@ -907,7 +967,7 @@ static void sriov_restore_state(struct pci_dev *dev) pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl); for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) - pci_update_resource(dev, i + PCI_IOV_RESOURCES); + pci_update_resource(dev, pci_resource_num_from_vf_bar(i)); pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz); pci_iov_set_numvfs(dev, iov->num_VFs); @@ -973,7 +1033,7 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno) { struct pci_sriov *iov = dev->is_physfn ? 
dev->sriov : NULL; struct resource *res = pci_resource_n(dev, resno); - int vf_bar = resno - PCI_IOV_RESOURCES; + int vf_bar = pci_resource_num_to_vf_bar(resno); struct pci_bus_region region; u16 cmd; u32 new; @@ -1047,8 +1107,10 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) */ void pci_restore_iov_state(struct pci_dev *dev) { - if (dev->is_physfn) + if (dev->is_physfn) { + sriov_restore_vf_rebar_state(dev); sriov_restore_state(dev); + } } /** @@ -1255,3 +1317,72 @@ int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn) return nr_virtfn; } EXPORT_SYMBOL_GPL(pci_sriov_configure_simple); + +/** + * pci_iov_vf_bar_set_size - set a new size for a VF BAR + * @dev: the PCI device + * @resno: the resource number + * @size: new size as defined in the spec (0=1MB, 31=128TB) + * + * Set the new size of a VF BAR that supports VF resizable BAR capability. + * Unlike pci_resize_resource(), this does not cause the resource that + * reserves the MMIO space (originally up to total_VFs) to be resized, which + * means that following calls to pci_enable_sriov() can fail if the resources + * no longer fit. + * + * Return: 0 on success, or negative on failure. + */ +int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size) +{ + u32 sizes; + int ret; + + if (!pci_resource_is_iov(resno)) + return -EINVAL; + + if (pci_iov_is_memory_decoding_enabled(dev)) + return -EBUSY; + + sizes = pci_rebar_get_possible_sizes(dev, resno); + if (!sizes) + return -ENOTSUPP; + + if (!(sizes & BIT(size))) + return -EINVAL; + + ret = pci_rebar_set_size(dev, resno, size); + if (ret) + return ret; + + pci_iov_resource_set_size(dev, resno, pci_rebar_size_to_bytes(size)); + + return 0; +} +EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size); + +/** + * pci_iov_vf_bar_get_sizes - get VF BAR sizes allowing to create up to num_vfs + * @dev: the PCI device + * @resno: the resource number + * @num_vfs: number of VFs + * + * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within + * the currently assigned size of the resource @resno. + * + * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB, + * bit 31=128TB). + */ +u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs) +{ + u64 vf_len = pci_resource_len(dev, resno); + u32 sizes; + + if (!num_vfs) + return 0; + + do_div(vf_len, num_vfs); + sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M); + + return sizes & pci_rebar_get_possible_sizes(dev, resno); +} +EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes); diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 8d8de0ad2bb7..34d664139f48 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -943,7 +943,7 @@ int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag) /* * This is a horrible hack, but short of implementing a PCI * specific interrupt chip callback and a huge pile of - * infrastructure, this is the minor nuissance. It provides the + * infrastructure, this is the minor nuisance. It provides the * protection against concurrent operations on this entry and keeps * the control word cache in sync. 
*/ diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index af370628e583..ddb25960ea47 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -816,15 +816,10 @@ int pci_acpi_program_hp_params(struct pci_dev *dev) bool pciehp_is_native(struct pci_dev *bridge) { const struct pci_host_bridge *host; - u32 slot_cap; if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) return false; - pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap); - if (!(slot_cap & PCI_EXP_SLTCAP_HPC)) - return false; - if (pcie_ports_native) return true; @@ -1002,7 +997,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev) struct acpi_device *adev, *rpadev; const union acpi_object *obj; - if (acpi_pci_disabled || !dev->is_hotplug_bridge) + if (acpi_pci_disabled || !dev->is_pciehp) return false; adev = ACPI_COMPANION(&dev->dev); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index b853585cb1f8..63665240ae87 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -1632,7 +1632,7 @@ static int pci_bus_num_vf(struct device *dev) */ static int pci_dma_configure(struct device *dev) { - struct pci_driver *driver = to_pci_driver(dev->driver); + const struct device_driver *drv = READ_ONCE(dev->driver); struct device *bridge; int ret = 0; @@ -1649,8 +1649,8 @@ static int pci_dma_configure(struct device *dev) pci_put_host_bridge_device(bridge); - /* @driver may not be valid when we're called from the IOMMU layer */ - if (!ret && dev->driver && !driver->driver_managed_dma) { + /* @drv may not be valid when we're called from the IOMMU layer */ + if (!ret && drv && !to_pci_driver(drv)->driver_managed_dma) { ret = iommu_device_use_default_domain(dev); if (ret) arch_teardown_dma_ops(dev); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 9e42090fb108..b0f4d98036cd 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3030,8 +3030,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = { * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check * - * This function checks if it is possible to move the bridge to D3. * Currently we only allow D3 for some PCIe ports and for Thunderbolt. + * + * Return: Whether it is possible to move the bridge to D3. + * + * The return value is guaranteed to be constant across the entire lifetime + * of the bridge, including its hot-removal. */ bool pci_bridge_d3_possible(struct pci_dev *bridge) { @@ -3046,10 +3050,14 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) return false; /* - * Hotplug ports handled by firmware in System Management Mode - * may not be put into D3 by the OS (Thunderbolt on non-Macs). + * Hotplug ports handled by platform firmware may not be put + * into D3 by the OS, e.g. ACPI slots ... */ - if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) + if (bridge->is_hotplug_bridge && !bridge->is_pciehp) + return false; + + /* ... or PCIe hotplug ports not handled natively by the OS. */ + if (bridge->is_pciehp && !pciehp_is_native(bridge)) return false; if (pci_bridge_d3_force) @@ -3068,7 +3076,7 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) * by vendors for runtime D3 at least until 2018 because there * was no OS support. 
*/ - if (bridge->is_hotplug_bridge) + if (bridge->is_pciehp) return false; if (dmi_check_system(bridge_d3_blacklist)) @@ -3205,7 +3213,6 @@ void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev) void pci_pm_init(struct pci_dev *dev) { int pm; - u16 status; u16 pmc; device_enable_async_suspend(&dev->dev); @@ -3266,9 +3273,6 @@ void pci_pm_init(struct pci_dev *dev) pci_pme_active(dev, false); } - pci_read_config_word(dev, PCI_STATUS, &status); - if (status & PCI_STATUS_IMM_READY) - dev->imm_ready = 1; poweron: pci_pm_power_up_and_verify_state(dev); pm_runtime_forbid(&dev->dev); @@ -3753,7 +3757,13 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar) unsigned int pos, nbars, i; u32 ctrl; - pos = pdev->rebar_cap; + if (pci_resource_is_iov(bar)) { + pos = pci_iov_vf_rebar_cap(pdev); + bar = pci_resource_num_to_vf_bar(bar); + } else { + pos = pdev->rebar_cap; + } + if (!pos) return -ENOTSUPP; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 12215ee72afb..34f65d69662e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -36,13 +36,6 @@ struct pcie_tlp_log; #define PCIE_T_PERST_CLK_US 100 /* - * End of conventional reset (PERST# de-asserted) to first configuration - * request (device able to respond with a "Request Retry Status" completion), - * from PCIe r6.0, sec 6.6.1. - */ -#define PCIE_T_RRS_READY_MS 100 -/* * PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization> * Recommends 1ms to 10ms timeout to check L2 ready. */ @@ -61,7 +54,11 @@ struct pcie_tlp_log; * completes before sending a Configuration Request to the device * immediately below that Port." */ -#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100 +#define PCIE_RESET_CONFIG_WAIT_MS 100 + +/* Parameters for the wait-for-link-up routine */ +#define PCIE_LINK_WAIT_MAX_RETRIES 10 +#define PCIE_LINK_WAIT_SLEEP_MS 90 /* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */ #define PCIE_MSG_TYPE_R_RC 0 @@ -391,12 +388,14 @@ void pci_bus_put(struct pci_bus *bus); #define PCIE_LNKCAP_SLS2SPEED(lnkcap) \ ({ \ - ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ + u32 lnkcap_sls = (lnkcap) & PCI_EXP_LNKCAP_SLS; \ + \ + (lnkcap_sls == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ PCI_SPEED_UNKNOWN); \ }) @@ -411,13 +410,17 @@ void pci_bus_put(struct pci_bus *bus); PCI_SPEED_UNKNOWN) #define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \ - ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \ - PCI_SPEED_UNKNOWN) +({ \ + u16 lnkctl2_tls = (lnkctl2) & PCI_EXP_LNKCTL2_TLS; \ + \ + (lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_64_0GT ? 
PCIE_SPEED_64_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \ + PCI_SPEED_UNKNOWN); \ +}) /* PCIe speed to Mb/s reduced by encoding overhead */ #define PCIE_SPEED2MBS_ENC(speed) \ @@ -486,6 +489,7 @@ struct pci_sriov { u16 subsystem_vendor; /* VF subsystem vendor */ u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ + u16 vf_rebar_cap; /* VF Resizable BAR capability offset */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ }; @@ -710,10 +714,28 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); int pci_iov_bus_range(struct pci_bus *bus); +void pci_iov_resource_set_size(struct pci_dev *dev, int resno, + resource_size_t size); +bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev); +static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev) +{ + if (!dev->is_physfn) + return 0; + + return dev->sriov->vf_rebar_cap; +} static inline bool pci_resource_is_iov(int resno) { return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END; } +static inline int pci_resource_num_from_vf_bar(int resno) +{ + return resno + PCI_IOV_RESOURCES; +} +static inline int pci_resource_num_to_vf_bar(int resno) +{ + return resno - PCI_IOV_RESOURCES; +} extern const struct attribute_group sriov_pf_dev_attr_group; extern const struct attribute_group sriov_vf_dev_attr_group; #else @@ -734,10 +756,30 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) { return 0; } +static inline void pci_iov_resource_set_size(struct pci_dev *dev, int resno, + resource_size_t size) { } +static inline bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev) +{ + return false; +} +static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev) +{ + return 0; +} static inline bool pci_resource_is_iov(int resno) { return false; } +static inline int pci_resource_num_from_vf_bar(int resno) +{ + WARN_ON_ONCE(1); + return -ENODEV; +} +static inline int pci_resource_num_to_vf_bar(int resno) +{ + WARN_ON_ONCE(1); + return -ENODEV; +} #endif /* CONFIG_PCI_IOV */ #ifdef CONFIG_PCIE_TPH diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 70ac66188367..e286c197d716 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -116,12 +116,12 @@ struct aer_info { PCI_ERR_ROOT_MULTI_COR_RCV | \ PCI_ERR_ROOT_MULTI_UNCOR_RCV) -static int pcie_aer_disable; +static bool pcie_aer_disable; static pci_ers_result_t aer_root_reset(struct pci_dev *dev); void pci_no_aer(void) { - pcie_aer_disable = 1; + pcie_aer_disable = true; } bool pci_aer_available(void) @@ -1039,7 +1039,8 @@ static int find_device_iter(struct pci_dev *dev, void *data) /* List this device */ if (add_error_device(e_info, dev)) { /* We cannot handle more... Stop iteration */ - /* TODO: Should print error message here? 
*/ + pci_err(dev, "Exceeded max supported (%d) devices with errors logged\n", + AER_MAX_MULTI_ERR_DEVICES); return 1; } diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 29fcb0689a91..919a05b97647 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -245,7 +245,7 @@ struct pcie_link_state { u32 clkpm_disable:1; /* Clock PM disabled */ }; -static int aspm_disabled, aspm_force; +static bool aspm_disabled, aspm_force; static bool aspm_support_enabled = true; static DEFINE_MUTEX(aspm_lock); static LIST_HEAD(link_list); @@ -884,10 +884,9 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Configure the ASPM L1 substates. Caller must disable L1 first. */ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) { - u32 val; + u32 val = 0; struct pci_dev *child = link->downstream, *parent = link->pdev; - val = 0; if (state & PCIE_LINK_STATE_L1_1) val |= PCI_L1SS_CTL1_ASPM_L1_1; if (state & PCIE_LINK_STATE_L1_2) @@ -1712,11 +1711,11 @@ static int __init pcie_aspm_disable(char *str) { if (!strcmp(str, "off")) { aspm_policy = POLICY_DEFAULT; - aspm_disabled = 1; + aspm_disabled = true; aspm_support_enabled = false; pr_info("PCIe ASPM is disabled\n"); } else if (!strcmp(str, "force")) { - aspm_force = 1; + aspm_force = true; pr_info("PCIe ASPM is forcibly enabled\n"); } return 1; @@ -1734,7 +1733,7 @@ void pcie_no_aspm(void) */ if (!aspm_force) { aspm_policy = POLICY_DEFAULT; - aspm_disabled = 1; + aspm_disabled = true; } } diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index e8318fd5f6ed..d1b68c18444f 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c @@ -220,7 +220,7 @@ static int get_port_device_capability(struct pci_dev *dev) struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); int services = 0; - if (dev->is_hotplug_bridge && + if (dev->is_pciehp && (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) && (pcie_ports_native || host->native_pcie_hotplug)) { diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c index 4bd73f038ffb..65e4b008be00 100644 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@ -507,7 +507,7 @@ struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata, if (!ops->check_capability) return NULL; - /* Check for PTM capability before creating debugfs attrbutes */ + /* Check for PTM capability before creating debugfs attributes */ ret = ops->check_capability(pdata); if (!ret) { dev_dbg(dev, "PTM capability not present\n"); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e6a34db77826..f41128f91ca7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1678,7 +1678,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32); if (reg32 & PCI_EXP_SLTCAP_HPC) - pdev->is_hotplug_bridge = 1; + pdev->is_hotplug_bridge = pdev->is_pciehp = 1; } static void set_pcie_thunderbolt(struct pci_dev *dev) @@ -2602,6 +2602,15 @@ void pcie_report_downtraining(struct pci_dev *dev) __pcie_print_link_status(dev, false); } +static void pci_imm_ready_init(struct pci_dev *dev) +{ + u16 status; + + pci_read_config_word(dev, PCI_STATUS, &status); + if (status & PCI_STATUS_IMM_READY) + dev->imm_ready = 1; +} + static void pci_init_capabilities(struct pci_dev *dev) { pci_ea_init(dev); /* Enhanced Allocation */ @@ -2611,6 +2620,7 @@ static void pci_init_capabilities(struct pci_dev *dev) /* Buffers for saving PCIe and PCI-X 
capabilities */ pci_allocate_cap_save_buffers(dev); + pci_imm_ready_init(dev); /* Immediate Readiness */ pci_pm_init(dev); /* Power Management */ pci_vpd_init(dev); /* Vital Product Data */ pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index cf483d82572c..d97335a40193 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -105,13 +105,13 @@ int pcie_failed_link_retrain(struct pci_dev *dev) !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting) return ret; - pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) { - u16 oldlnkctl2 = lnkctl2; + u16 oldlnkctl2; pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n"); + pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &oldlnkctl2); ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false); if (ret) { pci_info(dev, "retraining failed\n"); @@ -123,6 +123,8 @@ int pcie_failed_link_retrain(struct pci_dev *dev) pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); } + pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); + if ((lnksta & PCI_EXP_LNKSTA_DLLLA) && (lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT && pci_match_id(ids, dev)) { diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 07c3d021a47e..7853ac6999e2 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1888,7 +1888,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data) bool *unassigned = data; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES]; + int idx = pci_resource_num_from_vf_bar(i); + struct resource *r = &dev->resource[idx]; struct pci_bus_region region; /* Not assigned or rejected by kernel? 
*/ diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index c6657cdd06f6..d2b3ed51e880 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -423,13 +423,39 @@ void pci_release_resource(struct pci_dev *dev, int resno) } EXPORT_SYMBOL(pci_release_resource); +static bool pci_resize_is_memory_decoding_enabled(struct pci_dev *dev, + int resno) +{ + u16 cmd; + + if (pci_resource_is_iov(resno)) + return pci_iov_is_memory_decoding_enabled(dev); + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + + return cmd & PCI_COMMAND_MEMORY; +} + +static void pci_resize_resource_set_size(struct pci_dev *dev, int resno, + int size) +{ + resource_size_t res_size = pci_rebar_size_to_bytes(size); + struct resource *res = pci_resource_n(dev, resno); + + if (!pci_resource_is_iov(resno)) { + resource_set_size(res, res_size); + } else { + resource_set_size(res, res_size * pci_sriov_get_totalvfs(dev)); + pci_iov_resource_set_size(dev, resno, res_size); + } +} + int pci_resize_resource(struct pci_dev *dev, int resno, int size) { struct resource *res = pci_resource_n(dev, resno); struct pci_host_bridge *host; int old, ret; u32 sizes; - u16 cmd; /* Check if we must preserve the firmware's resource assignment */ host = pci_find_host_bridge(dev->bus); @@ -440,8 +466,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) if (!(res->flags & IORESOURCE_UNSET)) return -EBUSY; - pci_read_config_word(dev, PCI_COMMAND, &cmd); - if (cmd & PCI_COMMAND_MEMORY) + if (pci_resize_is_memory_decoding_enabled(dev, resno)) return -EBUSY; sizes = pci_rebar_get_possible_sizes(dev, resno); @@ -459,7 +484,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) if (ret) return ret; - resource_set_size(res, pci_rebar_size_to_bytes(size)); + pci_resize_resource_set_size(dev, resno, size); /* Check if the new config works by trying to assign everything. 
*/ if (dev->bus->self) { @@ -471,7 +496,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size) error_resize: pci_rebar_set_size(dev, resno, old); - resource_set_size(res, pci_rebar_size_to_bytes(old)); + pci_resize_resource_set_size(dev, resno, old); return ret; } EXPORT_SYMBOL(pci_resize_resource); diff --git a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c index 2eaa41f8fc70..67a6ae5ecba0 100644 --- a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c +++ b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c @@ -61,8 +61,6 @@ static int ns2_pci_phy_probe(struct mdio_device *mdiodev) return PTR_ERR(provider); } - dev_info(dev, "%s PHY registered\n", dev_name(dev)); - return 0; } diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c index 36ad02c33ac5..8473fa574529 100644 --- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c +++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c @@ -395,7 +395,6 @@ static int ns2_drd_phy_probe(struct platform_device *pdev) platform_set_drvdata(pdev, driver); - dev_info(dev, "Registered NS2 DRD Phy device\n"); queue_delayed_work(system_power_efficient_wq, &driver->wq_extcon, driver->debounce_jiffies); diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c index ff9b3862bf7a..706e1d83b4ce 100644 --- a/drivers/phy/broadcom/phy-bcm-sr-pcie.c +++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c @@ -277,8 +277,6 @@ static int sr_pcie_phy_probe(struct platform_device *pdev) return PTR_ERR(provider); } - dev_info(dev, "Stingray PCIe PHY driver initialized\n"); - return 0; } diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c index 228100357054..d52dd065e862 100644 --- a/drivers/phy/broadcom/phy-brcm-sata.c +++ b/drivers/phy/broadcom/phy-brcm-sata.c @@ -832,7 +832,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) return PTR_ERR(provider); } - dev_info(dev, "registered %d port(s)\n", count); + dev_dbg(dev, "registered %d port(s)\n", count); return 0; } diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index 45a5c00843bf..74613382ccb0 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -58,8 +58,11 @@ #define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2 #define SIERRA_CMN_PLLLC1_FBDIV_INT_PREG 0xC3 #define SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG 0xC5 +#define SIERRA_CMN_PLLLC1_MODE_PREG 0xC8 +#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE1_PREG 0xC9 #define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA #define SIERRA_CMN_PLLLC1_CLK0_PREG 0xCE +#define SIERRA_CMN_PLLLC1_BWCAL_MODE1_PREG 0xCF #define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0 #define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2 @@ -1541,6 +1544,137 @@ static void cdns_sierra_phy_remove(struct platform_device *pdev) cdns_sierra_clk_unregister(phy); } +/* USB refclk 100MHz, 20b, SuperSpeed opt2, ext ssc, PLL LC1, multilink */ +static const struct cdns_reg_pairs usb_100_ext_ssc_plllc1_cmn_regs[] = { + {0x002D, SIERRA_CMN_PLLLC1_FBDIV_INT_PREG}, + {0x2086, SIERRA_CMN_PLLLC1_LF_COEFF_MODE1_PREG}, + {0x2086, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG}, + {0x1005, SIERRA_CMN_PLLLC1_CLK0_PREG}, + {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE1_PREG}, + {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG}, + {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG} +}; + +/* USB refclk 100MHz, 20b, SuperSpeed opt2, int ssc, PLL LC1, multilink */ +static const struct cdns_reg_pairs usb_100_int_ssc_plllc1_cmn_regs[] = { + 
{0x002D, SIERRA_CMN_PLLLC1_FBDIV_INT_PREG}, + {0x000E, SIERRA_CMN_PLLLC1_MODE_PREG}, + {0x1005, SIERRA_CMN_PLLLC1_CLK0_PREG} +}; + +static const struct cdns_reg_pairs usb_100_ml_ln_regs[] = { + {0xFE0A, SIERRA_DET_STANDEC_A_PREG}, + {0x000F, SIERRA_DET_STANDEC_B_PREG}, + {0x55A5, SIERRA_DET_STANDEC_C_PREG}, + {0x69AD, SIERRA_DET_STANDEC_D_PREG}, + {0x0241, SIERRA_DET_STANDEC_E_PREG}, + {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG}, + {0x0014, SIERRA_PSM_A0IN_TMR_PREG}, + {0x001D, SIERRA_PSM_A3IN_TMR_PREG}, + {0x0004, SIERRA_PSC_LN_A3_PREG}, + {0x0004, SIERRA_PSC_LN_IDLE_PREG}, + {0x001F, SIERRA_PSC_TX_A0_PREG}, + {0x0007, SIERRA_PSC_TX_A1_PREG}, + {0x0003, SIERRA_PSC_TX_A2_PREG}, + {0x0003, SIERRA_PSC_TX_A3_PREG}, + {0x0FFF, SIERRA_PSC_RX_A0_PREG}, + {0x0619, SIERRA_PSC_RX_A1_PREG}, + {0x0003, SIERRA_PSC_RX_A2_PREG}, + {0x0001, SIERRA_PSC_RX_A3_PREG}, + {0x0606, SIERRA_PLLCTRL_FBDIV_MODE01_PREG}, + {0x0001, SIERRA_PLLCTRL_SUBRATE_PREG}, + {0x0003, SIERRA_PLLCTRL_GEN_A_PREG}, + {0x0406, SIERRA_PLLCTRL_GEN_D_PREG}, + {0x5211, SIERRA_PLLCTRL_CPGAIN_MODE_PREG}, + {0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG}, + {0x2512, SIERRA_DFE_BIASTRIM_PREG}, + {0x0000, SIERRA_DRVCTRL_ATTEN_PREG}, + {0x823E, SIERRA_CLKPATHCTRL_TMR_PREG}, + {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG}, + {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG}, + {0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG}, + {0x023F, SIERRA_RX_CTLE_MAINTENANCE_PREG}, + {0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG}, + {0x0000, SIERRA_CREQ_EQ_CTRL_PREG}, + {0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}, + {0x8452, SIERRA_CTLELUT_CTRL_PREG}, + {0x4121, SIERRA_DFE_ECMP_RATESEL_PREG}, + {0x4121, SIERRA_DFE_SMP_RATESEL_PREG}, + {0x0002, SIERRA_DEQ_PHALIGN_CTRL}, + {0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG}, + {0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG}, + {0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG}, + {0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG}, + {0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG}, + {0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG}, + {0xA9A9, SIERRA_DEQ_VGATUNE_CTRL_PREG}, + {0x0014, SIERRA_DEQ_GLUT0}, + {0x0014, SIERRA_DEQ_GLUT1}, + {0x0014, SIERRA_DEQ_GLUT2}, + {0x0014, SIERRA_DEQ_GLUT3}, + {0x0014, SIERRA_DEQ_GLUT4}, + {0x0014, SIERRA_DEQ_GLUT5}, + {0x0014, SIERRA_DEQ_GLUT6}, + {0x0014, SIERRA_DEQ_GLUT7}, + {0x0014, SIERRA_DEQ_GLUT8}, + {0x0014, SIERRA_DEQ_GLUT9}, + {0x0014, SIERRA_DEQ_GLUT10}, + {0x0014, SIERRA_DEQ_GLUT11}, + {0x0014, SIERRA_DEQ_GLUT12}, + {0x0014, SIERRA_DEQ_GLUT13}, + {0x0014, SIERRA_DEQ_GLUT14}, + {0x0014, SIERRA_DEQ_GLUT15}, + {0x0014, SIERRA_DEQ_GLUT16}, + {0x0BAE, SIERRA_DEQ_ALUT0}, + {0x0AEB, SIERRA_DEQ_ALUT1}, + {0x0A28, SIERRA_DEQ_ALUT2}, + {0x0965, SIERRA_DEQ_ALUT3}, + {0x08A2, SIERRA_DEQ_ALUT4}, + {0x07DF, SIERRA_DEQ_ALUT5}, + {0x071C, SIERRA_DEQ_ALUT6}, + {0x0659, SIERRA_DEQ_ALUT7}, + {0x0596, SIERRA_DEQ_ALUT8}, + {0x0514, SIERRA_DEQ_ALUT9}, + {0x0492, SIERRA_DEQ_ALUT10}, + {0x0410, SIERRA_DEQ_ALUT11}, + {0x038E, SIERRA_DEQ_ALUT12}, + {0x030C, SIERRA_DEQ_ALUT13}, + {0x03F4, SIERRA_DEQ_DFETAP_CTRL_PREG}, + {0x0001, SIERRA_DFE_EN_1010_IGNORE_PREG}, + {0x3C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG}, + {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG}, + {0x1C08, SIERRA_DEQ_TAU_CTRL2_PREG}, + {0x0033, SIERRA_DEQ_PICTRL_PREG}, + {0x0330, SIERRA_CPICAL_TMRVAL_MODE0_PREG}, + {0x01FF, SIERRA_CPICAL_PICNT_MODE1_PREG}, + {0x0009, SIERRA_CPI_OUTBUF_RATESEL_PREG}, + {0x3232, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG}, + {0x0005, SIERRA_LFPSDET_SUPPORT_PREG}, + {0x000F, SIERRA_LFPSFILT_NS_PREG}, + {0x0009, SIERRA_LFPSFILT_RD_PREG}, + {0x0001, SIERRA_LFPSFILT_MP_PREG}, + {0x8013, 
SIERRA_SDFILT_H2L_A_PREG}, + {0x8009, SIERRA_SDFILT_L2H_PREG}, + {0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG}, + {0x0020, SIERRA_RXBUFFER_RCDFECTRL_PREG}, + {0x4243, SIERRA_RXBUFFER_DFECTRL_PREG} +}; + +static const struct cdns_sierra_vals usb_100_ext_ssc_plllc1_cmn_vals = { + .reg_pairs = usb_100_ext_ssc_plllc1_cmn_regs, + .num_regs = ARRAY_SIZE(usb_100_ext_ssc_plllc1_cmn_regs), +}; + +static const struct cdns_sierra_vals usb_100_int_ssc_plllc1_cmn_vals = { + .reg_pairs = usb_100_int_ssc_plllc1_cmn_regs, + .num_regs = ARRAY_SIZE(usb_100_int_ssc_plllc1_cmn_regs), +}; + +static const struct cdns_sierra_vals usb_100_ml_ln_vals = { + .reg_pairs = usb_100_ml_ln_regs, + .num_regs = ARRAY_SIZE(usb_100_ml_ln_regs), +}; + /* SGMII PHY PMA lane configuration */ static const struct cdns_reg_pairs sgmii_phy_pma_ln_regs[] = { {0x9010, SIERRA_PHY_PMA_XCVR_CTRL} @@ -2513,6 +2647,11 @@ static const struct cdns_sierra_data cdns_map_sierra = { [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, }, + [TYPE_USB] = { + [NO_SSC] = &pcie_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + }, }, }, .pma_cmn_vals = { @@ -2532,11 +2671,20 @@ static const struct cdns_sierra_data cdns_map_sierra = { [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals, [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals, }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals, + }, }, [TYPE_USB] = { [TYPE_NONE] = { [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals, }, + [TYPE_PCIE] = { + [EXTERNAL_SSC] = &usb_100_ext_ssc_plllc1_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_plllc1_cmn_vals, + }, }, [TYPE_SGMII] = { [TYPE_NONE] = { @@ -2573,11 +2721,20 @@ static const struct cdns_sierra_data cdns_map_sierra = { [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals, [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals, }, + [TYPE_USB] = { + [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals, + [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals, + [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals, + }, }, [TYPE_USB] = { [TYPE_NONE] = { [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals, }, + [TYPE_PCIE] = { + [EXTERNAL_SSC] = &usb_100_ml_ln_vals, + [INTERNAL_SSC] = &usb_100_ml_ln_vals, + }, }, [TYPE_SGMII] = { [TYPE_NONE] = { @@ -2620,6 +2777,11 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = { [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, }, + [TYPE_USB] = { + [NO_SSC] = &pcie_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals, + }, }, }, .phy_pma_ln_vals = { @@ -2655,11 +2817,20 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = { [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals, [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals, }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals, + }, }, [TYPE_USB] = { [TYPE_NONE] = { [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals, }, + [TYPE_PCIE] = { + [EXTERNAL_SSC] = &usb_100_ext_ssc_plllc1_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_plllc1_cmn_vals, + }, }, [TYPE_SGMII] = { [TYPE_PCIE] = { @@ -2693,11 +2864,20 @@ static const struct cdns_sierra_data cdns_ti_map_sierra = { [EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals, [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals, }, + [TYPE_USB] = { + [NO_SSC] = &ti_ml_pcie_100_no_ssc_ln_vals, + 
[EXTERNAL_SSC] = &ti_ml_pcie_100_ext_ssc_ln_vals, + [INTERNAL_SSC] = &ti_ml_pcie_100_int_ssc_ln_vals, + }, }, [TYPE_USB] = { [TYPE_NONE] = { [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals, }, + [TYPE_PCIE] = { + [EXTERNAL_SSC] = &usb_100_ml_ln_vals, + [INTERNAL_SSC] = &usb_100_ml_ln_vals, + }, }, [TYPE_SGMII] = { [TYPE_PCIE] = { diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index a281c0dfae97..37fa4bad6bd7 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -197,6 +197,7 @@ #define RX_SDCAL1_INIT_TMR 0x004CU #define RX_SDCAL1_ITER_TMR 0x004DU #define RX_CDRLF_CNFG 0x0080U +#define RX_CDRLF_CNFG2 0x0081U #define RX_CDRLF_CNFG3 0x0082U #define RX_SIGDET_HL_FILT_TMR 0x0090U #define RX_REE_GCSM1_CTRL 0x0108U @@ -204,6 +205,8 @@ #define RX_REE_GCSM1_EQENM_PH2 0x010AU #define RX_REE_GCSM2_CTRL 0x0110U #define RX_REE_PERGCSM_CTRL 0x0118U +#define RX_REE_PEAK_UTHR 0x0142U +#define RX_REE_PEAK_LTHR 0x0143U #define RX_REE_ATTEN_THR 0x0149U #define RX_REE_TAP1_CLIP 0x0171U #define RX_REE_TAP2TON_CLIP 0x0172U @@ -212,6 +215,7 @@ #define RX_DIAG_DFE_CTRL 0x01E0U #define RX_DIAG_DFE_AMP_TUNE_2 0x01E2U #define RX_DIAG_DFE_AMP_TUNE_3 0x01E3U +#define RX_DIAG_REE_DAC_CTRL 0x01E4U #define RX_DIAG_NQST_CTRL 0x01E5U #define RX_DIAG_SIGDET_TUNE 0x01E8U #define RX_DIAG_PI_RATE 0x01F4U @@ -295,6 +299,7 @@ enum cdns_torrent_phy_type { TYPE_QSGMII, TYPE_USB, TYPE_USXGMII, + TYPE_PCIE_ML, }; enum cdns_torrent_ref_clk { @@ -693,6 +698,7 @@ static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type case TYPE_DP: return "DisplayPort"; case TYPE_PCIE: + case TYPE_PCIE_ML: return "PCIe"; case TYPE_SGMII: return "SGMII"; @@ -2478,6 +2484,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) enum cdns_torrent_ssc_mode ssc; struct regmap *regmap; u32 num_regs, num_protocols, protocol; + u32 num_pcie_links = 0; num_protocols = hweight32(cdns_phy->protocol_bitmask); /* Maximum 2 protocols are supported */ @@ -2510,6 +2517,44 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) phy_t1 = fns(cdns_phy->protocol_bitmask, 0); phy_t2 = fns(cdns_phy->protocol_bitmask, 1); + + /* + * PCIe Multilink configuration can be supported along with a + * non-PCIe protocol. The existing limitation associated with + * the standalone PCIe Multilink configuration still remains, + * implying that there can be only two links (subnodes) of the + * PHY type PCIe which constitute the PCIe Multilink. + * + * Such configurations are handled by introducing a new protocol + * namely TYPE_PCIE_ML. Both of the PCIe links which have the + * protocol as TYPE_PCIE shall be treated as though the protocol + * corresponding to them is TYPE_PCIE_ML only for the sake of + * configuring the SERDES. + * + * PCIe Multilink configuration can be identified by checking if + * there are exactly two links with phy_type set to TYPE_PCIE. + * phy_t1 and phy_t2 are modified in such cases to support the + * PCIe Multilink configuration with a non-PCIe protocol. 
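+ * For example, a PHY whose subnodes are {PCIe, PCIe, USB} is
+ * configured as though phy_t1 were TYPE_PCIE_ML and phy_t2 TYPE_USB.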
+ */ + for (node = 0; node < cdns_phy->nsubnodes; node++) { + if (cdns_phy->phys[node].phy_type == TYPE_PCIE) + num_pcie_links++; + } + + if (num_pcie_links > 2) { + dev_err(dev, "cannot support PCIe Multilink with %u PCIe links\n", + num_pcie_links); + return -EINVAL; + } else if (num_pcie_links == 2) { + phy_t1 = TYPE_PCIE_ML; + for (node = 0; node < cdns_phy->nsubnodes; node++) { + if (cdns_phy->phys[node].phy_type == TYPE_PCIE) { + cdns_phy->phys[node].phy_type = TYPE_PCIE_ML; + continue; + } + phy_t2 = cdns_phy->phys[node].phy_type; + } + } } /** @@ -2676,6 +2721,11 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) } } + /* Restore TYPE_PCIE_ML to TYPE_PCIE to be compatible with suspend-resume */ + for (node = 0; node < cdns_phy->nsubnodes; node++) + if (cdns_phy->phys[node].phy_type == TYPE_PCIE_ML) + cdns_phy->phys[node].phy_type = TYPE_PCIE; + /* Take the PHY out of reset */ ret = reset_control_deassert(cdns_phy->phy_rst); if (ret) @@ -3088,15 +3138,14 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) } if (cdns_phy->nsubnodes > 1) - dev_dbg(dev, "Multi-link: %s (%d lanes) & %s (%d lanes)", - cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type), - cdns_phy->phys[0].num_lanes, - cdns_torrent_get_phy_type(cdns_phy->phys[1].phy_type), - cdns_phy->phys[1].num_lanes); + dev_dbg(dev, "Multi link configuration:\n"); else - dev_dbg(dev, "Single link: %s (%d lanes)", - cdns_torrent_get_phy_type(cdns_phy->phys[0].phy_type), - cdns_phy->phys[0].num_lanes); + dev_dbg(dev, "Single link configuration:\n"); + + for (i = 0; i < cdns_phy->nsubnodes; i++) + dev_dbg(dev, "%s (%d lanes)", + cdns_torrent_get_phy_type(cdns_phy->phys[i].phy_type), + cdns_phy->phys[i].num_lanes); return 0; @@ -3131,6 +3180,132 @@ static void cdns_torrent_phy_remove(struct platform_device *pdev) cdns_torrent_clk_cleanup(cdns_phy); } +/* Multilink PCIe and USB Same SSC link configuration */ +static const struct cdns_reg_pairs ml_pcie_usb_link_cmn_regs[] = { + {0x0002, PHY_PLL_CFG}, + {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0} +}; + +static const struct cdns_reg_pairs ml_pcie_usb_xcvr_diag_ln_regs[] = { + {0x0100, XCVR_DIAG_HSCLK_SEL}, + {0x0013, XCVR_DIAG_HSCLK_DIV}, + {0x0812, XCVR_DIAG_PLLDRC_CTRL} +}; + +static const struct cdns_reg_pairs usb_ml_pcie_xcvr_diag_ln_regs[] = { + {0x0041, XCVR_DIAG_PLLDRC_CTRL}, +}; + +static const struct cdns_torrent_vals ml_pcie_usb_link_cmn_vals = { + .reg_pairs = ml_pcie_usb_link_cmn_regs, + .num_regs = ARRAY_SIZE(ml_pcie_usb_link_cmn_regs), +}; + +static const struct cdns_torrent_vals ml_pcie_usb_xcvr_diag_ln_vals = { + .reg_pairs = ml_pcie_usb_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(ml_pcie_usb_xcvr_diag_ln_regs), +}; + +static const struct cdns_torrent_vals usb_ml_pcie_xcvr_diag_ln_vals = { + .reg_pairs = usb_ml_pcie_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(usb_ml_pcie_xcvr_diag_ln_regs), +}; + +/* Multi link PCIe configuration */ +static const struct cdns_reg_pairs ml_pcie_link_cmn_regs[] = { + {0x0002, PHY_PLL_CFG}, + {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0} +}; + +static const struct cdns_reg_pairs ml_pcie_xcvr_diag_ln_regs[] = { + {0x0100, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0812, XCVR_DIAG_PLLDRC_CTRL} +}; + +static const struct cdns_torrent_vals ml_pcie_link_cmn_vals = { + .reg_pairs = ml_pcie_link_cmn_regs, + .num_regs = ARRAY_SIZE(ml_pcie_link_cmn_regs), +}; + +static const struct cdns_torrent_vals ml_pcie_xcvr_diag_ln_vals = { + .reg_pairs = ml_pcie_xcvr_diag_ln_regs, + .num_regs = 
ARRAY_SIZE(ml_pcie_xcvr_diag_ln_regs), +}; + +/* Multi link PCIe, 100 MHz Ref clk, no SSC */ +static const struct cdns_reg_pairs ml_pcie_100_no_ssc_cmn_regs[] = { + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL} +}; + +static const struct cdns_reg_pairs ml_pcie_100_no_ssc_rx_ln_regs[] = { + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x0008, RX_REE_PEAK_UTHR}, + {0x018E, RX_CDRLF_CNFG}, + {0x2E33, RX_CDRLF_CNFG2}, + {0x0001, RX_DIAG_ACYA}, + {0x0C21, RX_DIAG_DFE_AMP_TUNE_2}, + {0x0002, RX_DIAG_DFE_AMP_TUNE_3}, + {0x0005, RX_DIAG_REE_DAC_CTRL} +}; + +static const struct cdns_torrent_vals ml_pcie_100_no_ssc_cmn_vals = { + .reg_pairs = ml_pcie_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_cmn_regs), +}; + +static const struct cdns_torrent_vals ml_pcie_100_no_ssc_rx_ln_vals = { + .reg_pairs = ml_pcie_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_rx_ln_regs), +}; + +/* Multi link PCIe, 100 MHz Ref clk, internal SSC */ +static const struct cdns_reg_pairs ml_pcie_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} +}; + +static const struct cdns_torrent_vals ml_pcie_100_int_ssc_cmn_vals = { + .reg_pairs = ml_pcie_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_cmn_regs), +}; + /* SGMII and QSGMII link configuration */ static const struct cdns_reg_pairs sgmii_qsgmii_link_cmn_regs[] = { {0x0002, PHY_PLL_CFG} @@ -4042,6 +4217,8 @@ static const struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { {0x0C02, RX_REE_ATTEN_THR}, {0x0330, RX_REE_SMGM_CTRL1}, {0x0300, RX_REE_SMGM_CTRL2}, + {0x0000, RX_REE_PEAK_UTHR}, + {0x01F5, RX_REE_PEAK_LTHR}, {0x0019, RX_REE_TAP1_CLIP}, {0x0019, RX_REE_TAP2TON_CLIP}, {0x1004, RX_DIAG_SIGDET_TUNE}, @@ -4531,7 +4708,7 @@ static const struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = { .num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs), }; -/* Multi link PCIe, 100 MHz Ref clk, internal SSC */ +/* For PCIe (with some other protocol), 100 MHz Ref clk, internal SSC */ static const struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { {0x0004, CMN_PLL0_DSM_DIAG_M0}, {0x0004, CMN_PLL0_DSM_DIAG_M1}, @@ -4670,12 +4847,15 @@ static const struct cdns_torrent_vals_entry link_cmn_vals_entries[] = { 
{CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_PCIE), &ml_pcie_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_link_cmn_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE_ML, TYPE_USB), &ml_pcie_usb_link_cmn_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_link_cmn_vals}, @@ -4690,6 +4870,7 @@ static const struct cdns_torrent_vals_entry link_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &pcie_usb_link_cmn_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE_ML), &ml_pcie_usb_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_link_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_link_cmn_vals}, @@ -4706,12 +4887,15 @@ static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_PCIE), &ml_pcie_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USXGMII), &pcie_usxgmii_xcvr_diag_ln_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE_ML, TYPE_USB), &ml_pcie_usb_xcvr_diag_ln_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_QSGMII), &sgmii_qsgmii_xcvr_diag_ln_vals}, @@ -4726,6 +4910,7 @@ static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_pcie_xcvr_diag_ln_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE_ML), &usb_ml_pcie_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_xcvr_diag_ln_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_xcvr_diag_ln_vals}, @@ -4739,6 +4924,7 @@ static const struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = { static const struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = { {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals}, + {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE_ML), &usb_phy_pcs_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), 
&usb_phy_pcs_cmn_vals}, {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals}, @@ -4756,6 +4942,10 @@ static const struct cdns_torrent_vals_entry cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals}, @@ -4770,6 +4960,10 @@ static const struct cdns_torrent_vals_entry cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals}, @@ -4802,6 +4996,10 @@ static const struct cdns_torrent_vals_entry cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals}, @@ -4838,6 +5036,10 @@ static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL}, @@ -4852,6 
+5054,10 @@ static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals}, @@ -4884,6 +5090,10 @@ static const struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, @@ -4920,6 +5130,10 @@ static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &ml_pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, @@ -4934,6 +5148,10 @@ static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &ml_pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &ml_pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, @@ -4966,6 +5184,10 @@ static const struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, 
EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, @@ -5038,6 +5260,10 @@ static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL}, @@ -5052,6 +5278,10 @@ static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, @@ -5084,6 +5314,10 @@ static const struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, @@ -5154,6 +5388,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals}, 
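+ /* Two links of TYPE_PCIE (PCIe Multilink): use the multilink PCIe PLL values */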
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals}, @@ -5168,6 +5406,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &ml_pcie_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &ml_pcie_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals}, @@ -5200,6 +5442,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_cmn_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_cmn_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals}, @@ -5236,6 +5482,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL}, @@ -5250,6 +5500,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), NULL}, + 
{CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), NULL}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals}, @@ -5282,6 +5536,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_tx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals}, @@ -5318,6 +5576,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_PCIE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, @@ -5332,6 +5594,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE_ML, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals}, @@ -5364,6 +5630,10 @@ static const struct cdns_torrent_vals_entry ti_j7200_rx_ln_vals_entries[] = { {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, NO_SSC), &usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, EXTERNAL_SSC), 
&usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE_ML, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, + {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals}, diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c index 6c98eb9608e9..c0bb71f80c04 100644 --- a/drivers/phy/marvell/phy-pxa-usb.c +++ b/drivers/phy/marvell/phy-pxa-usb.c @@ -325,7 +325,6 @@ static int pxa_usb_phy_probe(struct platform_device *pdev) phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-otg"); } - dev_info(dev, "Marvell PXA USB PHY"); return 0; } diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 644a34bd2b0b..f6504e0ecd1a 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -210,8 +210,6 @@ #define P2F_USB_FM_VALID BIT(0) #define P2F_RG_FRCK_EN BIT(8) -#define U3P_REF_CLK 26 /* MHZ */ -#define U3P_SLEW_RATE_COEF 28 #define U3P_SR_COEF_DIVISOR 1000 #define U3P_FM_DET_CYCLE_CNT 1024 @@ -277,20 +275,24 @@ enum mtk_phy_version { MTK_PHY_V3, }; +/** + * mtk_phy_pdata - SoC specific platform data + * @avoid_rx_sen_degradation: Avoid TX Sensitivity level degradation (MT6795/8173 only) + * @sw_pll_48m_to_26m: Workaround for V3 IP (MT8195) - switch the 48MHz PLL from + * fractional mode to integer to output 26MHz for U2PHY + * @sw_efuse_supported: Switches off eFuse auto-load from PHY and applies values + * read from different nvmem (usually different eFuse array) + * that is pointed at in the device tree node for this PHY + * @slew_ref_clk_mhz: Default reference clock (in MHz) for slew rate calibration + * @slew_rate_coefficient: Coefficient for slew rate calibration + * @version: PHY IP Version + */ struct mtk_phy_pdata { - /* avoid RX sensitivity level degradation only for mt8173 */ bool avoid_rx_sen_degradation; - /* - * workaround only for mt8195, HW fix it for others of V3, - * u2phy should use integer mode instead of fractional mode of - * 48M PLL, fix it by switching PLL to 26M from default 48M - */ bool sw_pll_48m_to_26m; - /* - * Some SoCs (e.g. mt8195) drop a bit when use auto load efuse, - * support sw way, also support it for v2/v3 optionally. - */ bool sw_efuse_supported; + u8 slew_ref_clock_mhz; + u8 slew_rate_coefficient; enum mtk_phy_version version; }; @@ -686,12 +688,14 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy, int fm_out; u32 tmp; - /* HW V3 doesn't support slew rate cal anymore */ - if (tphy->pdata->version == MTK_PHY_V3) - return; - - /* use force value */ - if (instance->eye_src) + /* + * If a fixed HS slew rate (EYE) value was supplied, don't run the + * calibration sequence and prefer using that value instead; also, + * if there is no reference clock for slew calibration or there is + * no slew coefficient, this means that the slew rate calibration + * sequence is not supported. 
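+ * In any of these cases, return before the ring-oscillator based
+ * measurement below and leave the HS slew rate settings unchanged.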
+ */ + if (instance->eye_src || !tphy->src_ref_clk || !tphy->src_coef) return; /* enable USB ring oscillator */ @@ -1516,12 +1520,16 @@ static const struct phy_ops mtk_tphy_ops = { static const struct mtk_phy_pdata tphy_v1_pdata = { .avoid_rx_sen_degradation = false, + .slew_ref_clock_mhz = 26, + .slew_rate_coefficient = 28, .version = MTK_PHY_V1, }; static const struct mtk_phy_pdata tphy_v2_pdata = { .avoid_rx_sen_degradation = false, .sw_efuse_supported = true, + .slew_ref_clock_mhz = 26, + .slew_rate_coefficient = 28, .version = MTK_PHY_V2, }; @@ -1532,6 +1540,8 @@ static const struct mtk_phy_pdata tphy_v3_pdata = { static const struct mtk_phy_pdata mt8173_pdata = { .avoid_rx_sen_degradation = true, + .slew_ref_clock_mhz = 26, + .slew_rate_coefficient = 28, .version = MTK_PHY_V1, }; @@ -1561,7 +1571,7 @@ static int mtk_tphy_probe(struct platform_device *pdev) struct resource *sif_res; struct mtk_tphy *tphy; struct resource res; - int port; + int port, ret; tphy = devm_kzalloc(dev, sizeof(*tphy), GFP_KERNEL); if (!tphy) @@ -1591,15 +1601,14 @@ static int mtk_tphy_probe(struct platform_device *pdev) } } - if (tphy->pdata->version < MTK_PHY_V3) { - tphy->src_ref_clk = U3P_REF_CLK; - tphy->src_coef = U3P_SLEW_RATE_COEF; - /* update parameters of slew rate calibrate if exist */ - device_property_read_u32(dev, "mediatek,src-ref-clk-mhz", - &tphy->src_ref_clk); - device_property_read_u32(dev, "mediatek,src-coef", - &tphy->src_coef); - } + /* Optional properties for slew calibration variation */ + ret = device_property_read_u32(dev, "mediatek,src-ref-clk-mhz", &tphy->src_ref_clk); + if (ret) + tphy->src_ref_clk = tphy->pdata->slew_ref_clock_mhz; + + ret = device_property_read_u32(dev, "mediatek,src-coef", &tphy->src_coef); + if (ret) + tphy->src_coef = tphy->pdata->slew_rate_coefficient; port = 0; for_each_child_of_node_scoped(np, child_np) { diff --git a/drivers/phy/phy-snps-eusb2.c b/drivers/phy/phy-snps-eusb2.c index 751b6d8ba2be..f90bf7e95463 100644 --- a/drivers/phy/phy-snps-eusb2.c +++ b/drivers/phy/phy-snps-eusb2.c @@ -256,7 +256,7 @@ static int exynos_eusb2_ref_clk_init(struct snps_eusb2_hsphy *phy) } if (!config) { - dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq); + dev_err(&phy->phy->dev, "unsupported ref_clk_freq: %lu\n", ref_clk_freq); return -EINVAL; } @@ -293,7 +293,7 @@ static int qcom_eusb2_ref_clk_init(struct snps_eusb2_hsphy *phy) } if (!config) { - dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq); + dev_err(&phy->phy->dev, "unsupported ref_clk_freq: %lu\n", ref_clk_freq); return -EINVAL; } @@ -392,7 +392,7 @@ static int qcom_snps_eusb2_hsphy_init(struct phy *p) snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_1, PHY_CFG_PLL_CPBIAS_CNTRL_MASK, - FIELD_PREP(PHY_CFG_PLL_CPBIAS_CNTRL_MASK, 0x1)); + FIELD_PREP(PHY_CFG_PLL_CPBIAS_CNTRL_MASK, 0x0)); snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG_CTRL_4, PHY_CFG_PLL_INT_CNTRL_MASK, @@ -437,6 +437,9 @@ static int qcom_snps_eusb2_hsphy_init(struct phy *p) snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_HS_PHY_CTRL2, USB2_SUSPEND_N_SEL, 0); + snps_eusb2_hsphy_write_mask(phy->base, QCOM_USB_PHY_CFG0, + CMN_CTRL_OVERRIDE_EN, 0); + return 0; } @@ -461,39 +464,40 @@ static int snps_eusb2_hsphy_init(struct phy *p) ret = phy_init(phy->repeater); if (ret) { - dev_err(&p->dev, "repeater init failed. 
%d\n", ret); + dev_err(&p->dev, "repeater init failed: %d\n", ret); goto disable_vreg; } ret = clk_bulk_prepare_enable(phy->data->num_clks, phy->clks); if (ret) { - dev_err(&p->dev, "failed to enable ref clock, %d\n", ret); - goto disable_vreg; + dev_err(&p->dev, "failed to enable ref clock: %d\n", ret); + goto exit_repeater; } ret = reset_control_assert(phy->phy_reset); if (ret) { - dev_err(&p->dev, "failed to assert phy_reset, %d\n", ret); - goto disable_ref_clk; + dev_err(&p->dev, "failed to assert phy_reset: %d\n", ret); + goto disable_clks; } usleep_range(100, 150); ret = reset_control_deassert(phy->phy_reset); if (ret) { - dev_err(&p->dev, "failed to de-assert phy_reset, %d\n", ret); - goto disable_ref_clk; + dev_err(&p->dev, "failed to de-assert phy_reset: %d\n", ret); + goto disable_clks; } ret = phy->data->phy_init(p); if (ret) - goto disable_ref_clk; + goto disable_clks; return 0; -disable_ref_clk: +disable_clks: clk_bulk_disable_unprepare(phy->data->num_clks, phy->clks); - +exit_repeater: + phy_exit(phy->repeater); disable_vreg: regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs); @@ -504,7 +508,7 @@ static int snps_eusb2_hsphy_exit(struct phy *p) { struct snps_eusb2_hsphy *phy = phy_get_drvdata(p); - clk_disable_unprepare(phy->ref_clk); + clk_bulk_disable_unprepare(phy->data->num_clks, phy->clks); regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs); @@ -551,7 +555,7 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev) if (!phy->clks) return -ENOMEM; - for (int i = 0; i < phy->data->num_clks; ++i) + for (i = 0; i < phy->data->num_clks; ++i) phy->clks[i].id = phy->data->clk_names[i]; ret = devm_clk_bulk_get(dev, phy->data->num_clks, phy->clks); @@ -560,7 +564,7 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev) "failed to get phy clock(s)\n"); phy->ref_clk = NULL; - for (int i = 0; i < phy->data->num_clks; ++i) { + for (i = 0; i < phy->data->num_clks; ++i) { if (!strcmp(phy->clks[i].id, "ref")) { phy->ref_clk = phy->clks[i].clk; break; @@ -582,14 +586,14 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev) return dev_err_probe(dev, ret, "failed to get regulator supplies\n"); - phy->repeater = devm_of_phy_optional_get(dev, np, 0); + phy->repeater = devm_of_phy_optional_get(dev, np, NULL); if (IS_ERR(phy->repeater)) return dev_err_probe(dev, PTR_ERR(phy->repeater), "failed to get repeater\n"); generic_phy = devm_phy_create(dev, NULL, &snps_eusb2_hsphy_ops); if (IS_ERR(generic_phy)) { - dev_err(dev, "failed to create phy %d\n", ret); + dev_err(dev, "failed to create phy: %d\n", ret); return PTR_ERR(generic_phy); } @@ -600,8 +604,6 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev) if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); - dev_info(dev, "Registered Snps-eUSB2 phy\n"); - return 0; } @@ -612,7 +614,9 @@ static const struct of_device_id snps_eusb2_hsphy_of_match_table[] = { }, { .compatible = "samsung,exynos2200-eusb2-phy", .data = &exynos2200_snps_eusb2_phy, - }, { }, + }, { + /* sentinel */ + } }; MODULE_DEVICE_TABLE(of, snps_eusb2_hsphy_of_match_table); diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig index ef14f4e33973..60a0ead127fa 100644 --- a/drivers/phy/qualcomm/Kconfig +++ b/drivers/phy/qualcomm/Kconfig @@ -126,12 +126,12 @@ config PHY_QCOM_QUSB2 USB IPs on MSM SOCs. 
config PHY_QCOM_EUSB2_REPEATER - tristate "Qualcomm SNPS eUSB2 Repeater Driver" + tristate "Qualcomm PMIC eUSB2 Repeater Driver" depends on OF && (ARCH_QCOM || COMPILE_TEST) select GENERIC_PHY help - Enable support for the USB high-speed SNPS eUSB2 repeater on Qualcomm - PMICs. The repeater is paired with a Synopsys eUSB2 Phy + Enable support for the USB high-speed eUSB2 repeater on Qualcomm + PMICs. The repeater is paired with a Synopsys or M31 eUSB2 PHY on Qualcomm SOCs. config PHY_QCOM_M31_USB @@ -158,6 +158,16 @@ config PHY_QCOM_UNIPHY_PCIE_28LP handles PHY initialization, clock management required after resetting the hardware and power management. +config PHY_QCOM_M31_EUSB + tristate "Qualcomm M31 eUSB2 PHY driver support" + depends on USB && (ARCH_QCOM || COMPILE_TEST) + select GENERIC_PHY + help + Enable this to support M31 eUSB2 PHY transceivers on Qualcomm + chips with a DWC3 USB core. It also handles initialization and + cleanup of the associated USB repeater that is paired with the + eUSB2 PHY. + config PHY_QCOM_USB_HS tristate "Qualcomm USB HS PHY module" depends on USB_ULPI_BUS diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile index 3851e28a212d..b71a6a0bed3f 100644 --- a/drivers/phy/qualcomm/Makefile +++ b/drivers/phy/qualcomm/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o obj-$(CONFIG_PHY_QCOM_M31_USB) += phy-qcom-m31.o +obj-$(CONFIG_PHY_QCOM_M31_EUSB) += phy-qcom-m31-eusb2.o obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o phy-qcom-qmp-usbc.o diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c index 6bd1b3c75c77..e0f2acc8109c 100644 --- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c +++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c @@ -37,32 +37,13 @@ #define EUSB2_TUNE_EUSB_EQU 0x5A #define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B -enum eusb2_reg_layout { - TUNE_EUSB_HS_COMP_CUR, - TUNE_EUSB_EQU, - TUNE_EUSB_SLEW, - TUNE_USB2_HS_COMP_CUR, - TUNE_USB2_PREEM, - TUNE_USB2_EQU, - TUNE_USB2_SLEW, - TUNE_SQUELCH_U, - TUNE_HSDISC, - TUNE_RES_FSDIF, - TUNE_IUSB2, - TUNE_USB2_CROSSOVER, - NUM_TUNE_FIELDS, - - FORCE_VAL_5 = NUM_TUNE_FIELDS, - FORCE_EN_5, - - EN_CTL1, - - RPTR_STATUS, - LAYOUT_SIZE, +struct eusb2_repeater_init_tbl_reg { + unsigned int reg; + unsigned int value; }; struct eusb2_repeater_cfg { - const u32 *init_tbl; + const struct eusb2_repeater_init_tbl_reg *init_tbl; int init_tbl_num; const char * const *vreg_list; int num_vregs; @@ -82,16 +63,16 @@ static const char * const pm8550b_vreg_l[] = { "vdd18", "vdd3", }; -static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = { - [TUNE_IUSB2] = 0x8, - [TUNE_SQUELCH_U] = 0x3, - [TUNE_USB2_PREEM] = 0x5, +static const struct eusb2_repeater_init_tbl_reg pm8550b_init_tbl[] = { + { EUSB2_TUNE_IUSB2, 0x8 }, + { EUSB2_TUNE_SQUELCH_U, 0x3 }, + { EUSB2_TUNE_USB2_PREEM, 0x5 }, }; -static const u32 smb2360_init_tbl[NUM_TUNE_FIELDS] = { - [TUNE_IUSB2] = 0x5, - [TUNE_SQUELCH_U] = 0x3, - [TUNE_USB2_PREEM] = 0x2, +static const struct eusb2_repeater_init_tbl_reg smb2360_init_tbl[] = { + { EUSB2_TUNE_IUSB2, 0x5 }, + { EUSB2_TUNE_SQUELCH_U, 0x3 }, + { EUSB2_TUNE_USB2_PREEM, 0x2 }, }; static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = { @@ -129,17 +110,10 @@ static int eusb2_repeater_init(struct phy *phy) struct eusb2_repeater *rptr = phy_get_drvdata(phy); struct 
device_node *np = rptr->dev->of_node; struct regmap *regmap = rptr->regmap; - const u32 *init_tbl = rptr->cfg->init_tbl; - u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM]; - u8 tune_hsdisc = init_tbl[TUNE_HSDISC]; - u8 tune_iusb2 = init_tbl[TUNE_IUSB2]; u32 base = rptr->base; - u32 val; + u32 poll_val; int ret; - - of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2); - of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc); - of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem); + u8 val; ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs); if (ret) @@ -147,21 +121,24 @@ static int eusb2_repeater_init(struct phy *phy) regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN); - regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]); - regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]); - regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]); - regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]); - regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]); - regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]); - regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]); - regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]); - regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]); - - regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem); - regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc); - regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2); - - ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5); + /* Write registers from init table */ + for (int i = 0; i < rptr->cfg->init_tbl_num; i++) + regmap_write(regmap, base + rptr->cfg->init_tbl[i].reg, + rptr->cfg->init_tbl[i].value); + + /* Override registers from devicetree values */ + if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val)) + regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val); + + if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &val)) + regmap_write(regmap, base + EUSB2_TUNE_HSDISC, val); + + if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &val)) + regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, val); + + /* Wait for status OK */ + ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, poll_val, + poll_val & RPTR_OK, 10, 5); if (ret) dev_err(rptr->dev, "initialization timed-out\n"); @@ -264,8 +241,6 @@ static int eusb2_repeater_probe(struct platform_device *pdev) if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); - dev_info(dev, "Registered Qcom-eUSB2 repeater\n"); - return 0; } diff --git a/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c new file mode 100644 index 000000000000..bf32572566c4 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include <linux/regulator/consumer.h> + +#define USB_PHY_UTMI_CTRL0 (0x3c) +#define SLEEPM BIT(0) + +#define USB_PHY_UTMI_CTRL5 (0x50) +#define POR BIT(1) + +#define USB_PHY_HS_PHY_CTRL_COMMON0 (0x54) +#define SIDDQ_SEL BIT(1) +#define SIDDQ BIT(2) +#define FSEL GENMASK(6, 4) +#define FSEL_38_4_MHZ_VAL (0x6) + +#define USB_PHY_HS_PHY_CTRL2 (0x64) +#define USB2_SUSPEND_N BIT(2) +#define USB2_SUSPEND_N_SEL BIT(3) + +#define USB_PHY_CFG0 (0x94) +#define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1) + +#define USB_PHY_CFG1 (0x154) +#define PLL_EN BIT(0) + +#define USB_PHY_FSEL_SEL (0xb8) +#define FSEL_SEL BIT(0) + +#define USB_PHY_XCFGI_39_32 (0x16c) +#define HSTX_PE GENMASK(3, 2) + +#define USB_PHY_XCFGI_71_64 (0x17c) +#define HSTX_SWING GENMASK(3, 0) + +#define USB_PHY_XCFGI_31_24 (0x168) +#define HSTX_SLEW GENMASK(2, 0) + +#define USB_PHY_XCFGI_7_0 (0x15c) +#define PLL_LOCK_TIME GENMASK(1, 0) + +#define M31_EUSB_PHY_INIT_CFG(o, b, v) \ +{ \ + .off = o, \ + .mask = b, \ + .val = v, \ +} + +struct m31_phy_tbl_entry { + u32 off; + u32 mask; + u32 val; +}; + +struct m31_eusb2_priv_data { + const struct m31_phy_tbl_entry *setup_seq; + unsigned int setup_seq_nregs; + const struct m31_phy_tbl_entry *override_seq; + unsigned int override_seq_nregs; + const struct m31_phy_tbl_entry *reset_seq; + unsigned int reset_seq_nregs; + unsigned int fsel; +}; + +static const struct m31_phy_tbl_entry m31_eusb2_setup_tbl[] = { + M31_EUSB_PHY_INIT_CFG(USB_PHY_CFG0, UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 1), + M31_EUSB_PHY_INIT_CFG(USB_PHY_UTMI_CTRL5, POR, 1), + M31_EUSB_PHY_INIT_CFG(USB_PHY_CFG1, PLL_EN, 1), + M31_EUSB_PHY_INIT_CFG(USB_PHY_FSEL_SEL, FSEL_SEL, 1), +}; + +static const struct m31_phy_tbl_entry m31_eusb_phy_override_tbl[] = { + M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_39_32, HSTX_PE, 0), + M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_71_64, HSTX_SWING, 7), + M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_31_24, HSTX_SLEW, 0), + M31_EUSB_PHY_INIT_CFG(USB_PHY_XCFGI_7_0, PLL_LOCK_TIME, 0), +}; + +static const struct m31_phy_tbl_entry m31_eusb_phy_reset_tbl[] = { + M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL2, USB2_SUSPEND_N_SEL, 1), + M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL2, USB2_SUSPEND_N, 1), + M31_EUSB_PHY_INIT_CFG(USB_PHY_UTMI_CTRL0, SLEEPM, 1), + M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL_COMMON0, SIDDQ_SEL, 1), + M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL_COMMON0, SIDDQ, 0), + M31_EUSB_PHY_INIT_CFG(USB_PHY_UTMI_CTRL5, POR, 0), + M31_EUSB_PHY_INIT_CFG(USB_PHY_HS_PHY_CTRL2, USB2_SUSPEND_N_SEL, 0), + M31_EUSB_PHY_INIT_CFG(USB_PHY_CFG0, UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0), +}; + +static const struct regulator_bulk_data m31_eusb_phy_vregs[] = { + { .supply = "vdd" }, + { .supply = "vdda12" }, +}; + +#define M31_EUSB_NUM_VREGS ARRAY_SIZE(m31_eusb_phy_vregs) + +struct m31eusb2_phy { + struct phy *phy; + void __iomem *base; + const struct m31_eusb2_priv_data *data; + enum phy_mode mode; + + struct regulator_bulk_data *vregs; + struct clk *clk; + struct reset_control *reset; + + struct phy *repeater; +}; + +static int m31eusb2_phy_write_readback(void __iomem *base, u32 offset, + const u32 mask, u32 val) +{ + u32 write_val; + u32 tmp; + + tmp = readl(base + offset); + tmp &= ~mask; + write_val = tmp | val; + + writel(write_val, 
base + offset); + + tmp = readl(base + offset); + tmp &= mask; + + if (tmp != val) { + pr_err("write: %x to offset: %x FAILED\n", val, offset); + return -EINVAL; + } + + return 0; +} + +static int m31eusb2_phy_write_sequence(struct m31eusb2_phy *phy, + const struct m31_phy_tbl_entry *tbl, + int num) +{ + int i; + int ret; + + for (i = 0 ; i < num; i++, tbl++) { + dev_dbg(&phy->phy->dev, "Offset:%x BitMask:%x Value:%x", + tbl->off, tbl->mask, tbl->val); + + ret = m31eusb2_phy_write_readback(phy->base, + tbl->off, tbl->mask, + tbl->val << __ffs(tbl->mask)); + if (ret < 0) + return ret; + } + + return 0; +} + +static int m31eusb2_phy_set_mode(struct phy *uphy, enum phy_mode mode, int submode) +{ + struct m31eusb2_phy *phy = phy_get_drvdata(uphy); + + phy->mode = mode; + + return phy_set_mode_ext(phy->repeater, mode, submode); +} + +static int m31eusb2_phy_init(struct phy *uphy) +{ + struct m31eusb2_phy *phy = phy_get_drvdata(uphy); + const struct m31_eusb2_priv_data *data = phy->data; + int ret; + + ret = regulator_bulk_enable(M31_EUSB_NUM_VREGS, phy->vregs); + if (ret) { + dev_err(&uphy->dev, "failed to enable regulator, %d\n", ret); + return ret; + } + + ret = phy_init(phy->repeater); + if (ret) { + dev_err(&uphy->dev, "repeater init failed: %d\n", ret); + goto disable_vreg; + } + + ret = clk_prepare_enable(phy->clk); + if (ret) { + dev_err(&uphy->dev, "failed to enable cfg ahb clock, %d\n", ret); + goto disable_repeater; + } + + /* Perform phy reset */ + reset_control_assert(phy->reset); + udelay(5); + reset_control_deassert(phy->reset); + + m31eusb2_phy_write_sequence(phy, data->setup_seq, data->setup_seq_nregs); + m31eusb2_phy_write_readback(phy->base, + USB_PHY_HS_PHY_CTRL_COMMON0, FSEL, + FIELD_PREP(FSEL, data->fsel)); + m31eusb2_phy_write_sequence(phy, data->override_seq, data->override_seq_nregs); + m31eusb2_phy_write_sequence(phy, data->reset_seq, data->reset_seq_nregs); + + return 0; + +disable_repeater: + phy_exit(phy->repeater); +disable_vreg: + regulator_bulk_disable(M31_EUSB_NUM_VREGS, phy->vregs); + + return ret; +} + +static int m31eusb2_phy_exit(struct phy *uphy) +{ + struct m31eusb2_phy *phy = phy_get_drvdata(uphy); + + clk_disable_unprepare(phy->clk); + regulator_bulk_disable(M31_EUSB_NUM_VREGS, phy->vregs); + phy_exit(phy->repeater); + + return 0; +} + +static const struct phy_ops m31eusb2_phy_gen_ops = { + .init = m31eusb2_phy_init, + .exit = m31eusb2_phy_exit, + .set_mode = m31eusb2_phy_set_mode, + .owner = THIS_MODULE, +}; + +static int m31eusb2_phy_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + const struct m31_eusb2_priv_data *data; + struct device *dev = &pdev->dev; + struct m31eusb2_phy *phy; + int ret; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + data = device_get_match_data(dev); + if (!data) + return -EINVAL; + phy->data = data; + + phy->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(phy->base)) + return PTR_ERR(phy->base); + + phy->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(phy->reset)) + return PTR_ERR(phy->reset); + + phy->clk = devm_clk_get(dev, NULL); + if (IS_ERR(phy->clk)) + return dev_err_probe(dev, PTR_ERR(phy->clk), + "failed to get clk\n"); + + phy->phy = devm_phy_create(dev, NULL, &m31eusb2_phy_gen_ops); + if (IS_ERR(phy->phy)) + return dev_err_probe(dev, PTR_ERR(phy->phy), + "failed to create phy\n"); + + ret = devm_regulator_bulk_get_const(dev, M31_EUSB_NUM_VREGS, + m31_eusb_phy_vregs, &phy->vregs); + if (ret) + return dev_err_probe(dev,
ret, + "failed to get regulator supplies\n"); + + phy_set_drvdata(phy->phy, phy); + + phy->repeater = devm_of_phy_get_by_index(dev, dev->of_node, 0); + if (IS_ERR(phy->repeater)) + return dev_err_probe(dev, PTR_ERR(phy->repeater), + "failed to get repeater\n"); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct m31_eusb2_priv_data m31_eusb_v1_data = { + .setup_seq = m31_eusb2_setup_tbl, + .setup_seq_nregs = ARRAY_SIZE(m31_eusb2_setup_tbl), + .override_seq = m31_eusb_phy_override_tbl, + .override_seq_nregs = ARRAY_SIZE(m31_eusb_phy_override_tbl), + .reset_seq = m31_eusb_phy_reset_tbl, + .reset_seq_nregs = ARRAY_SIZE(m31_eusb_phy_reset_tbl), + .fsel = FSEL_38_4_MHZ_VAL, +}; + +static const struct of_device_id m31eusb2_phy_id_table[] = { + { .compatible = "qcom,sm8750-m31-eusb2-phy", .data = &m31_eusb_v1_data }, + { }, +}; +MODULE_DEVICE_TABLE(of, m31eusb2_phy_id_table); + +static struct platform_driver m31eusb2_phy_driver = { + .probe = m31eusb2_phy_probe, + .driver = { + .name = "qcom-m31eusb2-phy", + .of_match_table = m31eusb2_phy_id_table, + }, +}; + +module_platform_driver(m31eusb2_phy_driver); + +MODULE_AUTHOR("Wesley Cheng <quic_wcheng@quicinc.com>"); +MODULE_DESCRIPTION("eUSB2 Qualcomm M31 HSPHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c index 20d4c020a83c..168ea980fda0 100644 --- a/drivers/phy/qualcomm/phy-qcom-m31.c +++ b/drivers/phy/qualcomm/phy-qcom-m31.c @@ -58,14 +58,16 @@ #define USB2_0_TX_ENABLE BIT(2) #define USB2PHY_USB_PHY_M31_XCFGI_4 0xc8 - #define HSTX_SLEW_RATE_565PS GENMASK(1, 0) + #define HSTX_SLEW_RATE_400PS GENMASK(2, 0) #define PLL_CHARGING_PUMP_CURRENT_35UA GENMASK(4, 3) #define ODT_VALUE_38_02_OHM GENMASK(7, 6) #define USB2PHY_USB_PHY_M31_XCFGI_5 0xcc - #define ODT_VALUE_45_02_OHM BIT(2) #define HSTX_PRE_EMPHASIS_LEVEL_0_55MA BIT(0) +#define USB2PHY_USB_PHY_M31_XCFGI_9 0xdc + #define HSTX_CURRENT_17_1MA_385MV BIT(1) + #define USB2PHY_USB_PHY_M31_XCFGI_11 0xe4 #define XCFG_COARSE_TUNE_NUM BIT(1) #define XCFG_FINE_TUNE_NUM BIT(3) @@ -164,7 +166,7 @@ static struct m31_phy_regs m31_ipq5332_regs[] = { }, { USB2PHY_USB_PHY_M31_XCFGI_4, - HSTX_SLEW_RATE_565PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM, + HSTX_SLEW_RATE_400PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM, 0 }, { @@ -174,10 +176,14 @@ static struct m31_phy_regs m31_ipq5332_regs[] = { }, { USB2PHY_USB_PHY_M31_XCFGI_5, - ODT_VALUE_45_02_OHM | HSTX_PRE_EMPHASIS_LEVEL_0_55MA, + HSTX_PRE_EMPHASIS_LEVEL_0_55MA, 4 }, { + USB2PHY_USB_PHY_M31_XCFGI_9, + HSTX_CURRENT_17_1MA_385MV, + }, + { USB_PHY_UTMI_CTRL5, 0x0, 0 @@ -305,8 +311,6 @@ static int m31usb_phy_probe(struct platform_device *pdev) phy_set_drvdata(qphy->phy, qphy); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (!IS_ERR(phy_provider)) - dev_info(dev, "Registered M31 USB phy\n"); return PTR_ERR_OR_ZERO(phy_provider); } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index b09fa00e9fe7..f07d097b129f 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -32,6 +32,7 @@ #include "phy-qcom-qmp-pcs-usb-v4.h" #include "phy-qcom-qmp-pcs-usb-v5.h" #include "phy-qcom-qmp-pcs-usb-v6.h" +#include "phy-qcom-qmp-pcs-usb-v8.h" #include "phy-qcom-qmp-dp-com-v3.h" @@ -212,6 +213,31 @@ static const unsigned int 
qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN, }; +static const unsigned int qmp_v8_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = QPHY_V8_PCS_SW_RESET, + [QPHY_START_CTRL] = QPHY_V8_PCS_START_CONTROL, + [QPHY_PCS_STATUS] = QPHY_V8_PCS_PCS_STATUS1, + [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V8_PCS_POWER_DOWN_CONTROL, + + /* In PCS_USB */ + [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V8_PCS_USB_AUTONOMOUS_MODE_CTRL, + [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V8_PCS_USB_LFPS_RXTERM_IRQ_CLEAR, + + [QPHY_COM_RESETSM_CNTRL] = QSERDES_V8_COM_RESETSM_CNTRL, + [QPHY_COM_C_READY_STATUS] = QSERDES_V8_COM_C_READY_STATUS, + [QPHY_COM_CMN_STATUS] = QSERDES_V8_COM_CMN_STATUS, + [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V8_COM_BIAS_EN_CLKBUFLR_EN, + + [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS, + [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV, + + [QPHY_TX_TX_POL_INV] = QSERDES_V8_TX_TX_POL_INV, + [QPHY_TX_TX_DRV_LVL] = QSERDES_V8_TX_TX_DRV_LVL, + [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V8_TX_TX_EMP_POST1_LVL, + [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V8_TX_HIGHZ_DRVR_EN, + [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V8_TX_TRANSCEIVER_BIAS_EN, +}; + static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), @@ -1471,6 +1497,139 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10), }; +static const struct qmp_phy_init_tbl sm8750_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE1_MODE1, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE2_MODE1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_CP_CTRL_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_CORECLK_DIV_MODE1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP1_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP2_MODE1, 0x41), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MODE1, 0x41), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MSB_MODE1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START1_MODE1, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START2_MODE1, 0x75), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START3_MODE1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_HSCLK_SEL_1, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE1_MODE1, 0x25), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE2_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x5c), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE1_MODE0, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_STEP_SIZE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_CP_CTRL_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP1_MODE0, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MODE0, 0x41), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DEC_START_MSB_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START2_MODE0, 0x75), + 
QMP_PHY_INIT_CFG(QSERDES_V8_COM_DIV_FRAC_START3_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE1_MODE0, 0x25), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE2_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_PER1, 0x62), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SSC_PER2, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SYSCLK_BUF_ENABLE, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_SYSCLK_EN_SEL, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_LOCK_CMP_CFG, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_VCO_TUNE_MAP, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_CORE_CLK_EN, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_CMN_CONFIG_1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_3, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V8_COM_ADDITIONAL_MISC, 0x0c), +}; + +static const struct qmp_phy_init_tbl sm8750_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_TX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_RX, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_OFFSET_TX, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_RES_CODE_LANE_OFFSET_RX, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_1, 0xf5), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_3, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_4, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_LANE_MODE_5, 0x5f), + QMP_PHY_INIT_CFG(QSERDES_V8_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG_LANE(QSERDES_V8_TX_PI_QEC_CTRL, 0x21, 1), + QMP_PHY_INIT_CFG_LANE(QSERDES_V8_TX_PI_QEC_CTRL, 0x05, 2), +}; + +static const struct qmp_phy_init_tbl sm8750_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_PI_CONTROLS, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_THRESH1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_THRESH2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_GAIN1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_UCDR_SB2_GAIN2, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_AUX_DATA_TCOARSE_TFINE, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_VGA_CAL_CNTRL1, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_VGA_CAL_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_GM_CAL, 0x13), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_IDAC_TSETTLE_LOW, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x27), + + QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_ENABLES, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_CNTRL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_DEGLITCH_CNTRL, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_LOW, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH2, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH3, 0xdf), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_00_HIGH4, 0xed), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_LOW, 0x19), + 
QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH, 0x09), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH2, 0x91), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH3, 0xb7), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_RX_MODE_01_HIGH4, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_VTH_CODE, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_CAL_CTRL1, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V8_RX_SIGDET_CAL_TRIM, 0x08), +}; + +static const struct qmp_phy_init_tbl sm8750_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG1, 0xc4), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG2, 0x89), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG3, 0x20), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_LOCK_DETECT_CONFIG6, 0x13), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_REFGEN_REQ_CONFIG1, 0x21), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_RX_SIGDET_LVL, 0x55), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_CDR_RESET_TIME, 0x0a), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_ALIGN_DETECT_CONFIG1, 0x88), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_ALIGN_DETECT_CONFIG2, 0x13), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_PCS_TX_RX_CONFIG, 0x0c), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_EQ_CONFIG1, 0x4b), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_EQ_CONFIG5, 0x10), +}; + +static const struct qmp_phy_init_tbl sm8750_usb3_pcs_usb_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_LFPS_DET_HIGH_COUNT_VAL, 0xf8), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_RXEQTRAINING_DFE_TIME_S2, 0x07), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_H, 0x00), +}; + static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8), QMP_PHY_INIT_CFG(QPHY_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07), @@ -1781,6 +1940,22 @@ static const struct qmp_combo_offsets qmp_combo_offsets_v5 = { .dp_dp_phy = 0x2200, }; +static const struct qmp_combo_offsets qmp_combo_offsets_v8 = { + .com = 0x0000, + .txa = 0x1400, + .rxa = 0x1600, + .txb = 0x1800, + .rxb = 0x1a00, + .usb3_serdes = 0x1000, + .usb3_pcs_misc = 0x1c00, + .usb3_pcs = 0x1e00, + .usb3_pcs_usb = 0x2100, + .dp_serdes = 0x3000, + .dp_txa = 0x3400, + .dp_txb = 0x3800, + .dp_dp_phy = 0x3c00, +}; + static const struct qmp_phy_cfg sar2130p_usb3dpphy_cfg = { .offsets = &qmp_combo_offsets_v3, @@ -2280,6 +2455,51 @@ static const struct qmp_phy_cfg sm8650_usb3dpphy_cfg = { .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), }; +static const struct qmp_phy_cfg sm8750_usb3dpphy_cfg = { + .offsets = &qmp_combo_offsets_v8, + + .serdes_tbl = sm8750_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8750_usb3_serdes_tbl), + .tx_tbl = sm8750_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8750_usb3_tx_tbl), + .rx_tbl = sm8750_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8750_usb3_rx_tbl), + .pcs_tbl = sm8750_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8750_usb3_pcs_tbl), + .pcs_usb_tbl = sm8750_usb3_pcs_usb_tbl, + .pcs_usb_tbl_num = ARRAY_SIZE(sm8750_usb3_pcs_usb_tbl), + + .dp_serdes_tbl = qmp_v6_dp_serdes_tbl, + .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl), + .dp_tx_tbl = qmp_v6_dp_tx_tbl, + .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = 
ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3), + + .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr, + .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr, + .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2, + .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2, + + .dp_aux_init = qmp_v4_dp_aux_init, + .configure_dp_tx = qmp_v4_configure_dp_tx, + .configure_dp_phy = qmp_v4_configure_dp_phy, + .calibrate_dp_phy = qmp_v4_calibrate_dp_phy, + + .regs = qmp_v8_usb3phy_regs_layout, + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), +}; + static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp) { const struct qmp_phy_cfg *cfg = qmp->cfg; @@ -3916,6 +4136,10 @@ static const struct of_device_id qmp_combo_of_match_table[] = { .data = &sm8650_usb3dpphy_cfg, }, { + .compatible = "qcom,sm8750-qmp-usb3-dp-phy", + .data = &sm8750_usb3dpphy_cfg, + }, + { .compatible = "qcom,x1e80100-qmp-usb3-dp-phy", .data = &x1e80100_usb3dpphy_cfg, }, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c index 461b9e0af610..95830dcfdec9 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c @@ -2639,29 +2639,29 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl[] }; static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rx_alt_tbl[] = { - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x07), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9a), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9b), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0xe4), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x99), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9b), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xfb), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xe4), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xec), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xb3), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf8), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xec), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd6), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf5), 
- QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x5e), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xed), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xe5), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x8d), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xd6), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7e), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37), @@ -2680,12 +2680,12 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rx_alt_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x08), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x08), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x7c), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x05), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x01), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f), }; @@ -2699,6 +2699,8 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_tx_tbl[] = { }; static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_PCIE_V5_20_PCS_G3_RXEQEVAL_TIME, 0x27), + QMP_PHY_INIT_CFG(QPHY_PCIE_V5_20_PCS_G4_RXEQEVAL_TIME, 0x27), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e), @@ -2711,11 +2713,19 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), }; -static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl[] = { +static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_pcs_alt_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e), QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LOCK_DETECT_CONFIG1, 0xff), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LOCK_DETECT_CONFIG2, 0x89), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG2, 0x50), +}; + +static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_ln_shrd_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_v5_LN_SHRD_UCDR_PI_CTRL2, 0x00), }; static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = { @@ -2739,27 +2749,27 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x99), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9b), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0), - 
QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0xd2), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x00), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x20), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9b), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xb6), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xd2), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xf0), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xb3), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf6), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xee), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd2), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xe6), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf9), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x3d), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xd6), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7e), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c), @@ -2767,14 +2777,7 @@ static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16), QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04), - QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08), -}; - -static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl[] = { - QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16), - QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22), - QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e), - QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66), + QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x06), }; static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl[] = { @@ -3191,6 +3194,7 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = { .rx = 0x0200, .tx2 = 0x0800, .rx2 = 0x0a00, + .ln_shrd = 0x0e00, }; static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_30 = { @@ -3398,8 +3402,8 @@ static const struct qmp_phy_cfg qcs8300_qmp_gen4x2_pciephy_cfg = { .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl), .rx = qcs8300_qmp_gen4x2_pcie_rx_alt_tbl, .rx_num = ARRAY_SIZE(qcs8300_qmp_gen4x2_pcie_rx_alt_tbl), - .pcs = sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl, - .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl), + .pcs = sa8775p_qmp_gen4_pcie_pcs_alt_tbl, + .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_alt_tbl), .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl, .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl), }, @@ -4067,12 +4071,15 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = { .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl), .rx = 
sa8775p_qmp_gen4x2_pcie_rx_alt_tbl, .rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_rx_alt_tbl), - .pcs = sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl, - .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl), - .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl, + .pcs = sa8775p_qmp_gen4_pcie_pcs_alt_tbl, + .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_alt_tbl), + .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl, .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl), .pcs_lane1 = sdx65_qmp_pcie_pcs_lane1_tbl, .pcs_lane1_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_lane1_tbl), + .ln_shrd = sa8775p_qmp_gen4x2_pcie_ln_shrd_tbl, + .ln_shrd_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_ln_shrd_tbl), + }, .tbls_rc = &(const struct qmp_phy_cfg_tbls) { @@ -4112,8 +4119,8 @@ static const struct qmp_phy_cfg sa8775p_qmp_gen4x4_pciephy_cfg = { .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl), .rx = sa8775p_qmp_gen4x4_pcie_rx_alt_tbl, .rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_rx_alt_tbl), - .pcs = sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl, - .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl), + .pcs = sa8775p_qmp_gen4_pcie_pcs_alt_tbl, + .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_alt_tbl), .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl, .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl), }, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h index 283d63c81593..951de964dc12 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h @@ -13,6 +13,8 @@ #define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090 #define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0 #define QPHY_V5_20_PCS_PCIE_PRESET_P10_POST 0x0e0 +#define QPHY_PCIE_V5_20_PCS_G3_RXEQEVAL_TIME 0x0f0 +#define QPHY_PCIE_V5_20_PCS_G4_RXEQEVAL_TIME 0x0f4 #define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG2 0x0fc #define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108 #define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h new file mode 100644 index 000000000000..89ace8024bc0 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v8.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef QCOM_PHY_QMP_PCS_USB_V8_H_ +#define QCOM_PHY_QMP_PCS_USB_V8_H_ + +#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG1 0x00 +#define QPHY_V8_PCS_USB_AUTONOMOUS_MODE_STATUS 0x04 +#define QPHY_V8_PCS_USB_AUTONOMOUS_MODE_CTRL 0x08 +#define QPHY_V8_PCS_USB_AUTONOMOUS_MODE_CTRL2 0x0c +#define QPHY_V8_PCS_USB_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x10 +#define QPHY_V8_PCS_USB_LFPS_RXTERM_IRQ_CLEAR 0x14 +#define QPHY_V8_PCS_USB_LFPS_DET_HIGH_COUNT_VAL 0x18 +#define QPHY_V8_PCS_USB_LFPS_TX_ECSTART 0x1c +#define QPHY_V8_PCS_USB_LFPS_PER_TIMER_VAL 0x20 +#define QPHY_V8_PCS_USB_LFPS_TX_END_CNT_U3_START 0x24 +#define QPHY_V8_PCS_USB_LFPS_CONFIG1 0x28 +#define QPHY_V8_PCS_USB_RXEQTRAINING_LOCK_TIME 0x2c +#define QPHY_V8_PCS_USB_RXEQTRAINING_WAIT_TIME 0x30 +#define QPHY_V8_PCS_USB_RXEQTRAINING_CTLE_TIME 0x34 +#define QPHY_V8_PCS_USB_RXEQTRAINING_WAIT_TIME_S2 0x38 +#define QPHY_V8_PCS_USB_RXEQTRAINING_DFE_TIME_S2 0x3c +#define QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_L 0x40 +#define QPHY_V8_PCS_USB_RCVR_DTCT_DLY_U3_H 0x44 +#define QPHY_V8_PCS_USB_ARCVR_DTCT_EN_PERIOD 0x48 +#define QPHY_V8_PCS_USB_ARCVR_DTCT_CM_DLY 0x4c +#define QPHY_V8_PCS_USB_TXONESZEROS_RUN_LENGTH 0x50 +#define QPHY_V8_PCS_USB_ALFPS_DEGLITCH_VAL 0x54 +#define QPHY_V8_PCS_USB_SIGDET_STARTUP_TIMER_VAL 0x58 +#define QPHY_V8_PCS_USB_TEST_CONTROL 0x5c +#define QPHY_V8_PCS_USB_RXTERMINATION_DLY_SEL 0x60 +#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG2 0x64 +#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG3 0x68 +#define QPHY_V8_PCS_USB_POWER_STATE_CONFIG4 0x6c + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h index d3ad5b7f5425..bbee68df4e14 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5_20.h @@ -8,8 +8,12 @@ #define QPHY_V5_20_PCS_INSIG_SW_CTRL7 0x060 #define QPHY_V5_20_PCS_INSIG_MX_CTRL7 0x07c +#define QPHY_V5_20_PCS_LOCK_DETECT_CONFIG1 0x0c4 +#define QPHY_V5_20_PCS_LOCK_DETECT_CONFIG2 0x0c8 #define QPHY_V5_20_PCS_G3S2_PRE_GAIN 0x170 #define QPHY_V5_20_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG1 0x1b8 +#define QPHY_V5_20_PCS_ALIGN_DETECT_CONFIG2 0x1bc #define QPHY_V5_20_PCS_EQ_CONFIG2 0x1d8 #define QPHY_V5_20_PCS_EQ_CONFIG4 0x1e0 #define QPHY_V5_20_PCS_EQ_CONFIG5 0x1e4 diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h new file mode 100644 index 000000000000..169fd5de7474 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef QCOM_PHY_QMP_PCS_V8_H_ +#define QCOM_PHY_QMP_PCS_V8_H_ + +/* Only for QMP V8 PHY - USB/PCIe PCS registers */ +#define QPHY_V8_PCS_SW_RESET 0x000 +#define QPHY_V8_PCS_PCS_STATUS1 0x014 +#define QPHY_V8_PCS_POWER_DOWN_CONTROL 0x040 +#define QPHY_V8_PCS_START_CONTROL 0x044 +#define QPHY_V8_PCS_POWER_STATE_CONFIG1 0x090 +#define QPHY_V8_PCS_LOCK_DETECT_CONFIG1 0x0c4 +#define QPHY_V8_PCS_LOCK_DETECT_CONFIG2 0x0c8 +#define QPHY_V8_PCS_LOCK_DETECT_CONFIG3 0x0cc +#define QPHY_V8_PCS_LOCK_DETECT_CONFIG6 0x0d8 +#define QPHY_V8_PCS_REFGEN_REQ_CONFIG1 0x0dc +#define QPHY_V8_PCS_RX_SIGDET_LVL 0x188 +#define QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_L 0x190 +#define QPHY_V8_PCS_RCVR_DTCT_DLY_P1U2_H 0x194 +#define QPHY_V8_PCS_RATE_SLEW_CNTRL1 0x198 +#define QPHY_V8_PCS_CDR_RESET_TIME 0x1b0 +#define QPHY_V8_PCS_ALIGN_DETECT_CONFIG1 0x1c0 +#define QPHY_V8_PCS_ALIGN_DETECT_CONFIG2 0x1c4 +#define QPHY_V8_PCS_PCS_TX_RX_CONFIG 0x1d0 +#define QPHY_V8_PCS_EQ_CONFIG1 0x1dc +#define QPHY_V8_PCS_EQ_CONFIG2 0x1e0 +#define QPHY_V8_PCS_EQ_CONFIG5 0x1ec + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h new file mode 100644 index 000000000000..d3b2292257bc --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v8.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_QSERDES_COM_V8_H_ +#define QCOM_PHY_QMP_QSERDES_COM_V8_H_ + +/* Only for QMP V8 PHY - QSERDES COM registers */ +#define QSERDES_V8_COM_SSC_STEP_SIZE1_MODE1 0x000 +#define QSERDES_V8_COM_SSC_STEP_SIZE2_MODE1 0x004 +#define QSERDES_V8_COM_SSC_STEP_SIZE3_MODE1 0x008 +#define QSERDES_V8_COM_CP_CTRL_MODE1 0x010 +#define QSERDES_V8_COM_PLL_RCTRL_MODE1 0x014 +#define QSERDES_V8_COM_PLL_CCTRL_MODE1 0x018 +#define QSERDES_V8_COM_CORECLK_DIV_MODE1 0x01c +#define QSERDES_V8_COM_LOCK_CMP1_MODE1 0x020 +#define QSERDES_V8_COM_LOCK_CMP2_MODE1 0x024 +#define QSERDES_V8_COM_DEC_START_MODE1 0x028 +#define QSERDES_V8_COM_DEC_START_MSB_MODE1 0x02c +#define QSERDES_V8_COM_DIV_FRAC_START1_MODE1 0x030 +#define QSERDES_V8_COM_DIV_FRAC_START2_MODE1 0x034 +#define QSERDES_V8_COM_DIV_FRAC_START3_MODE1 0x038 +#define QSERDES_V8_COM_HSCLK_SEL_1 0x03c +#define QSERDES_V8_COM_VCO_TUNE1_MODE1 0x048 +#define QSERDES_V8_COM_VCO_TUNE2_MODE1 0x04c +#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x050 +#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x054 +#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x058 +#define QSERDES_V8_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x05c +#define QSERDES_V8_COM_SSC_STEP_SIZE1_MODE0 0x060 +#define QSERDES_V8_COM_SSC_STEP_SIZE2_MODE0 0x064 +#define QSERDES_V8_COM_CP_CTRL_MODE0 0x070 +#define QSERDES_V8_COM_PLL_RCTRL_MODE0 0x074 +#define QSERDES_V8_COM_PLL_CCTRL_MODE0 0x078 +#define QSERDES_V8_COM_LOCK_CMP1_MODE0 0x080 +#define QSERDES_V8_COM_LOCK_CMP2_MODE0 0x084 +#define QSERDES_V8_COM_DEC_START_MODE0 0x088 +#define QSERDES_V8_COM_DEC_START_MSB_MODE0 0x08c +#define QSERDES_V8_COM_DIV_FRAC_START1_MODE0 0x090 +#define QSERDES_V8_COM_DIV_FRAC_START2_MODE0 0x094 +#define QSERDES_V8_COM_DIV_FRAC_START3_MODE0 0x098 +#define QSERDES_V8_COM_VCO_TUNE1_MODE0 0x0a8 +#define QSERDES_V8_COM_VCO_TUNE2_MODE0 0x0ac +#define QSERDES_V8_COM_BG_TIMER 0x0bc +#define QSERDES_V8_COM_SSC_EN_CENTER 0x0c0 +#define QSERDES_V8_COM_SSC_PER1 0x0cc +#define QSERDES_V8_COM_SSC_PER2 0x0d0 +#define QSERDES_V8_COM_BIAS_EN_CLKBUFLR_EN 0x0dc +#define QSERDES_V8_COM_SYSCLK_BUF_ENABLE 0x0e8 
+#define QSERDES_V8_COM_SYSCLK_EN_SEL 0x110 +#define QSERDES_V8_COM_RESETSM_CNTRL 0x118 +#define QSERDES_V8_COM_LOCK_CMP_CFG 0x124 +#define QSERDES_V8_COM_VCO_TUNE_MAP 0x140 +#define QSERDES_V8_COM_CORE_CLK_EN 0x170 +#define QSERDES_V8_COM_CMN_CONFIG_1 0x174 +#define QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_1 0x1a4 +#define QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_2 0x1a8 +#define QSERDES_V8_COM_AUTO_GAIN_ADJ_CTRL_3 0x1ac +#define QSERDES_V8_COM_ADDITIONAL_MISC 0x1b4 +#define QSERDES_V8_COM_CMN_STATUS 0x2c8 +#define QSERDES_V8_COM_C_READY_STATUS 0x2f0 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h new file mode 100644 index 000000000000..68c38fdfc1d8 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v5.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2025, The Linux Foundation. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_QSERDES_LN_SHRD_V5_H_ +#define QCOM_PHY_QMP_QSERDES_LN_SHRD_V5_H_ + +#define QSERDES_v5_LN_SHRD_UCDR_PI_CTRL2 0x04c + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h new file mode 100644 index 000000000000..4cb8b1708607 --- /dev/null +++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v8.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V8_H_ +#define QCOM_PHY_QMP_QSERDES_TXRX_V8_H_ + +#define QSERDES_V8_TX_TX_EMP_POST1_LVL 0x00c +#define QSERDES_V8_TX_TX_DRV_LVL 0x014 +#define QSERDES_V8_TX_RES_CODE_LANE_TX 0x034 +#define QSERDES_V8_TX_RES_CODE_LANE_RX 0x038 +#define QSERDES_V8_TX_RES_CODE_LANE_OFFSET_TX 0x03c +#define QSERDES_V8_TX_RES_CODE_LANE_OFFSET_RX 0x040 +#define QSERDES_V8_TX_TRANSCEIVER_BIAS_EN 0x054 +#define QSERDES_V8_TX_HIGHZ_DRVR_EN 0x058 +#define QSERDES_V8_TX_TX_POL_INV 0x05c +#define QSERDES_V8_TX_LANE_MODE_1 0x084 +#define QSERDES_V8_TX_LANE_MODE_2 0x088 +#define QSERDES_V8_TX_LANE_MODE_3 0x08c +#define QSERDES_V8_TX_LANE_MODE_4 0x090 +#define QSERDES_V8_TX_LANE_MODE_5 0x094 +#define QSERDES_V8_TX_RCV_DETECT_LVL_2 0x0a4 +#define QSERDES_V8_TX_PI_QEC_CTRL 0x0e4 + +#define QSERDES_V8_RX_UCDR_FO_GAIN 0x008 +#define QSERDES_V8_RX_UCDR_SO_GAIN 0x014 +#define QSERDES_V8_RX_UCDR_SVS_FO_GAIN 0x020 +#define QSERDES_V8_RX_UCDR_FASTLOCK_FO_GAIN 0x030 +#define QSERDES_V8_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034 +#define QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c +#define QSERDES_V8_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040 +#define QSERDES_V8_RX_UCDR_PI_CONTROLS 0x044 +#define QSERDES_V8_RX_UCDR_SB2_THRESH1 0x04c +#define QSERDES_V8_RX_UCDR_SB2_THRESH2 0x050 +#define QSERDES_V8_RX_UCDR_SB2_GAIN1 0x054 +#define QSERDES_V8_RX_UCDR_SB2_GAIN2 0x058 +#define QSERDES_V8_RX_AUX_DATA_TCOARSE_TFINE 0x060 +#define QSERDES_V8_RX_VGA_CAL_CNTRL1 0x0d4 +#define QSERDES_V8_RX_VGA_CAL_CNTRL2 0x0d8 +#define QSERDES_V8_RX_GM_CAL 0x0dc +#define QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec +#define QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0 +#define QSERDES_V8_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4 +#define QSERDES_V8_RX_RX_IDAC_TSETTLE_LOW 0x0f8 +#define QSERDES_V8_RX_RX_IDAC_TSETTLE_HIGH 0x0fc +#define QSERDES_V8_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110 +#define QSERDES_V8_RX_SIGDET_ENABLES 0x118 +#define QSERDES_V8_RX_SIGDET_CNTRL 0x11c +#define QSERDES_V8_RX_SIGDET_DEGLITCH_CNTRL 0x124 +#define QSERDES_V8_RX_RX_MODE_00_LOW 0x15c +#define QSERDES_V8_RX_RX_MODE_00_HIGH 
0x160 +#define QSERDES_V8_RX_RX_MODE_00_HIGH2 0x164 +#define QSERDES_V8_RX_RX_MODE_00_HIGH3 0x168 +#define QSERDES_V8_RX_RX_MODE_00_HIGH4 0x16c +#define QSERDES_V8_RX_RX_MODE_01_LOW 0x170 +#define QSERDES_V8_RX_RX_MODE_01_HIGH 0x174 +#define QSERDES_V8_RX_RX_MODE_01_HIGH2 0x178 +#define QSERDES_V8_RX_RX_MODE_01_HIGH3 0x17c +#define QSERDES_V8_RX_RX_MODE_01_HIGH4 0x180 +#define QSERDES_V8_RX_DFE_EN_TIMER 0x1a0 +#define QSERDES_V8_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4 +#define QSERDES_V8_RX_DCC_CTRL1 0x1a8 +#define QSERDES_V8_RX_VTH_CODE 0x1b0 +#define QSERDES_V8_RX_SIGDET_CAL_CTRL1 0x1e4 +#define QSERDES_V8_RX_SIGDET_CAL_TRIM 0x1f8 + +#endif diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c index b33e2e2b5014..9c69c77d10c8 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c @@ -1758,8 +1758,9 @@ static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct qmp_phy_cfg qmp_ufs_init_all(qmp, &cfg->tbls_hs_b); } -static int qmp_ufs_com_init(struct qmp_ufs *qmp) +static int qmp_ufs_power_on(struct phy *phy) { + struct qmp_ufs *qmp = phy_get_drvdata(phy); const struct qmp_phy_cfg *cfg = qmp->cfg; void __iomem *pcs = qmp->pcs; int ret; @@ -1775,70 +1776,14 @@ static int qmp_ufs_com_init(struct qmp_ufs *qmp) goto err_disable_regulators; qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN); - return 0; err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); - return ret; } -static int qmp_ufs_com_exit(struct qmp_ufs *qmp) -{ - const struct qmp_phy_cfg *cfg = qmp->cfg; - - reset_control_assert(qmp->ufs_reset); - - clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks); - - regulator_bulk_disable(cfg->num_vregs, qmp->vregs); - - return 0; -} - -static int qmp_ufs_init(struct phy *phy) -{ - struct qmp_ufs *qmp = phy_get_drvdata(phy); - const struct qmp_phy_cfg *cfg = qmp->cfg; - int ret; - dev_vdbg(qmp->dev, "Initializing QMP phy\n"); - - if (cfg->no_pcs_sw_reset) { - /* - * Get UFS reset, which is delayed until now to avoid a - * circular dependency where UFS needs its PHY, but the PHY - * needs this UFS reset. 
- */ - if (!qmp->ufs_reset) { - qmp->ufs_reset = - devm_reset_control_get_exclusive(qmp->dev, - "ufsphy"); - - if (IS_ERR(qmp->ufs_reset)) { - ret = PTR_ERR(qmp->ufs_reset); - dev_err(qmp->dev, - "failed to get UFS reset: %d\n", - ret); - - qmp->ufs_reset = NULL; - return ret; - } - } - - ret = reset_control_assert(qmp->ufs_reset); - if (ret) - return ret; - } - - ret = qmp_ufs_com_init(qmp); - if (ret) - return ret; - - return 0; -} - -static int qmp_ufs_power_on(struct phy *phy) +static int qmp_ufs_phy_calibrate(struct phy *phy) { struct qmp_ufs *qmp = phy_get_drvdata(phy); const struct qmp_phy_cfg *cfg = qmp->cfg; @@ -1847,6 +1792,10 @@ static int qmp_ufs_power_on(struct phy *phy) unsigned int val; int ret; + ret = reset_control_assert(qmp->ufs_reset); + if (ret) + return ret; + qmp_ufs_init_registers(qmp, cfg); ret = reset_control_deassert(qmp->ufs_reset); @@ -1876,54 +1825,17 @@ static int qmp_ufs_power_off(struct phy *phy) struct qmp_ufs *qmp = phy_get_drvdata(phy); const struct qmp_phy_cfg *cfg = qmp->cfg; - /* PHY reset */ - if (!cfg->no_pcs_sw_reset) - qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - - /* stop SerDes */ - qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL], SERDES_START); - /* Put PHY into POWER DOWN state: active low */ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN); - return 0; -} - -static int qmp_ufs_exit(struct phy *phy) -{ - struct qmp_ufs *qmp = phy_get_drvdata(phy); + clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks); - qmp_ufs_com_exit(qmp); + regulator_bulk_disable(cfg->num_vregs, qmp->vregs); return 0; } -static int qmp_ufs_enable(struct phy *phy) -{ - int ret; - - ret = qmp_ufs_init(phy); - if (ret) - return ret; - - ret = qmp_ufs_power_on(phy); - if (ret) - qmp_ufs_exit(phy); - - return ret; -} - -static int qmp_ufs_disable(struct phy *phy) -{ - int ret; - - ret = qmp_ufs_power_off(phy); - if (ret) - return ret; - return qmp_ufs_exit(phy); -} - static int qmp_ufs_set_mode(struct phy *phy, enum phy_mode mode, int submode) { struct qmp_ufs *qmp = phy_get_drvdata(phy); @@ -1940,9 +1852,40 @@ static int qmp_ufs_set_mode(struct phy *phy, enum phy_mode mode, int submode) return 0; } +static int qmp_ufs_phy_init(struct phy *phy) +{ + struct qmp_ufs *qmp = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qmp->cfg; + int ret; + + if (!cfg->no_pcs_sw_reset) + return 0; + + /* + * Get UFS reset, which is delayed until now to avoid a + * circular dependency where UFS needs its PHY, but the PHY + * needs this UFS reset. 
+ */ + if (!qmp->ufs_reset) { + qmp->ufs_reset = + devm_reset_control_get_exclusive(qmp->dev, "ufsphy"); + + if (IS_ERR(qmp->ufs_reset)) { + ret = PTR_ERR(qmp->ufs_reset); + dev_err(qmp->dev, "failed to get PHY reset: %d\n", ret); + qmp->ufs_reset = NULL; + return ret; + } + } + + return 0; +} + static const struct phy_ops qcom_qmp_ufs_phy_ops = { - .power_on = qmp_ufs_enable, - .power_off = qmp_ufs_disable, + .init = qmp_ufs_phy_init, + .power_on = qmp_ufs_power_on, + .power_off = qmp_ufs_power_off, + .calibrate = qmp_ufs_phy_calibrate, .set_mode = qmp_ufs_set_mode, .owner = THIS_MODULE, }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index d0f41e4aaa85..f58c82b2dd23 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -25,11 +25,15 @@ #include "phy-qcom-qmp-qserdes-txrx-v6.h" #include "phy-qcom-qmp-qserdes-txrx-v6_20.h" #include "phy-qcom-qmp-qserdes-txrx-v6_n4.h" +#include "phy-qcom-qmp-qserdes-ln-shrd-v5.h" #include "phy-qcom-qmp-qserdes-ln-shrd-v6.h" #include "phy-qcom-qmp-qserdes-com-v7.h" #include "phy-qcom-qmp-qserdes-txrx-v7.h" +#include "phy-qcom-qmp-qserdes-com-v8.h" +#include "phy-qcom-qmp-qserdes-txrx-v8.h" + #include "phy-qcom-qmp-qserdes-pll.h" #include "phy-qcom-qmp-pcs-v2.h" @@ -52,6 +56,8 @@ #include "phy-qcom-qmp-pcs-v7.h" +#include "phy-qcom-qmp-pcs-v8.h" + /* QPHY_SW_RESET bit */ #define SW_RESET BIT(0) /* QPHY_POWER_DOWN_CONTROL */ diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index 49c37c53b38e..b5514a32ff8f 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -1114,9 +1114,7 @@ static int qusb2_phy_probe(struct platform_device *pdev) phy_set_drvdata(generic_phy, qphy); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (!IS_ERR(phy_provider)) - dev_info(dev, "Registered Qcom-QUSB2 phy\n"); - else + if (IS_ERR(phy_provider)) pm_runtime_disable(dev); return PTR_ERR_OR_ZERO(phy_provider); diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c index bd44af36c67a..4e2dfd01adf2 100644 --- a/drivers/phy/rockchip/phy-rockchip-pcie.c +++ b/drivers/phy/rockchip/phy-rockchip-pcie.c @@ -30,9 +30,8 @@ #define PHY_CFG_ADDR_SHIFT 1 #define PHY_CFG_DATA_MASK 0xf #define PHY_CFG_ADDR_MASK 0x3f -#define PHY_CFG_RD_MASK 0x3ff #define PHY_CFG_WR_ENABLE 1 -#define PHY_CFG_WR_DISABLE 1 +#define PHY_CFG_WR_DISABLE 0 #define PHY_CFG_WR_SHIFT 0 #define PHY_CFG_WR_MASK 1 #define PHY_CFG_PLL_LOCK 0x10 @@ -160,6 +159,12 @@ static int rockchip_pcie_phy_power_on(struct phy *phy) guard(mutex)(&rk_phy->pcie_mutex); + regmap_write(rk_phy->reg_base, + rk_phy->phy_data->pcie_laneoff, + HIWORD_UPDATE(!PHY_LANE_IDLE_OFF, + PHY_LANE_IDLE_MASK, + PHY_LANE_IDLE_A_SHIFT + inst->index)); + if (rk_phy->pwr_cnt++) { return 0; } @@ -176,12 +181,6 @@ static int rockchip_pcie_phy_power_on(struct phy *phy) PHY_CFG_ADDR_MASK, PHY_CFG_ADDR_SHIFT)); - regmap_write(rk_phy->reg_base, - rk_phy->phy_data->pcie_laneoff, - HIWORD_UPDATE(!PHY_LANE_IDLE_OFF, - PHY_LANE_IDLE_MASK, - PHY_LANE_IDLE_A_SHIFT + inst->index)); - /* * No documented timeout value for phy operation below, * so we make it large enough here. 
And we use loop-break diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c index f6756a609a9a..be925508ed97 100644 --- a/drivers/phy/samsung/phy-exynos-mipi-video.c +++ b/drivers/phy/samsung/phy-exynos-mipi-video.c @@ -213,6 +213,55 @@ static const struct mipi_phy_device_desc exynos5433_mipi_phy = { }, }; +static const struct mipi_phy_device_desc exynos7870_mipi_phy = { + .num_regmaps = 3, + .regmap_names = { + "samsung,pmu-syscon", + "samsung,disp-sysreg", + "samsung,cam0-sysreg" + }, + .num_phys = 4, + .phys = { + { + /* EXYNOS_MIPI_PHY_ID_CSIS0 */ + .coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0, + .enable_val = EXYNOS4_PHY_ENABLE, + .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL0, + .enable_map = EXYNOS_MIPI_REGMAP_PMU, + .resetn_val = BIT(0), + .resetn_reg = 0, + .resetn_map = EXYNOS_MIPI_REGMAP_CAM0, + }, { + /* EXYNOS_MIPI_PHY_ID_DSIM0 */ + .coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0, + .enable_val = EXYNOS4_PHY_ENABLE, + .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL0, + .enable_map = EXYNOS_MIPI_REGMAP_PMU, + .resetn_val = BIT(0), + .resetn_reg = 0, + .resetn_map = EXYNOS_MIPI_REGMAP_DISP, + }, { + /* EXYNOS_MIPI_PHY_ID_CSIS1 */ + .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE, + .enable_val = EXYNOS4_PHY_ENABLE, + .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL1, + .enable_map = EXYNOS_MIPI_REGMAP_PMU, + .resetn_val = BIT(1), + .resetn_reg = 0, + .resetn_map = EXYNOS_MIPI_REGMAP_CAM0, + }, { + /* EXYNOS_MIPI_PHY_ID_CSIS2 */ + .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE, + .enable_val = EXYNOS4_PHY_ENABLE, + .enable_reg = EXYNOS7870_MIPI_PHY_CONTROL2, + .enable_map = EXYNOS_MIPI_REGMAP_PMU, + .resetn_val = BIT(2), + .resetn_reg = 0, + .resetn_map = EXYNOS_MIPI_REGMAP_CAM0, + }, + }, +}; + struct exynos_mipi_video_phy { struct regmap *regmaps[EXYNOS_MIPI_REGMAPS_NUM]; int num_phys; @@ -351,6 +400,9 @@ static const struct of_device_id exynos_mipi_video_phy_of_match[] = { }, { .compatible = "samsung,exynos5433-mipi-video-phy", .data = &exynos5433_mipi_phy, + }, { + .compatible = "samsung,exynos7870-mipi-video-phy", + .data = &exynos7870_mipi_phy, }, { /* sentinel */ }, }; diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index 917a76d584f0..dd660ebe8045 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -2025,6 +2025,35 @@ static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = { .n_regulators = ARRAY_SIZE(exynos5_regulator_names), }; +static const struct exynos5_usbdrd_phy_tuning exynos990_tunes_utmi_postinit[] = { + PHY_TUNING_ENTRY_PHY(EXYNOS850_DRD_HSPPARACON, + (HSPPARACON_TXVREF | + HSPPARACON_TXPREEMPAMP | HSPPARACON_SQRX | + HSPPARACON_COMPDIS), + (FIELD_PREP_CONST(HSPPARACON_TXVREF, 7) | + FIELD_PREP_CONST(HSPPARACON_TXPREEMPAMP, 3) | + FIELD_PREP_CONST(HSPPARACON_SQRX, 5) | + FIELD_PREP_CONST(HSPPARACON_COMPDIS, 7))), + PHY_TUNING_ENTRY_LAST +}; + +static const struct exynos5_usbdrd_phy_tuning *exynos990_tunes[PTS_MAX] = { + [PTS_UTMI_POSTINIT] = exynos990_tunes_utmi_postinit, +}; + +static const struct exynos5_usbdrd_phy_drvdata exynos990_usbdrd_phy = { + .phy_cfg = phy_cfg_exynos850, + .phy_ops = &exynos850_usbdrd_phy_ops, + .phy_tunes = exynos990_tunes, + .pmu_offset_usbdrd0_phy = EXYNOS990_PHY_CTRL_USB20, + .clk_names = exynos5_clk_names, + .n_clks = ARRAY_SIZE(exynos5_clk_names), + .core_clk_names = exynos5_core_clk_names, + .n_core_clks = ARRAY_SIZE(exynos5_core_clk_names), + .regulator_names = exynos5_regulator_names, + .n_regulators 
= ARRAY_SIZE(exynos5_regulator_names), +}; + static const struct exynos5_usbdrd_phy_config phy_cfg_gs101[] = { { .id = EXYNOS5_DRDPHY_UTMI, @@ -2228,6 +2257,9 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = { }, { .compatible = "samsung,exynos850-usbdrd-phy", .data = &exynos850_usbdrd_phy + }, { + .compatible = "samsung,exynos990-usbdrd-phy", + .data = &exynos990_usbdrd_phy }, { }, }; diff --git a/drivers/phy/st/phy-stih407-usb.c b/drivers/phy/st/phy-stih407-usb.c index ebb1d0858aa3..7a3e4584895c 100644 --- a/drivers/phy/st/phy-stih407-usb.c +++ b/drivers/phy/st/phy-stih407-usb.c @@ -139,8 +139,6 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev) if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); - dev_info(dev, "STiH407 USB Generic picoPHY driver probed!"); - return 0; } diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index b917cd413de7..27fe92f73f33 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -757,8 +757,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) } version = readl_relaxed(usbphyc->base + STM32_USBPHYC_VERSION); - dev_info(dev, "registered rev:%lu.%lu\n", - FIELD_GET(MAJREV, version), FIELD_GET(MINREV, version)); + dev_dbg(dev, "registered rev: %lu.%lu\n", + FIELD_GET(MAJREV, version), FIELD_GET(MINREV, version)); return 0; diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c index 6f12b38cd894..a26aec3ab29e 100644 --- a/drivers/phy/ti/phy-twl4030-usb.c +++ b/drivers/phy/ti/phy-twl4030-usb.c @@ -784,7 +784,6 @@ static int twl4030_usb_probe(struct platform_device *pdev) pm_runtime_mark_last_busy(&pdev->dev); pm_runtime_put_autosuspend(twl->dev); - dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); return 0; } diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 83962a114dc9..48a0d3a69ed0 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -214,7 +214,7 @@ config QCOM_Q6V5_MSS handled by QCOM_Q6V5_PAS driver. config QCOM_Q6V5_PAS - tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support" + tristate "Qualcomm Peripheral Authentication Service support" depends on OF && ARCH_QCOM depends on QCOM_SMEM depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n @@ -229,11 +229,10 @@ config QCOM_Q6V5_PAS select QCOM_RPROC_COMMON select QCOM_SCM help - Say y here to support the TrustZone based Peripheral Image Loader - for the Qualcomm Hexagon v5 based remote processors. This is commonly - used to control subsystems such as ADSP (Audio DSP), - CDSP (Compute DSP), MPSS (Modem Peripheral SubSystem), and - SLPI (Sensor Low Power Island). + Say y here to support the TrustZone based Peripheral Image Loader for + the Qualcomm remote processors. This is commonly used to control + subsystems such as ADSP (Audio DSP), CDSP (Compute DSP), MPSS (Modem + Peripheral SubSystem), and SLPI (Sensor Low Power Island). 
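The three ST/TI PHY hunks above (phy-stih407-usb, phy-stm32-usbphyc, phy-twl4030-usb) apply one convention three ways: unconditional dev_info() messages in probe paths are deleted outright or demoted to dev_dbg(), so a healthy probe stays silent and detail becomes opt-in via dynamic debug. A minimal sketch of that convention, using a hypothetical demo_phy platform driver (none of these names come from the patches in this series):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_phy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *base;

	/* Real failures still log loudly, with the errno attached. */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return dev_err_probe(dev, PTR_ERR(base),
				     "failed to map registers\n");

	/*
	 * Formerly a dev_info(): demoted so a successful probe emits
	 * nothing; enable dynamic debug to see it when needed.
	 */
	dev_dbg(dev, "probed, base %p\n", base);

	return 0;
}

Note the plain %p rather than %pK: the remoteproc hunks further down make the same substitution, since %p has hashed pointer values by default for years and %pK no longer buys anything in debug output.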
config QCOM_Q6V5_WCSS tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader" diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c index 9c7182b3b038..9c9e9c3cf378 100644 --- a/drivers/remoteproc/omap_remoteproc.c +++ b/drivers/remoteproc/omap_remoteproc.c @@ -1211,7 +1211,7 @@ static int omap_rproc_of_get_internal_memories(struct platform_device *pdev, oproc->mem[i].dev_addr = data->mems[i].dev_addr; oproc->mem[i].size = resource_size(res); - dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n", + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %p da 0x%x\n", data->mems[i].name, &oproc->mem[i].bus_addr, oproc->mem[i].size, oproc->mem[i].cpu_addr, oproc->mem[i].dev_addr); diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c index 4a4eb9c0b133..842e4b6cc5f9 100644 --- a/drivers/remoteproc/pru_rproc.c +++ b/drivers/remoteproc/pru_rproc.c @@ -1055,7 +1055,7 @@ static int pru_rproc_probe(struct platform_device *pdev) pru->mem_regions[i].pa = res->start; pru->mem_regions[i].size = resource_size(res); - dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n", + dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %p\n", mem_names[i], &pru->mem_regions[i].pa, pru->mem_regions[i].size, pru->mem_regions[i].va); } diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index b306f223127c..02e29171cbbe 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Qualcomm ADSP/SLPI Peripheral Image Loader for MSM8974 and MSM8996 + * Qualcomm Peripheral Authentication Service remoteproc driver * * Copyright (C) 2016 Linaro Ltd * Copyright (C) 2014 Sony Mobile Communications AB @@ -31,11 +31,11 @@ #include "qcom_q6v5.h" #include "remoteproc_internal.h" -#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100 +#define QCOM_PAS_DECRYPT_SHUTDOWN_DELAY_MS 100 #define MAX_ASSIGN_COUNT 3 -struct adsp_data { +struct qcom_pas_data { int crash_reason_smem; const char *firmware_name; const char *dtb_firmware_name; @@ -60,7 +60,7 @@ struct adsp_data { int region_assign_vmid; }; -struct qcom_adsp { +struct qcom_pas { struct device *dev; struct rproc *rproc; @@ -119,36 +119,37 @@ struct qcom_adsp { struct qcom_scm_pas_metadata dtb_pas_metadata; }; -static void adsp_segment_dump(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest, size_t offset, size_t size) +static void qcom_pas_segment_dump(struct rproc *rproc, + struct rproc_dump_segment *segment, + void *dest, size_t offset, size_t size) { - struct qcom_adsp *adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; int total_offset; - total_offset = segment->da + segment->offset + offset - adsp->mem_phys; - if (total_offset < 0 || total_offset + size > adsp->mem_size) { - dev_err(adsp->dev, + total_offset = segment->da + segment->offset + offset - pas->mem_phys; + if (total_offset < 0 || total_offset + size > pas->mem_size) { + dev_err(pas->dev, "invalid copy request for segment %pad with offset %zu and size %zu)\n", &segment->da, offset, size); memset(dest, 0xff, size); return; } - memcpy_fromio(dest, adsp->mem_region + total_offset, size); + memcpy_fromio(dest, pas->mem_region + total_offset, size); } -static void adsp_minidump(struct rproc *rproc) +static void qcom_pas_minidump(struct rproc *rproc) { - struct qcom_adsp *adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; if (rproc->dump_conf == RPROC_COREDUMP_DISABLED) return; - qcom_minidump(rproc, 
adsp->minidump_id, adsp_segment_dump); + qcom_minidump(rproc, pas->minidump_id, qcom_pas_segment_dump); } -static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds, - size_t pd_count) +static int qcom_pas_pds_enable(struct qcom_pas *pas, struct device **pds, + size_t pd_count) { int ret; int i; @@ -174,8 +175,8 @@ unroll_pd_votes: return ret; }; -static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds, - size_t pd_count) +static void qcom_pas_pds_disable(struct qcom_pas *pas, struct device **pds, + size_t pd_count) { int i; @@ -185,65 +186,65 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds, } } -static int adsp_shutdown_poll_decrypt(struct qcom_adsp *adsp) +static int qcom_pas_shutdown_poll_decrypt(struct qcom_pas *pas) { unsigned int retry_num = 50; int ret; do { - msleep(ADSP_DECRYPT_SHUTDOWN_DELAY_MS); - ret = qcom_scm_pas_shutdown(adsp->pas_id); + msleep(QCOM_PAS_DECRYPT_SHUTDOWN_DELAY_MS); + ret = qcom_scm_pas_shutdown(pas->pas_id); } while (ret == -EINVAL && --retry_num); return ret; } -static int adsp_unprepare(struct rproc *rproc) +static int qcom_pas_unprepare(struct rproc *rproc) { - struct qcom_adsp *adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; /* - * adsp_load() did pass pas_metadata to the SCM driver for storing + * qcom_pas_load() did pass pas_metadata to the SCM driver for storing * metadata context. It might have been released already if * auth_and_reset() was successful, but in other cases clean it up * here. */ - qcom_scm_pas_metadata_release(&adsp->pas_metadata); - if (adsp->dtb_pas_id) - qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata); + qcom_scm_pas_metadata_release(&pas->pas_metadata); + if (pas->dtb_pas_id) + qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata); return 0; } -static int adsp_load(struct rproc *rproc, const struct firmware *fw) +static int qcom_pas_load(struct rproc *rproc, const struct firmware *fw) { - struct qcom_adsp *adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; int ret; - /* Store firmware handle to be used in adsp_start() */ - adsp->firmware = fw; + /* Store firmware handle to be used in qcom_pas_start() */ + pas->firmware = fw; - if (adsp->lite_pas_id) - ret = qcom_scm_pas_shutdown(adsp->lite_pas_id); + if (pas->lite_pas_id) + ret = qcom_scm_pas_shutdown(pas->lite_pas_id); - if (adsp->dtb_pas_id) { - ret = request_firmware(&adsp->dtb_firmware, adsp->dtb_firmware_name, adsp->dev); + if (pas->dtb_pas_id) { + ret = request_firmware(&pas->dtb_firmware, pas->dtb_firmware_name, pas->dev); if (ret) { - dev_err(adsp->dev, "request_firmware failed for %s: %d\n", - adsp->dtb_firmware_name, ret); + dev_err(pas->dev, "request_firmware failed for %s: %d\n", + pas->dtb_firmware_name, ret); return ret; } - ret = qcom_mdt_pas_init(adsp->dev, adsp->dtb_firmware, adsp->dtb_firmware_name, - adsp->dtb_pas_id, adsp->dtb_mem_phys, - &adsp->dtb_pas_metadata); + ret = qcom_mdt_pas_init(pas->dev, pas->dtb_firmware, pas->dtb_firmware_name, + pas->dtb_pas_id, pas->dtb_mem_phys, + &pas->dtb_pas_metadata); if (ret) goto release_dtb_firmware; - ret = qcom_mdt_load_no_init(adsp->dev, adsp->dtb_firmware, adsp->dtb_firmware_name, - adsp->dtb_pas_id, adsp->dtb_mem_region, - adsp->dtb_mem_phys, adsp->dtb_mem_size, - &adsp->dtb_mem_reloc); + ret = qcom_mdt_load_no_init(pas->dev, pas->dtb_firmware, pas->dtb_firmware_name, + pas->dtb_pas_id, pas->dtb_mem_region, + pas->dtb_mem_phys, pas->dtb_mem_size, + &pas->dtb_mem_reloc); if (ret) goto release_dtb_metadata; } @@ -251,248 +252,246 @@ 
static int adsp_load(struct rproc *rproc, const struct firmware *fw) return 0; release_dtb_metadata: - qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata); + qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata); release_dtb_firmware: - release_firmware(adsp->dtb_firmware); + release_firmware(pas->dtb_firmware); return ret; } -static int adsp_start(struct rproc *rproc) +static int qcom_pas_start(struct rproc *rproc) { - struct qcom_adsp *adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; int ret; - ret = qcom_q6v5_prepare(&adsp->q6v5); + ret = qcom_q6v5_prepare(&pas->q6v5); if (ret) return ret; - ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + ret = qcom_pas_pds_enable(pas, pas->proxy_pds, pas->proxy_pd_count); if (ret < 0) goto disable_irqs; - ret = clk_prepare_enable(adsp->xo); + ret = clk_prepare_enable(pas->xo); if (ret) goto disable_proxy_pds; - ret = clk_prepare_enable(adsp->aggre2_clk); + ret = clk_prepare_enable(pas->aggre2_clk); if (ret) goto disable_xo_clk; - if (adsp->cx_supply) { - ret = regulator_enable(adsp->cx_supply); + if (pas->cx_supply) { + ret = regulator_enable(pas->cx_supply); if (ret) goto disable_aggre2_clk; } - if (adsp->px_supply) { - ret = regulator_enable(adsp->px_supply); + if (pas->px_supply) { + ret = regulator_enable(pas->px_supply); if (ret) goto disable_cx_supply; } - if (adsp->dtb_pas_id) { - ret = qcom_scm_pas_auth_and_reset(adsp->dtb_pas_id); + if (pas->dtb_pas_id) { + ret = qcom_scm_pas_auth_and_reset(pas->dtb_pas_id); if (ret) { - dev_err(adsp->dev, + dev_err(pas->dev, "failed to authenticate dtb image and release reset\n"); goto disable_px_supply; } } - ret = qcom_mdt_pas_init(adsp->dev, adsp->firmware, rproc->firmware, adsp->pas_id, - adsp->mem_phys, &adsp->pas_metadata); + ret = qcom_mdt_pas_init(pas->dev, pas->firmware, rproc->firmware, pas->pas_id, + pas->mem_phys, &pas->pas_metadata); if (ret) goto disable_px_supply; - ret = qcom_mdt_load_no_init(adsp->dev, adsp->firmware, rproc->firmware, adsp->pas_id, - adsp->mem_region, adsp->mem_phys, adsp->mem_size, - &adsp->mem_reloc); + ret = qcom_mdt_load_no_init(pas->dev, pas->firmware, rproc->firmware, pas->pas_id, + pas->mem_region, pas->mem_phys, pas->mem_size, + &pas->mem_reloc); if (ret) goto release_pas_metadata; - qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size); + qcom_pil_info_store(pas->info_name, pas->mem_phys, pas->mem_size); - ret = qcom_scm_pas_auth_and_reset(adsp->pas_id); + ret = qcom_scm_pas_auth_and_reset(pas->pas_id); if (ret) { - dev_err(adsp->dev, + dev_err(pas->dev, "failed to authenticate image and release reset\n"); goto release_pas_metadata; } - ret = qcom_q6v5_wait_for_start(&adsp->q6v5, msecs_to_jiffies(5000)); + ret = qcom_q6v5_wait_for_start(&pas->q6v5, msecs_to_jiffies(5000)); if (ret == -ETIMEDOUT) { - dev_err(adsp->dev, "start timed out\n"); - qcom_scm_pas_shutdown(adsp->pas_id); + dev_err(pas->dev, "start timed out\n"); + qcom_scm_pas_shutdown(pas->pas_id); goto release_pas_metadata; } - qcom_scm_pas_metadata_release(&adsp->pas_metadata); - if (adsp->dtb_pas_id) - qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata); + qcom_scm_pas_metadata_release(&pas->pas_metadata); + if (pas->dtb_pas_id) + qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata); - /* Remove pointer to the loaded firmware, only valid in adsp_load() & adsp_start() */ - adsp->firmware = NULL; + /* firmware is used to pass reference from qcom_pas_start(), drop it now */ + pas->firmware = NULL; return 0; release_pas_metadata: - 
qcom_scm_pas_metadata_release(&adsp->pas_metadata); - if (adsp->dtb_pas_id) - qcom_scm_pas_metadata_release(&adsp->dtb_pas_metadata); + qcom_scm_pas_metadata_release(&pas->pas_metadata); + if (pas->dtb_pas_id) + qcom_scm_pas_metadata_release(&pas->dtb_pas_metadata); disable_px_supply: - if (adsp->px_supply) - regulator_disable(adsp->px_supply); + if (pas->px_supply) + regulator_disable(pas->px_supply); disable_cx_supply: - if (adsp->cx_supply) - regulator_disable(adsp->cx_supply); + if (pas->cx_supply) + regulator_disable(pas->cx_supply); disable_aggre2_clk: - clk_disable_unprepare(adsp->aggre2_clk); + clk_disable_unprepare(pas->aggre2_clk); disable_xo_clk: - clk_disable_unprepare(adsp->xo); + clk_disable_unprepare(pas->xo); disable_proxy_pds: - adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + qcom_pas_pds_disable(pas, pas->proxy_pds, pas->proxy_pd_count); disable_irqs: - qcom_q6v5_unprepare(&adsp->q6v5); + qcom_q6v5_unprepare(&pas->q6v5); - /* Remove pointer to the loaded firmware, only valid in adsp_load() & adsp_start() */ - adsp->firmware = NULL; + /* firmware is used to pass reference from qcom_pas_start(), drop it now */ + pas->firmware = NULL; return ret; } static void qcom_pas_handover(struct qcom_q6v5 *q6v5) { - struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5); - - if (adsp->px_supply) - regulator_disable(adsp->px_supply); - if (adsp->cx_supply) - regulator_disable(adsp->cx_supply); - clk_disable_unprepare(adsp->aggre2_clk); - clk_disable_unprepare(adsp->xo); - adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + struct qcom_pas *pas = container_of(q6v5, struct qcom_pas, q6v5); + + if (pas->px_supply) + regulator_disable(pas->px_supply); + if (pas->cx_supply) + regulator_disable(pas->cx_supply); + clk_disable_unprepare(pas->aggre2_clk); + clk_disable_unprepare(pas->xo); + qcom_pas_pds_disable(pas, pas->proxy_pds, pas->proxy_pd_count); } -static int adsp_stop(struct rproc *rproc) +static int qcom_pas_stop(struct rproc *rproc) { - struct qcom_adsp *adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; int handover; int ret; - ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon); + ret = qcom_q6v5_request_stop(&pas->q6v5, pas->sysmon); if (ret == -ETIMEDOUT) - dev_err(adsp->dev, "timed out on wait\n"); + dev_err(pas->dev, "timed out on wait\n"); - ret = qcom_scm_pas_shutdown(adsp->pas_id); - if (ret && adsp->decrypt_shutdown) - ret = adsp_shutdown_poll_decrypt(adsp); + ret = qcom_scm_pas_shutdown(pas->pas_id); + if (ret && pas->decrypt_shutdown) + ret = qcom_pas_shutdown_poll_decrypt(pas); if (ret) - dev_err(adsp->dev, "failed to shutdown: %d\n", ret); + dev_err(pas->dev, "failed to shutdown: %d\n", ret); - if (adsp->dtb_pas_id) { - ret = qcom_scm_pas_shutdown(adsp->dtb_pas_id); + if (pas->dtb_pas_id) { + ret = qcom_scm_pas_shutdown(pas->dtb_pas_id); if (ret) - dev_err(adsp->dev, "failed to shutdown dtb: %d\n", ret); + dev_err(pas->dev, "failed to shutdown dtb: %d\n", ret); } - handover = qcom_q6v5_unprepare(&adsp->q6v5); + handover = qcom_q6v5_unprepare(&pas->q6v5); if (handover) - qcom_pas_handover(&adsp->q6v5); + qcom_pas_handover(&pas->q6v5); - if (adsp->smem_host_id) - ret = qcom_smem_bust_hwspin_lock_by_host(adsp->smem_host_id); + if (pas->smem_host_id) + ret = qcom_smem_bust_hwspin_lock_by_host(pas->smem_host_id); return ret; } -static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) +static void *qcom_pas_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem) { - struct qcom_adsp 
*adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; int offset; - offset = da - adsp->mem_reloc; - if (offset < 0 || offset + len > adsp->mem_size) + offset = da - pas->mem_reloc; + if (offset < 0 || offset + len > pas->mem_size) return NULL; if (is_iomem) *is_iomem = true; - return adsp->mem_region + offset; + return pas->mem_region + offset; } -static unsigned long adsp_panic(struct rproc *rproc) +static unsigned long qcom_pas_panic(struct rproc *rproc) { - struct qcom_adsp *adsp = rproc->priv; + struct qcom_pas *pas = rproc->priv; - return qcom_q6v5_panic(&adsp->q6v5); + return qcom_q6v5_panic(&pas->q6v5); } -static const struct rproc_ops adsp_ops = { - .unprepare = adsp_unprepare, - .start = adsp_start, - .stop = adsp_stop, - .da_to_va = adsp_da_to_va, +static const struct rproc_ops qcom_pas_ops = { + .unprepare = qcom_pas_unprepare, + .start = qcom_pas_start, + .stop = qcom_pas_stop, + .da_to_va = qcom_pas_da_to_va, .parse_fw = qcom_register_dump_segments, - .load = adsp_load, - .panic = adsp_panic, + .load = qcom_pas_load, + .panic = qcom_pas_panic, }; -static const struct rproc_ops adsp_minidump_ops = { - .unprepare = adsp_unprepare, - .start = adsp_start, - .stop = adsp_stop, - .da_to_va = adsp_da_to_va, +static const struct rproc_ops qcom_pas_minidump_ops = { + .unprepare = qcom_pas_unprepare, + .start = qcom_pas_start, + .stop = qcom_pas_stop, + .da_to_va = qcom_pas_da_to_va, .parse_fw = qcom_register_dump_segments, - .load = adsp_load, - .panic = adsp_panic, - .coredump = adsp_minidump, + .load = qcom_pas_load, + .panic = qcom_pas_panic, + .coredump = qcom_pas_minidump, }; -static int adsp_init_clock(struct qcom_adsp *adsp) +static int qcom_pas_init_clock(struct qcom_pas *pas) { - adsp->xo = devm_clk_get(adsp->dev, "xo"); - if (IS_ERR(adsp->xo)) - return dev_err_probe(adsp->dev, PTR_ERR(adsp->xo), + pas->xo = devm_clk_get(pas->dev, "xo"); + if (IS_ERR(pas->xo)) + return dev_err_probe(pas->dev, PTR_ERR(pas->xo), "failed to get xo clock"); - - adsp->aggre2_clk = devm_clk_get_optional(adsp->dev, "aggre2"); - if (IS_ERR(adsp->aggre2_clk)) - return dev_err_probe(adsp->dev, PTR_ERR(adsp->aggre2_clk), + pas->aggre2_clk = devm_clk_get_optional(pas->dev, "aggre2"); + if (IS_ERR(pas->aggre2_clk)) + return dev_err_probe(pas->dev, PTR_ERR(pas->aggre2_clk), "failed to get aggre2 clock"); return 0; } -static int adsp_init_regulator(struct qcom_adsp *adsp) +static int qcom_pas_init_regulator(struct qcom_pas *pas) { - adsp->cx_supply = devm_regulator_get_optional(adsp->dev, "cx"); - if (IS_ERR(adsp->cx_supply)) { - if (PTR_ERR(adsp->cx_supply) == -ENODEV) - adsp->cx_supply = NULL; + pas->cx_supply = devm_regulator_get_optional(pas->dev, "cx"); + if (IS_ERR(pas->cx_supply)) { + if (PTR_ERR(pas->cx_supply) == -ENODEV) + pas->cx_supply = NULL; else - return PTR_ERR(adsp->cx_supply); + return PTR_ERR(pas->cx_supply); } - if (adsp->cx_supply) - regulator_set_load(adsp->cx_supply, 100000); + if (pas->cx_supply) + regulator_set_load(pas->cx_supply, 100000); - adsp->px_supply = devm_regulator_get_optional(adsp->dev, "px"); - if (IS_ERR(adsp->px_supply)) { - if (PTR_ERR(adsp->px_supply) == -ENODEV) - adsp->px_supply = NULL; + pas->px_supply = devm_regulator_get_optional(pas->dev, "px"); + if (IS_ERR(pas->px_supply)) { + if (PTR_ERR(pas->px_supply) == -ENODEV) + pas->px_supply = NULL; else - return PTR_ERR(adsp->px_supply); + return PTR_ERR(pas->px_supply); } return 0; } -static int adsp_pds_attach(struct device *dev, struct device **devs, - char **pd_names) +static int qcom_pas_pds_attach(struct 
device *dev, struct device **devs, char **pd_names) { size_t num_pds = 0; int ret; @@ -528,10 +527,9 @@ unroll_attach: return ret; }; -static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds, - size_t pd_count) +static void qcom_pas_pds_detach(struct qcom_pas *pas, struct device **pds, size_t pd_count) { - struct device *dev = adsp->dev; + struct device *dev = pas->dev; int i; /* Handle single power domain */ @@ -544,62 +542,62 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds, dev_pm_domain_detach(pds[i], false); } -static int adsp_alloc_memory_region(struct qcom_adsp *adsp) +static int qcom_pas_alloc_memory_region(struct qcom_pas *pas) { struct reserved_mem *rmem; struct device_node *node; - node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0); + node = of_parse_phandle(pas->dev->of_node, "memory-region", 0); if (!node) { - dev_err(adsp->dev, "no memory-region specified\n"); + dev_err(pas->dev, "no memory-region specified\n"); return -EINVAL; } rmem = of_reserved_mem_lookup(node); of_node_put(node); if (!rmem) { - dev_err(adsp->dev, "unable to resolve memory-region\n"); + dev_err(pas->dev, "unable to resolve memory-region\n"); return -EINVAL; } - adsp->mem_phys = adsp->mem_reloc = rmem->base; - adsp->mem_size = rmem->size; - adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size); - if (!adsp->mem_region) { - dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n", - &rmem->base, adsp->mem_size); + pas->mem_phys = pas->mem_reloc = rmem->base; + pas->mem_size = rmem->size; + pas->mem_region = devm_ioremap_wc(pas->dev, pas->mem_phys, pas->mem_size); + if (!pas->mem_region) { + dev_err(pas->dev, "unable to map memory region: %pa+%zx\n", + &rmem->base, pas->mem_size); return -EBUSY; } - if (!adsp->dtb_pas_id) + if (!pas->dtb_pas_id) return 0; - node = of_parse_phandle(adsp->dev->of_node, "memory-region", 1); + node = of_parse_phandle(pas->dev->of_node, "memory-region", 1); if (!node) { - dev_err(adsp->dev, "no dtb memory-region specified\n"); + dev_err(pas->dev, "no dtb memory-region specified\n"); return -EINVAL; } rmem = of_reserved_mem_lookup(node); of_node_put(node); if (!rmem) { - dev_err(adsp->dev, "unable to resolve dtb memory-region\n"); + dev_err(pas->dev, "unable to resolve dtb memory-region\n"); return -EINVAL; } - adsp->dtb_mem_phys = adsp->dtb_mem_reloc = rmem->base; - adsp->dtb_mem_size = rmem->size; - adsp->dtb_mem_region = devm_ioremap_wc(adsp->dev, adsp->dtb_mem_phys, adsp->dtb_mem_size); - if (!adsp->dtb_mem_region) { - dev_err(adsp->dev, "unable to map dtb memory region: %pa+%zx\n", - &rmem->base, adsp->dtb_mem_size); + pas->dtb_mem_phys = pas->dtb_mem_reloc = rmem->base; + pas->dtb_mem_size = rmem->size; + pas->dtb_mem_region = devm_ioremap_wc(pas->dev, pas->dtb_mem_phys, pas->dtb_mem_size); + if (!pas->dtb_mem_region) { + dev_err(pas->dev, "unable to map dtb memory region: %pa+%zx\n", + &rmem->base, pas->dtb_mem_size); return -EBUSY; } return 0; } -static int adsp_assign_memory_region(struct qcom_adsp *adsp) +static int qcom_pas_assign_memory_region(struct qcom_pas *pas) { struct qcom_scm_vmperm perm[MAX_ASSIGN_COUNT]; struct device_node *node; @@ -607,45 +605,45 @@ static int adsp_assign_memory_region(struct qcom_adsp *adsp) int offset; int ret; - if (!adsp->region_assign_idx) + if (!pas->region_assign_idx) return 0; - for (offset = 0; offset < adsp->region_assign_count; ++offset) { + for (offset = 0; offset < pas->region_assign_count; ++offset) { struct reserved_mem *rmem = NULL; - node = 
of_parse_phandle(adsp->dev->of_node, "memory-region", - adsp->region_assign_idx + offset); + node = of_parse_phandle(pas->dev->of_node, "memory-region", + pas->region_assign_idx + offset); if (node) rmem = of_reserved_mem_lookup(node); of_node_put(node); if (!rmem) { - dev_err(adsp->dev, "unable to resolve shareable memory-region index %d\n", + dev_err(pas->dev, "unable to resolve shareable memory-region index %d\n", offset); return -EINVAL; } - if (adsp->region_assign_shared) { + if (pas->region_assign_shared) { perm[0].vmid = QCOM_SCM_VMID_HLOS; perm[0].perm = QCOM_SCM_PERM_RW; - perm[1].vmid = adsp->region_assign_vmid; + perm[1].vmid = pas->region_assign_vmid; perm[1].perm = QCOM_SCM_PERM_RW; perm_size = 2; } else { - perm[0].vmid = adsp->region_assign_vmid; + perm[0].vmid = pas->region_assign_vmid; perm[0].perm = QCOM_SCM_PERM_RW; perm_size = 1; } - adsp->region_assign_phys[offset] = rmem->base; - adsp->region_assign_size[offset] = rmem->size; - adsp->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS); + pas->region_assign_phys[offset] = rmem->base; + pas->region_assign_size[offset] = rmem->size; + pas->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS); - ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset], - adsp->region_assign_size[offset], - &adsp->region_assign_owners[offset], + ret = qcom_scm_assign_mem(pas->region_assign_phys[offset], + pas->region_assign_size[offset], + &pas->region_assign_owners[offset], perm, perm_size); if (ret < 0) { - dev_err(adsp->dev, "assign memory %d failed\n", offset); + dev_err(pas->dev, "assign memory %d failed\n", offset); return ret; } } @@ -653,35 +651,35 @@ static int adsp_assign_memory_region(struct qcom_adsp *adsp) return 0; } -static void adsp_unassign_memory_region(struct qcom_adsp *adsp) +static void qcom_pas_unassign_memory_region(struct qcom_pas *pas) { struct qcom_scm_vmperm perm; int offset; int ret; - if (!adsp->region_assign_idx || adsp->region_assign_shared) + if (!pas->region_assign_idx || pas->region_assign_shared) return; - for (offset = 0; offset < adsp->region_assign_count; ++offset) { + for (offset = 0; offset < pas->region_assign_count; ++offset) { perm.vmid = QCOM_SCM_VMID_HLOS; perm.perm = QCOM_SCM_PERM_RW; - ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset], - adsp->region_assign_size[offset], - &adsp->region_assign_owners[offset], + ret = qcom_scm_assign_mem(pas->region_assign_phys[offset], + pas->region_assign_size[offset], + &pas->region_assign_owners[offset], &perm, 1); if (ret < 0) - dev_err(adsp->dev, "unassign memory %d failed\n", offset); + dev_err(pas->dev, "unassign memory %d failed\n", offset); } } -static int adsp_probe(struct platform_device *pdev) +static int qcom_pas_probe(struct platform_device *pdev) { - const struct adsp_data *desc; - struct qcom_adsp *adsp; + const struct qcom_pas_data *desc; + struct qcom_pas *pas; struct rproc *rproc; const char *fw_name, *dtb_fw_name = NULL; - const struct rproc_ops *ops = &adsp_ops; + const struct rproc_ops *ops = &qcom_pas_ops; int ret; desc = of_device_get_match_data(&pdev->dev); @@ -706,9 +704,9 @@ static int adsp_probe(struct platform_device *pdev) } if (desc->minidump_id) - ops = &adsp_minidump_ops; + ops = &qcom_pas_minidump_ops; - rproc = devm_rproc_alloc(&pdev->dev, desc->sysmon_name, ops, fw_name, sizeof(*adsp)); + rproc = devm_rproc_alloc(&pdev->dev, desc->sysmon_name, ops, fw_name, sizeof(*pas)); if (!rproc) { dev_err(&pdev->dev, "unable to allocate remoteproc\n"); @@ -718,68 +716,65 @@ static int adsp_probe(struct platform_device 
*pdev) rproc->auto_boot = desc->auto_boot; rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); - adsp = rproc->priv; - adsp->dev = &pdev->dev; - adsp->rproc = rproc; - adsp->minidump_id = desc->minidump_id; - adsp->pas_id = desc->pas_id; - adsp->lite_pas_id = desc->lite_pas_id; - adsp->info_name = desc->sysmon_name; - adsp->smem_host_id = desc->smem_host_id; - adsp->decrypt_shutdown = desc->decrypt_shutdown; - adsp->region_assign_idx = desc->region_assign_idx; - adsp->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count); - adsp->region_assign_vmid = desc->region_assign_vmid; - adsp->region_assign_shared = desc->region_assign_shared; + pas = rproc->priv; + pas->dev = &pdev->dev; + pas->rproc = rproc; + pas->minidump_id = desc->minidump_id; + pas->pas_id = desc->pas_id; + pas->lite_pas_id = desc->lite_pas_id; + pas->info_name = desc->sysmon_name; + pas->smem_host_id = desc->smem_host_id; + pas->decrypt_shutdown = desc->decrypt_shutdown; + pas->region_assign_idx = desc->region_assign_idx; + pas->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count); + pas->region_assign_vmid = desc->region_assign_vmid; + pas->region_assign_shared = desc->region_assign_shared; if (dtb_fw_name) { - adsp->dtb_firmware_name = dtb_fw_name; - adsp->dtb_pas_id = desc->dtb_pas_id; + pas->dtb_firmware_name = dtb_fw_name; + pas->dtb_pas_id = desc->dtb_pas_id; } - platform_set_drvdata(pdev, adsp); + platform_set_drvdata(pdev, pas); - ret = device_init_wakeup(adsp->dev, true); + ret = device_init_wakeup(pas->dev, true); if (ret) goto free_rproc; - ret = adsp_alloc_memory_region(adsp); + ret = qcom_pas_alloc_memory_region(pas); if (ret) goto free_rproc; - ret = adsp_assign_memory_region(adsp); + ret = qcom_pas_assign_memory_region(pas); if (ret) goto free_rproc; - ret = adsp_init_clock(adsp); + ret = qcom_pas_init_clock(pas); if (ret) goto unassign_mem; - ret = adsp_init_regulator(adsp); + ret = qcom_pas_init_regulator(pas); if (ret) goto unassign_mem; - ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds, - desc->proxy_pd_names); + ret = qcom_pas_pds_attach(&pdev->dev, pas->proxy_pds, desc->proxy_pd_names); if (ret < 0) goto unassign_mem; - adsp->proxy_pd_count = ret; + pas->proxy_pd_count = ret; - ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state, - qcom_pas_handover); + ret = qcom_q6v5_init(&pas->q6v5, pdev, rproc, desc->crash_reason_smem, + desc->load_state, qcom_pas_handover); if (ret) goto detach_proxy_pds; - qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name); - qcom_add_smd_subdev(rproc, &adsp->smd_subdev); - qcom_add_pdm_subdev(rproc, &adsp->pdm_subdev); - adsp->sysmon = qcom_add_sysmon_subdev(rproc, - desc->sysmon_name, - desc->ssctl_id); - if (IS_ERR(adsp->sysmon)) { - ret = PTR_ERR(adsp->sysmon); + qcom_add_glink_subdev(rproc, &pas->glink_subdev, desc->ssr_name); + qcom_add_smd_subdev(rproc, &pas->smd_subdev); + qcom_add_pdm_subdev(rproc, &pas->pdm_subdev); + pas->sysmon = qcom_add_sysmon_subdev(rproc, desc->sysmon_name, desc->ssctl_id); + if (IS_ERR(pas->sysmon)) { + ret = PTR_ERR(pas->sysmon); goto deinit_remove_pdm_smd_glink; } - qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name); + qcom_add_ssr_subdev(rproc, &pas->ssr_subdev, desc->ssr_name); ret = rproc_add(rproc); if (ret) goto remove_ssr_sysmon; @@ -787,41 +782,41 @@ static int adsp_probe(struct platform_device *pdev) return 0; remove_ssr_sysmon: - qcom_remove_ssr_subdev(rproc, &adsp->ssr_subdev); - qcom_remove_sysmon_subdev(adsp->sysmon); 
+ qcom_remove_ssr_subdev(rproc, &pas->ssr_subdev); + qcom_remove_sysmon_subdev(pas->sysmon); deinit_remove_pdm_smd_glink: - qcom_remove_pdm_subdev(rproc, &adsp->pdm_subdev); - qcom_remove_smd_subdev(rproc, &adsp->smd_subdev); - qcom_remove_glink_subdev(rproc, &adsp->glink_subdev); - qcom_q6v5_deinit(&adsp->q6v5); + qcom_remove_pdm_subdev(rproc, &pas->pdm_subdev); + qcom_remove_smd_subdev(rproc, &pas->smd_subdev); + qcom_remove_glink_subdev(rproc, &pas->glink_subdev); + qcom_q6v5_deinit(&pas->q6v5); detach_proxy_pds: - adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); + qcom_pas_pds_detach(pas, pas->proxy_pds, pas->proxy_pd_count); unassign_mem: - adsp_unassign_memory_region(adsp); + qcom_pas_unassign_memory_region(pas); free_rproc: - device_init_wakeup(adsp->dev, false); + device_init_wakeup(pas->dev, false); return ret; } -static void adsp_remove(struct platform_device *pdev) +static void qcom_pas_remove(struct platform_device *pdev) { - struct qcom_adsp *adsp = platform_get_drvdata(pdev); - - rproc_del(adsp->rproc); - - qcom_q6v5_deinit(&adsp->q6v5); - adsp_unassign_memory_region(adsp); - qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev); - qcom_remove_sysmon_subdev(adsp->sysmon); - qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev); - qcom_remove_pdm_subdev(adsp->rproc, &adsp->pdm_subdev); - qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev); - adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count); - device_init_wakeup(adsp->dev, false); + struct qcom_pas *pas = platform_get_drvdata(pdev); + + rproc_del(pas->rproc); + + qcom_q6v5_deinit(&pas->q6v5); + qcom_pas_unassign_memory_region(pas); + qcom_remove_glink_subdev(pas->rproc, &pas->glink_subdev); + qcom_remove_sysmon_subdev(pas->sysmon); + qcom_remove_smd_subdev(pas->rproc, &pas->smd_subdev); + qcom_remove_pdm_subdev(pas->rproc, &pas->pdm_subdev); + qcom_remove_ssr_subdev(pas->rproc, &pas->ssr_subdev); + qcom_pas_pds_detach(pas, pas->proxy_pds, pas->proxy_pd_count); + device_init_wakeup(pas->dev, false); } -static const struct adsp_data adsp_resource_init = { +static const struct qcom_pas_data adsp_resource_init = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, @@ -831,7 +826,7 @@ static const struct adsp_data adsp_resource_init = { .ssctl_id = 0x14, }; -static const struct adsp_data sa8775p_adsp_resource = { +static const struct qcom_pas_data sa8775p_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mbn", .pas_id = 1, @@ -848,7 +843,7 @@ static const struct adsp_data sa8775p_adsp_resource = { .ssctl_id = 0x14, }; -static const struct adsp_data sdm845_adsp_resource_init = { +static const struct qcom_pas_data sdm845_adsp_resource_init = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, @@ -859,7 +854,7 @@ static const struct adsp_data sdm845_adsp_resource_init = { .ssctl_id = 0x14, }; -static const struct adsp_data sm6350_adsp_resource = { +static const struct qcom_pas_data sm6350_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, @@ -875,7 +870,7 @@ static const struct adsp_data sm6350_adsp_resource = { .ssctl_id = 0x14, }; -static const struct adsp_data sm6375_mpss_resource = { +static const struct qcom_pas_data sm6375_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .pas_id = 4, @@ -890,7 +885,7 @@ static const struct adsp_data sm6375_mpss_resource = { .ssctl_id = 0x12, }; -static const struct adsp_data sm8150_adsp_resource = { +static const struct qcom_pas_data 
sm8150_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, @@ -905,7 +900,7 @@ static const struct adsp_data sm8150_adsp_resource = { .ssctl_id = 0x14, }; -static const struct adsp_data sm8250_adsp_resource = { +static const struct qcom_pas_data sm8250_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, @@ -922,7 +917,7 @@ static const struct adsp_data sm8250_adsp_resource = { .ssctl_id = 0x14, }; -static const struct adsp_data sm8350_adsp_resource = { +static const struct qcom_pas_data sm8350_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, @@ -938,7 +933,7 @@ static const struct adsp_data sm8350_adsp_resource = { .ssctl_id = 0x14, }; -static const struct adsp_data msm8996_adsp_resource = { +static const struct qcom_pas_data msm8996_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, @@ -952,7 +947,7 @@ static const struct adsp_data msm8996_adsp_resource = { .ssctl_id = 0x14, }; -static const struct adsp_data cdsp_resource_init = { +static const struct qcom_pas_data cdsp_resource_init = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, @@ -962,7 +957,7 @@ static const struct adsp_data cdsp_resource_init = { .ssctl_id = 0x17, }; -static const struct adsp_data sa8775p_cdsp0_resource = { +static const struct qcom_pas_data sa8775p_cdsp0_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp0.mbn", .pas_id = 18, @@ -980,7 +975,7 @@ static const struct adsp_data sa8775p_cdsp0_resource = { .ssctl_id = 0x17, }; -static const struct adsp_data sa8775p_cdsp1_resource = { +static const struct qcom_pas_data sa8775p_cdsp1_resource = { .crash_reason_smem = 633, .firmware_name = "cdsp1.mbn", .pas_id = 30, @@ -998,7 +993,7 @@ static const struct adsp_data sa8775p_cdsp1_resource = { .ssctl_id = 0x20, }; -static const struct adsp_data sdm845_cdsp_resource_init = { +static const struct qcom_pas_data sdm845_cdsp_resource_init = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, @@ -1009,7 +1004,7 @@ static const struct adsp_data sdm845_cdsp_resource_init = { .ssctl_id = 0x17, }; -static const struct adsp_data sm6350_cdsp_resource = { +static const struct qcom_pas_data sm6350_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, @@ -1025,7 +1020,7 @@ static const struct adsp_data sm6350_cdsp_resource = { .ssctl_id = 0x17, }; -static const struct adsp_data sm8150_cdsp_resource = { +static const struct qcom_pas_data sm8150_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, @@ -1040,7 +1035,7 @@ static const struct adsp_data sm8150_cdsp_resource = { .ssctl_id = 0x17, }; -static const struct adsp_data sm8250_cdsp_resource = { +static const struct qcom_pas_data sm8250_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, @@ -1055,7 +1050,7 @@ static const struct adsp_data sm8250_cdsp_resource = { .ssctl_id = 0x17, }; -static const struct adsp_data sc8280xp_nsp0_resource = { +static const struct qcom_pas_data sc8280xp_nsp0_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, @@ -1069,7 +1064,7 @@ static const struct adsp_data sc8280xp_nsp0_resource = { .ssctl_id = 0x17, }; -static const struct adsp_data sc8280xp_nsp1_resource = { +static const struct qcom_pas_data sc8280xp_nsp1_resource = { .crash_reason_smem = 633, .firmware_name = "cdsp.mdt", .pas_id = 30, @@ -1083,7 +1078,7 @@ static const struct 
adsp_data sc8280xp_nsp1_resource = { .ssctl_id = 0x20, }; -static const struct adsp_data x1e80100_adsp_resource = { +static const struct qcom_pas_data x1e80100_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .dtb_firmware_name = "adsp_dtb.mdt", @@ -1103,7 +1098,7 @@ static const struct adsp_data x1e80100_adsp_resource = { .ssctl_id = 0x14, }; -static const struct adsp_data x1e80100_cdsp_resource = { +static const struct qcom_pas_data x1e80100_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .dtb_firmware_name = "cdsp_dtb.mdt", @@ -1123,7 +1118,7 @@ static const struct adsp_data x1e80100_cdsp_resource = { .ssctl_id = 0x17, }; -static const struct adsp_data sm8350_cdsp_resource = { +static const struct qcom_pas_data sm8350_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, @@ -1140,7 +1135,7 @@ static const struct adsp_data sm8350_cdsp_resource = { .ssctl_id = 0x17, }; -static const struct adsp_data sa8775p_gpdsp0_resource = { +static const struct qcom_pas_data sa8775p_gpdsp0_resource = { .crash_reason_smem = 640, .firmware_name = "gpdsp0.mbn", .pas_id = 39, @@ -1157,7 +1152,7 @@ static const struct adsp_data sa8775p_gpdsp0_resource = { .ssctl_id = 0x21, }; -static const struct adsp_data sa8775p_gpdsp1_resource = { +static const struct qcom_pas_data sa8775p_gpdsp1_resource = { .crash_reason_smem = 641, .firmware_name = "gpdsp1.mbn", .pas_id = 40, @@ -1174,7 +1169,7 @@ static const struct adsp_data sa8775p_gpdsp1_resource = { .ssctl_id = 0x22, }; -static const struct adsp_data mpss_resource_init = { +static const struct qcom_pas_data mpss_resource_init = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .pas_id = 4, @@ -1191,7 +1186,7 @@ static const struct adsp_data mpss_resource_init = { .ssctl_id = 0x12, }; -static const struct adsp_data sc8180x_mpss_resource = { +static const struct qcom_pas_data sc8180x_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .pas_id = 4, @@ -1206,7 +1201,7 @@ static const struct adsp_data sc8180x_mpss_resource = { .ssctl_id = 0x12, }; -static const struct adsp_data msm8996_slpi_resource_init = { +static const struct qcom_pas_data msm8996_slpi_resource_init = { .crash_reason_smem = 424, .firmware_name = "slpi.mdt", .pas_id = 12, @@ -1220,7 +1215,7 @@ static const struct adsp_data msm8996_slpi_resource_init = { .ssctl_id = 0x16, }; -static const struct adsp_data sdm845_slpi_resource_init = { +static const struct qcom_pas_data sdm845_slpi_resource_init = { .crash_reason_smem = 424, .firmware_name = "slpi.mdt", .pas_id = 12, @@ -1236,7 +1231,7 @@ static const struct adsp_data sdm845_slpi_resource_init = { .ssctl_id = 0x16, }; -static const struct adsp_data wcss_resource_init = { +static const struct qcom_pas_data wcss_resource_init = { .crash_reason_smem = 421, .firmware_name = "wcnss.mdt", .pas_id = 6, @@ -1246,7 +1241,7 @@ static const struct adsp_data wcss_resource_init = { .ssctl_id = 0x12, }; -static const struct adsp_data sdx55_mpss_resource = { +static const struct qcom_pas_data sdx55_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .pas_id = 4, @@ -1261,7 +1256,7 @@ static const struct adsp_data sdx55_mpss_resource = { .ssctl_id = 0x22, }; -static const struct adsp_data sm8450_mpss_resource = { +static const struct qcom_pas_data sm8450_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .pas_id = 4, @@ -1279,7 +1274,7 @@ static const struct adsp_data sm8450_mpss_resource = { .ssctl_id = 0x12, }; 
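The long run of per-SoC tables above and below this point is pure match data: each struct is static const and is only ever reached through the driver's of_device_id table, which is why the adsp_data to qcom_pas_data rename stays mechanical. A reduced, self-contained sketch of the lookup pattern, with hypothetical demo_* names and a made-up "vendor,demo-adsp" compatible (the field values echo adsp_resource_init above):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_pas_data {
	int crash_reason_smem;
	const char *firmware_name;
	unsigned int pas_id;
};

/* One immutable table per SoC; the driver never writes to it. */
static const struct demo_pas_data demo_adsp_resource = {
	.crash_reason_smem = 423,
	.firmware_name = "adsp.mdt",
	.pas_id = 1,
};

static const struct of_device_id demo_pas_of_match[] = {
	{ .compatible = "vendor,demo-adsp", .data = &demo_adsp_resource },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, demo_pas_of_match);

static int demo_pas_probe(struct platform_device *pdev)
{
	/* Resolves to the .data pointer of whichever compatible matched. */
	const struct demo_pas_data *desc = of_device_get_match_data(&pdev->dev);

	if (!desc)
		return -EINVAL;

	dev_dbg(&pdev->dev, "would boot %s (pas_id %u)\n",
		desc->firmware_name, desc->pas_id);
	return 0;
}

static struct platform_driver demo_pas_driver = {
	.probe = demo_pas_probe,
	.driver = {
		.name = "demo_pas",
		.of_match_table = demo_pas_of_match,
	},
};
module_platform_driver(demo_pas_driver);
MODULE_DESCRIPTION("Match-data lookup sketch");
MODULE_LICENSE("GPL");

Because every consumer goes through of_device_get_match_data(), renaming the structure type touches declarations and table definitions but no control flow.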
-static const struct adsp_data sm8550_adsp_resource = { +static const struct qcom_pas_data sm8550_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .dtb_firmware_name = "adsp_dtb.mdt", @@ -1299,7 +1294,7 @@ static const struct adsp_data sm8550_adsp_resource = { .smem_host_id = 2, }; -static const struct adsp_data sm8550_cdsp_resource = { +static const struct qcom_pas_data sm8550_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .dtb_firmware_name = "cdsp_dtb.mdt", @@ -1320,7 +1315,7 @@ static const struct adsp_data sm8550_cdsp_resource = { .smem_host_id = 5, }; -static const struct adsp_data sm8550_mpss_resource = { +static const struct qcom_pas_data sm8550_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .dtb_firmware_name = "modem_dtb.mdt", @@ -1344,7 +1339,7 @@ static const struct adsp_data sm8550_mpss_resource = { .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA, }; -static const struct adsp_data sc7280_wpss_resource = { +static const struct qcom_pas_data sc7280_wpss_resource = { .crash_reason_smem = 626, .firmware_name = "wpss.mdt", .pas_id = 6, @@ -1361,7 +1356,7 @@ static const struct adsp_data sc7280_wpss_resource = { .ssctl_id = 0x19, }; -static const struct adsp_data sm8650_cdsp_resource = { +static const struct qcom_pas_data sm8650_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .dtb_firmware_name = "cdsp_dtb.mdt", @@ -1386,7 +1381,7 @@ static const struct adsp_data sm8650_cdsp_resource = { .region_assign_vmid = QCOM_SCM_VMID_CDSP, }; -static const struct adsp_data sm8650_mpss_resource = { +static const struct qcom_pas_data sm8650_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .dtb_firmware_name = "modem_dtb.mdt", @@ -1410,7 +1405,7 @@ static const struct adsp_data sm8650_mpss_resource = { .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA, }; -static const struct adsp_data sm8750_mpss_resource = { +static const struct qcom_pas_data sm8750_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .dtb_firmware_name = "modem_dtb.mdt", @@ -1434,7 +1429,7 @@ static const struct adsp_data sm8750_mpss_resource = { .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA, }; -static const struct of_device_id adsp_of_match[] = { +static const struct of_device_id qcom_pas_of_match[] = { { .compatible = "qcom,msm8226-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource}, { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, @@ -1504,17 +1499,17 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,x1e80100-cdsp-pas", .data = &x1e80100_cdsp_resource}, { }, }; -MODULE_DEVICE_TABLE(of, adsp_of_match); +MODULE_DEVICE_TABLE(of, qcom_pas_of_match); -static struct platform_driver adsp_driver = { - .probe = adsp_probe, - .remove = adsp_remove, +static struct platform_driver qcom_pas_driver = { + .probe = qcom_pas_probe, + .remove = qcom_pas_remove, .driver = { .name = "qcom_q6v5_pas", - .of_match_table = adsp_of_match, + .of_match_table = qcom_pas_of_match, }, }; -module_platform_driver(adsp_driver); -MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Authentication Service driver"); +module_platform_driver(qcom_pas_driver); +MODULE_DESCRIPTION("Qualcomm Peripheral Authentication Service remoteproc driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 81b2ccf988e8..825672100528 100644 --- 
a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -699,7 +699,7 @@ static int rproc_alloc_carveout(struct rproc *rproc, return -ENOMEM; } - dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n", + dev_dbg(dev, "carveout va %p, dma %pad, len 0x%zx\n", va, &dma, mem->len); if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) { diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 25a655f33ec0..c5d46a878149 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -136,7 +136,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, size = vring_size(num, rvring->align); memset(addr, 0, size); - dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n", + dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n", id, addr, num, rvring->notifyid); /* diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c index 5412beb0a692..d083ecf02f5c 100644 --- a/drivers/remoteproc/st_slim_rproc.c +++ b/drivers/remoteproc/st_slim_rproc.c @@ -190,7 +190,7 @@ static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool * } } - dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n", + dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n", da, len, va); return va; diff --git a/drivers/remoteproc/ti_k3_common.c b/drivers/remoteproc/ti_k3_common.c index d5dccc81d460..d4f20900f33b 100644 --- a/drivers/remoteproc/ti_k3_common.c +++ b/drivers/remoteproc/ti_k3_common.c @@ -450,7 +450,7 @@ int k3_rproc_of_get_memories(struct platform_device *pdev, kproc->mem[i].dev_addr = data->mems[i].dev_addr; kproc->mem[i].size = resource_size(res); - dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n", + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n", data->mems[i].name, &kproc->mem[i].bus_addr, kproc->mem[i].size, kproc->mem[i].cpu_addr, kproc->mem[i].dev_addr); @@ -528,7 +528,7 @@ int k3_reserved_mem_init(struct k3_rproc *kproc) return -ENOMEM; } - dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %p da 0x%x\n", i + 1, &kproc->rmem[i].bus_addr, kproc->rmem[i].size, kproc->rmem[i].cpu_addr, kproc->rmem[i].dev_addr); diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index e34c04c135fc..ca5ff280d2dc 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -1007,7 +1007,7 @@ static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev, return -ENOMEM; } - dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %p da 0x%x\n", i, &core->sram[i].bus_addr, core->sram[i].size, core->sram[i].cpu_addr, core->sram[i].dev_addr); diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c index 1af89782e116..0b7b173d0d26 100644 --- a/drivers/remoteproc/xlnx_r5_remoteproc.c +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c @@ -68,7 +68,7 @@ struct zynqmp_sram_bank { }; /** - * struct mbox_info + * struct mbox_info - mailbox channel data * * @rx_mc_buf: to copy data from mailbox rx channel * @tx_mc_buf: to copy data to mailbox tx channel @@ -89,7 +89,7 @@ struct mbox_info { }; /** - * struct rsc_tbl_data + * struct rsc_tbl_data - resource table metadata * * Platform specific data structure used to sync resource table address. 
* It's important to maintain order and size of each field on remote side. @@ -128,7 +128,7 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = { }; /** - * struct zynqmp_r5_core + * struct zynqmp_r5_core - remoteproc core's internal data * * @rsc_tbl_va: resource table virtual address * @sram: Array of sram memories assigned to this core @@ -157,7 +157,7 @@ struct zynqmp_r5_core { }; /** - * struct zynqmp_r5_cluster + * struct zynqmp_r5_cluster - remoteproc cluster's internal data * * @dev: r5f subsystem cluster device node * @mode: cluster mode of type zynqmp_r5_cluster_mode @@ -732,7 +732,7 @@ static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw) } /** - * zynqmp_r5_rproc_prepare() + * zynqmp_r5_rproc_prepare() - prepare core to boot/attach * adds carveouts for TCM bank and reserved memory regions * * @rproc: Device node of each rproc @@ -765,7 +765,7 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc) } /** - * zynqmp_r5_rproc_unprepare() + * zynqmp_r5_rproc_unprepare() - programming sequence after stop/detach. * Turns off TCM banks using power-domain id * * @rproc: Device node of each rproc @@ -908,7 +908,7 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = { }; /** - * zynqmp_r5_add_rproc_core() + * zynqmp_r5_add_rproc_core() - Add core data to framework. * Allocate and add struct rproc object for each r5f core * This is called for each individual r5f core * @@ -938,6 +938,8 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev) rproc_coredump_set_elf_info(r5_rproc, ELFCLASS32, EM_ARM); + r5_rproc->recovery_disabled = true; + r5_rproc->has_iommu = false; r5_rproc->auto_boot = false; r5_core = r5_rproc->priv; r5_core->dev = cdev; @@ -1142,7 +1144,7 @@ static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster) } /** - * zynqmp_r5_get_tcm_node() + * zynqmp_r5_get_tcm_node() - Get TCM info * Ideally this function should parse tcm node and store information * in r5_core instance. For now, Hardcoded TCM information is used. * This approach is used as TCM bindings for system-dt is being developed @@ -1329,19 +1331,23 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster) /* * Number of cores is decided by number of child nodes of - * r5f subsystem node in dts. If Split mode is used in dts - * 2 child nodes are expected. + * r5f subsystem node in dts. + * In split mode, at most two child nodes are expected; + * however, enabling only a single core is also valid. + * The driver can handle the following configurations in split mode: + * 1) core0 enabled, core1 disabled + * 2) core0 disabled, core1 enabled + * 3) both core0 and core1 enabled. + * For now, no more than two cores are expected per cluster + * in split mode. * In lockstep mode if two child nodes are available, * only use first child node and consider it as core0 * and ignore core1 dt node. */ core_count = of_get_available_child_count(dev_node); - if (core_count == 0) { + if (core_count == 0 || core_count > 2) { dev_err(dev, "Invalid number of r5 cores %d", core_count); return -EINVAL; - } else if (cluster_mode == SPLIT_MODE && core_count != 2) { - dev_err(dev, "Invalid number of r5 cores for split mode\n"); - return -EINVAL; } else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) { dev_warn(dev, "Only r5 core0 will be used\n"); core_count = 1; @@ -1464,6 +1470,45 @@ static void zynqmp_r5_cluster_exit(void *data) } /* + * zynqmp_r5_remoteproc_shutdown() + * Follows the shutdown sequence in case of a kexec call. 
+ * + * @pdev: domain platform device for cluster + * + * Return: None. + */ +static void zynqmp_r5_remoteproc_shutdown(struct platform_device *pdev) +{ + const char *rproc_state_str = NULL; + struct zynqmp_r5_cluster *cluster; + struct zynqmp_r5_core *r5_core; + struct rproc *rproc; + int i, ret = 0; + + cluster = platform_get_drvdata(pdev); + + for (i = 0; i < cluster->core_count; i++) { + r5_core = cluster->r5_cores[i]; + rproc = r5_core->rproc; + + if (rproc->state == RPROC_RUNNING) { + ret = rproc_shutdown(rproc); + rproc_state_str = "shutdown"; + } else if (rproc->state == RPROC_ATTACHED) { + ret = rproc_detach(rproc); + rproc_state_str = "detach"; + } else { + ret = 0; + } + + if (ret) { + dev_err(cluster->dev, "failed to %s rproc %d\n", + rproc_state_str, rproc->index); + } + } +} + +/* * zynqmp_r5_remoteproc_probe() * parse device-tree, initialize hardware and allocate required resources * and remoteproc ops @@ -1524,6 +1569,7 @@ static struct platform_driver zynqmp_r5_remoteproc_driver = { .name = "zynqmp_r5_remoteproc", .of_match_table = zynqmp_r5_remoteproc_match, }, + .shutdown = zynqmp_r5_remoteproc_shutdown, }; module_platform_driver(zynqmp_r5_remoteproc_driver); diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 4730b1c8b322..484890b4a6a7 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -901,7 +901,7 @@ static int rpmsg_probe(struct virtio_device *vdev) goto vqs_del; } - dev_dbg(&vdev->dev, "buffers: va %pK, dma %pad\n", + dev_dbg(&vdev->dev, "buffers: va %p, dma %pad\n", bufs_va, &vrp->bufs_dma); /* half of the buffers is dedicated for RX */ diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index 7a671a786197..5fd311ee4107 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -499,6 +499,7 @@ static int amd_sdw_port_params(struct sdw_bus *bus, struct sdw_port_params *p_pa break; case ACP70_PCI_REV_ID: case ACP71_PCI_REV_ID: + case ACP72_PCI_REV_ID: frame_fmt_reg = acp70_sdw_dp_reg[p_params->num].frame_fmt_reg; break; default: @@ -551,6 +552,7 @@ static int amd_sdw_transport_params(struct sdw_bus *bus, break; case ACP70_PCI_REV_ID: case ACP71_PCI_REV_ID: + case ACP72_PCI_REV_ID: frame_fmt_reg = acp70_sdw_dp_reg[params->port_num].frame_fmt_reg; sample_int_reg = acp70_sdw_dp_reg[params->port_num].sample_int_reg; hctrl_dp0_reg = acp70_sdw_dp_reg[params->port_num].hctrl_dp0_reg; @@ -614,6 +616,7 @@ static int amd_sdw_port_enable(struct sdw_bus *bus, break; case ACP70_PCI_REV_ID: case ACP71_PCI_REV_ID: + case ACP72_PCI_REV_ID: lane_ctrl_ch_en_reg = acp70_sdw_dp_reg[enable_ch->port_num].lane_ctrl_ch_en_reg; break; default: @@ -931,6 +934,9 @@ static void amd_sdw_irq_thread(struct work_struct *work) status_change_8to11 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11); status_change_0to7 = readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7); + if (!status_change_0to7 && !status_change_8to11) + return; + dev_dbg(amd_manager->dev, "[SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n", amd_manager->instance, status_change_0to7, status_change_8to11); if (status_change_8to11 & AMD_SDW_WAKE_STAT_MASK) @@ -1035,6 +1041,7 @@ static int amd_sdw_manager_probe(struct platform_device *pdev) break; case ACP70_PCI_REV_ID: case ACP71_PCI_REV_ID: + case ACP72_PCI_REV_ID: amd_manager->num_dout_ports = AMD_ACP70_SDW_MAX_TX_PORTS; amd_manager->num_din_ports = AMD_ACP70_SDW_MAX_RX_PORTS; break; @@ -1074,6 +1081,7 @@ static void amd_sdw_manager_remove(struct 
platform_device *pdev) int ret; pm_runtime_disable(&pdev->dev); + cancel_work_sync(&amd_manager->amd_sdw_work); amd_disable_sdw_interrupts(amd_manager); sdw_bus_master_delete(&amd_manager->bus); ret = amd_disable_sdw_manager(amd_manager); @@ -1178,10 +1186,10 @@ static int __maybe_unused amd_pm_prepare(struct device *dev) * device is not in runtime suspend state, observed that device alerts are missing * without pm_prepare on AMD platforms in clockstop mode0. */ - if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) { - ret = pm_request_resume(dev); + if (amd_manager->power_mode_mask) { + ret = pm_runtime_resume(dev); if (ret < 0) { - dev_err(bus->dev, "pm_request_resume failed: %d\n", ret); + dev_err(bus->dev, "pm_runtime_resume failed: %d\n", ret); return 0; } } diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 68db4b67a86f..4fd5cac799c5 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1753,15 +1753,15 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave) /* Update the Slave driver */ if (slave_notify) { + if (slave->prop.use_domain_irq && slave->irq) + handle_nested_irq(slave->irq); + mutex_lock(&slave->sdw_dev_lock); if (slave->probed) { struct device *dev = &slave->dev; struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); - if (slave->prop.use_domain_irq && slave->irq) - handle_nested_irq(slave->irq); - if (drv->ops && drv->ops->interrupt_callback) { slave_intr.sdca_cascade = sdca_cascade; slave_intr.control_port = clear; diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c index 3099ea074f10..230a51489486 100644 --- a/drivers/soundwire/debugfs.c +++ b/drivers/soundwire/debugfs.c @@ -291,6 +291,9 @@ static int cmd_go(void *data, u64 value) finish_t = ktime_get(); + dev_dbg(&slave->dev, "command completed, num_byte %zu status %d, time %lld ms\n", + num_bytes, ret, div_u64(finish_t - start_t, NSEC_PER_MSEC)); + out: if (fw) release_firmware(fw); @@ -298,9 +301,6 @@ out: pm_runtime_mark_last_busy(&slave->dev); pm_runtime_put(&slave->dev); - dev_dbg(&slave->dev, "command completed, num_byte %zu status %d, time %lld ms\n", - num_bytes, ret, div_u64(finish_t - start_t, NSEC_PER_MSEC)); - return ret; } DEFINE_DEBUGFS_ATTRIBUTE(cmd_go_fops, NULL, diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c index 5b31e1f69591..5d08364ad6d1 100644 --- a/drivers/soundwire/intel_ace2x.c +++ b/drivers/soundwire/intel_ace2x.c @@ -11,6 +11,7 @@ #include <linux/soundwire/sdw_registers.h> #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_intel.h> +#include <linux/string_choices.h> #include <sound/hdaudio.h> #include <sound/hda-mlink.h> #include <sound/hda-sdw-bpt.h> @@ -183,7 +184,7 @@ static int intel_ace2x_bpt_open_stream(struct sdw_intel *sdw, struct sdw_slave * return 0; dev_err(cdns->dev, "%s: sdw_prepare_%s_dma_buffer failed %d\n", - __func__, command ? 
"read" : "write", ret); + __func__, str_read_write(command), ret); ret1 = hda_sdw_bpt_close(cdns->dev->parent, /* PCI device */ sdw->bpt_ctx.bpt_tx_stream, &sdw->bpt_ctx.dmab_tx_bdl, @@ -245,7 +246,7 @@ static void intel_ace2x_bpt_close_stream(struct sdw_intel *sdw, struct sdw_slave cdns->bus.bpt_stream = NULL; } -#define INTEL_BPT_MSG_BYTE_ALIGNMENT 32 +#define INTEL_BPT_MSG_BYTE_MIN 16 static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *slave, struct sdw_bpt_msg *msg) @@ -253,9 +254,9 @@ static int intel_ace2x_bpt_send_async(struct sdw_intel *sdw, struct sdw_slave *s struct sdw_cdns *cdns = &sdw->cdns; int ret; - if (msg->len % INTEL_BPT_MSG_BYTE_ALIGNMENT) { - dev_err(cdns->dev, "BPT message length %d is not a multiple of %d bytes\n", - msg->len, INTEL_BPT_MSG_BYTE_ALIGNMENT); + if (msg->len < INTEL_BPT_MSG_BYTE_MIN) { + dev_err(cdns->dev, "BPT message length %d is less than the minimum bytes %d\n", + msg->len, INTEL_BPT_MSG_BYTE_MIN); return -EINVAL; } diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index 10a602d4843a..6df2601fff90 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -65,6 +65,7 @@ static struct wake_capable_part wake_capable_list[] = { {0x025d, 0x715}, {0x025d, 0x716}, {0x025d, 0x717}, + {0x025d, 0x721}, {0x025d, 0x722}, }; diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c index 65afb28ef8fa..c69b78cd0b62 100644 --- a/drivers/soundwire/mipi_disco.c +++ b/drivers/soundwire/mipi_disco.c @@ -451,10 +451,10 @@ int sdw_slave_read_prop(struct sdw_slave *slave) "mipi-sdw-highPHY-capable"); prop->paging_support = mipi_device_property_read_bool(dev, - "mipi-sdw-paging-support"); + "mipi-sdw-paging-supported"); prop->bank_delay_support = mipi_device_property_read_bool(dev, - "mipi-sdw-bank-delay-support"); + "mipi-sdw-bank-delay-supported"); device_property_read_u32(dev, "mipi-sdw-port15-read-behavior", &prop->p15_behave); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index 0f45e3404756..bd2b293b44f2 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -1622,9 +1622,9 @@ static int qcom_swrm_probe(struct platform_device *pdev) if (ret) goto err_master_add; - dev_info(dev, "Qualcomm Soundwire controller v%x.%x.%x Registered\n", - (ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff, - ctrl->version & 0xffff); + dev_dbg(dev, "Qualcomm Soundwire controller v%x.%x.%x registered\n", + (ctrl->version >> 24) & 0xff, (ctrl->version >> 16) & 0xff, + ctrl->version & 0xffff); pm_runtime_set_autosuspend_delay(dev, 3000); pm_runtime_use_autosuspend(dev); diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index a4bea742b5d9..38c9dbd35606 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -1510,7 +1510,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream, if (ret < 0) { dev_err(bus->dev, "Prepare port(s) failed ret = %d\n", ret); - return ret; + goto restore_params; } } diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 2a72e7c1d131..4bbe4de1679b 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -532,6 +532,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) goto out_disable_phy; } + ret = phy_calibrate(phy); + if (ret) { + dev_err(hba->dev, "Failed to calibrate PHY: %d\n", ret); + goto out_disable_phy; + } + ufs_qcom_select_unipro_mode(host); return 0; @@ -726,26 +732,17 @@ static int 
ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct phy *phy = host->generic_phy; if (status == PRE_CHANGE) return 0; - if (ufs_qcom_is_link_off(hba)) { - /* - * Disable the tx/rx lane symbol clocks before PHY is - * powered down as the PLL source should be disabled - * after downstream clocks are disabled. - */ + if (!ufs_qcom_is_link_active(hba)) ufs_qcom_disable_lane_clks(host); - phy_power_off(phy); - /* reset the connected UFS device during power down */ - ufs_qcom_device_reset_ctrl(hba, true); - } else if (!ufs_qcom_is_link_active(hba)) { - ufs_qcom_disable_lane_clks(host); - } + /* reset the connected UFS device during power down */ + if (ufs_qcom_is_link_off(hba) && host->device_reset) + ufs_qcom_device_reset_ctrl(hba, true); return ufs_qcom_ice_suspend(host); } @@ -753,26 +750,11 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); - struct phy *phy = host->generic_phy; int err; - if (ufs_qcom_is_link_off(hba)) { - err = phy_power_on(phy); - if (err) { - dev_err(hba->dev, "%s: failed PHY power on: %d\n", - __func__, err); - return err; - } - - err = ufs_qcom_enable_lane_clks(host); - if (err) - return err; - - } else if (!ufs_qcom_is_link_active(hba)) { - err = ufs_qcom_enable_lane_clks(host); - if (err) - return err; - } + err = ufs_qcom_enable_lane_clks(host); + if (err) + return err; return ufs_qcom_ice_resume(host); } @@ -1151,12 +1133,20 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) * @on: If true, enable clocks else disable them. * @status: PRE_CHANGE or POST_CHANGE notify * + * Certain clocks come from the PHY, so they need to be managed + * together with the controller clocks, which also provides better + * power savings. Hence keep the phy_power_off/on calls in + * ufs_qcom_setup_clocks so that the PHY's regulators and clocks are + * turned on/off along with the UFS clocks. + * * Return: 0 on success, non-zero on failure. */ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, enum ufs_notify_change_status status) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); + struct phy *phy; + int err; /* * In case ufs_qcom_init() is not yet done, simply ignore. 
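As a sketch of the ordering the new comment relies on (assuming, as in mainline ufshcd, that the vop is invoked with PRE_CHANGE before the core gates/ungates the controller clocks and with POST_CHANGE afterwards; the wrapper function below is purely illustrative):

/* Illustrative only, not part of the patch. */
static void example_clk_gate_sequence(struct ufs_hba *hba)
{
	/* gating: the PHY is powered off before the controller clocks stop */
	ufs_qcom_setup_clocks(hba, false, PRE_CHANGE);	/* phy_power_off() */
	/* ... ufshcd core gates the controller clocks here ... */
	ufs_qcom_setup_clocks(hba, false, POST_CHANGE);

	/* ungating: the PHY comes back up only after the clocks are on */
	ufs_qcom_setup_clocks(hba, true, PRE_CHANGE);
	/* ... ufshcd core ungates the controller clocks here ... */
	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);	/* phy_power_on() */
}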
@@ -1166,6 +1156,8 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, if (!host) return 0; + phy = host->generic_phy; + switch (status) { case PRE_CHANGE: if (on) { @@ -1175,10 +1167,22 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on, /* disable device ref_clk */ ufs_qcom_dev_ref_clk_ctrl(host, false); } + + err = phy_power_off(phy); + if (err) { + dev_err(hba->dev, "phy power off failed, ret=%d\n", err); + return err; + } } break; case POST_CHANGE: if (on) { + err = phy_power_on(phy); + if (err) { + dev_err(hba->dev, "phy power on failed, ret = %d\n", err); + return err; + } + /* enable the device ref clock for HS mode*/ if (ufshcd_is_hs_mode(&hba->pwr_info)) ufs_qcom_dev_ref_clk_ctrl(host, true); diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 61424342c096..c7a20278bc3c 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -908,6 +908,9 @@ void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev) { struct mlx5_vdpa_mr_resources *mres = &mvdev->mres; + if (!mres->wq_gc) + return; + atomic_set(&mres->shutdown, 1); flush_delayed_work(&mres->gc_dwork_ent); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index cccc49a08a1a..0ed2fc28e1ce 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -2491,7 +2491,7 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num) } mvq = &ndev->vqs[idx]; - ndev->needs_teardown = num != mvq->num_ent; + ndev->needs_teardown |= num != mvq->num_ent; mvq->num_ent = num; } @@ -3432,15 +3432,17 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev) ndev = to_mlx5_vdpa_ndev(mvdev); + /* Functions called here should be able to work with + * uninitialized resources. 
+ */ free_fixed_resources(ndev); mlx5_vdpa_clean_mrs(mvdev); mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); - mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx); - if (!is_zero_ether_addr(ndev->config.mac)) { pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); } + mlx5_cmd_cleanup_async_ctx(&mvdev->async_ctx); mlx5_vdpa_free_resources(&ndev->mvdev); free_irqs(ndev); kfree(ndev->event_cbs); @@ -3888,6 +3890,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, mvdev->actual_features = (device_features & BIT_ULL(VIRTIO_F_VERSION_1)); + mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx); + ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); if (!ndev->vqs || !ndev->event_cbs) { @@ -3960,8 +3964,6 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, ndev->rqt_size = 1; } - mlx5_cmd_init_async_ctx(mdev, &mvdev->async_ctx); - ndev->mvdev.mlx_features = device_features; mvdev->vdev.dma_dev = &mdev->pdev->dev; err = mlx5_vdpa_alloc_resources(&ndev->mvdev); diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 6a9a37351310..04620bb77203 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -2216,6 +2216,7 @@ static void vduse_exit(void) cdev_del(&vduse_ctrl_cdev); unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX); class_unregister(&vduse_class); + idr_destroy(&vduse_idr); } module_exit(vduse_exit); diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c index ef490a4545f4..988b6919c2c3 100644 --- a/drivers/vfio/pci/vfio_pci_igd.c +++ b/drivers/vfio/pci/vfio_pci_igd.c @@ -437,8 +437,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev) bool vfio_pci_is_intel_display(struct pci_dev *pdev) { - return (pdev->vendor == PCI_VENDOR_ID_INTEL) && - ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY); + return (pdev->vendor == PCI_VENDOR_ID_INTEL) && pci_is_display(pdev); } int vfio_pci_igd_init(struct vfio_pci_core_device *vdev) diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index 020d4fbb947c..bc0f38574497 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -95,4 +95,22 @@ config VHOST_CROSS_ENDIAN_LEGACY If unsure, say "N". +config VHOST_ENABLE_FORK_OWNER_CONTROL + bool "Enable VHOST_ENABLE_FORK_OWNER_CONTROL" + default y + help + This option enables two IOCTLs: VHOST_SET_FORK_FROM_OWNER and + VHOST_GET_FORK_FROM_OWNER. These allow userspace applications + to modify the vhost worker mode for vhost devices. + + It also exposes the module parameter 'fork_from_owner_default', + which lets users configure the default mode for vhost workers. + + By default, `VHOST_ENABLE_FORK_OWNER_CONTROL` is set to `y`, so + users can change the worker thread mode as needed. + If this config is disabled (n), the related IOCTLs and parameters + will be unavailable. + + If unsure, say "Y". 
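A minimal userspace sketch of how the two IOCTLs described in the help text might be exercised (this assumes the VHOST_SET/GET_FORK_FROM_OWNER requests and the VHOST_FORK_OWNER_* constants are exported through <linux/vhost.h>; the vhost-net device path is only an example):

/* Hypothetical usage sketch, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/vhost.h>

int main(void)
{
	__u8 mode = VHOST_FORK_OWNER_KTHREAD;	/* request legacy kthread workers */
	int fd = open("/dev/vhost-net", O_RDWR);

	if (fd < 0)
		return 1;
	/* Must be issued before VHOST_SET_OWNER, otherwise -EBUSY. */
	if (ioctl(fd, VHOST_SET_FORK_FROM_OWNER, &mode) < 0)
		perror("VHOST_SET_FORK_FROM_OWNER");
	if (ioctl(fd, VHOST_GET_FORK_FROM_OWNER, &mode) == 0)
		printf("fork_from_owner = %u\n", mode);
	return 0;
}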
+ endif diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index bfb774c273ea..6edac0c1ba9b 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -74,7 +74,8 @@ static const u64 vhost_net_features[VIRTIO_FEATURES_DWORDS] = { (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_ACCESS_PLATFORM) | - (1ULL << VIRTIO_F_RING_RESET), + (1ULL << VIRTIO_F_RING_RESET) | + (1ULL << VIRTIO_F_IN_ORDER), VIRTIO_BIT(VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO) | VIRTIO_BIT(VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO), }; @@ -376,7 +377,8 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net, while (j) { add = min(UIO_MAXIOV - nvq->done_idx, j); vhost_add_used_and_signal_n(vq->dev, vq, - &vq->heads[nvq->done_idx], add); + &vq->heads[nvq->done_idx], + NULL, add); nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV; j -= add; } @@ -451,7 +453,8 @@ static int vhost_net_enable_vq(struct vhost_net *n, return vhost_poll_start(poll, sock->file); } -static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq) +static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq, + unsigned int count) { struct vhost_virtqueue *vq = &nvq->vq; struct vhost_dev *dev = vq->dev; @@ -459,7 +462,8 @@ static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq) if (!nvq->done_idx) return; - vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); + vhost_add_used_and_signal_n(dev, vq, vq->heads, + vq->nheads, count); nvq->done_idx = 0; } @@ -468,6 +472,8 @@ static void vhost_tx_batch(struct vhost_net *net, struct socket *sock, struct msghdr *msghdr) { + struct vhost_virtqueue *vq = &nvq->vq; + bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER); struct tun_msg_ctl ctl = { .type = TUN_MSG_PTR, .num = nvq->batched_xdp, @@ -475,6 +481,11 @@ static void vhost_tx_batch(struct vhost_net *net, }; int i, err; + if (in_order) { + vq->heads[0].len = 0; + vq->nheads[0] = nvq->done_idx; + } + if (nvq->batched_xdp == 0) goto signal_used; @@ -496,7 +507,7 @@ static void vhost_tx_batch(struct vhost_net *net, } signal_used: - vhost_net_signal_used(nvq); + vhost_net_signal_used(nvq, in_order ? 1 : nvq->done_idx); nvq->batched_xdp = 0; } @@ -750,6 +761,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) int sent_pkts = 0; bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX); bool busyloop_intr; + bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER); do { busyloop_intr = false; @@ -786,11 +798,13 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) break; } - /* We can't build XDP buff, go for single - * packet path but let's flush batched - * packets. - */ - vhost_tx_batch(net, nvq, sock, &msg); + if (nvq->batched_xdp) { + /* We can't build XDP buff, go for single + * packet path but let's flush batched + * packets. 
+ */ + vhost_tx_batch(net, nvq, sock, &msg); + } msg.msg_control = NULL; } else { if (tx_can_batch(vq, total_len)) @@ -811,8 +825,12 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock) pr_debug("Truncated TX packet: len %d != %zd\n", err, len); done: - vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); - vq->heads[nvq->done_idx].len = 0; + if (in_order) { + vq->heads[0].id = cpu_to_vhost32(vq, head); + } else { + vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); + vq->heads[nvq->done_idx].len = 0; + } ++nvq->done_idx; } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); @@ -991,7 +1009,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) } static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, - bool *busyloop_intr) + bool *busyloop_intr, unsigned int count) { struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; @@ -1001,7 +1019,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, if (!len && rvq->busyloop_timeout) { /* Flush batched heads first */ - vhost_net_signal_used(rnvq); + vhost_net_signal_used(rnvq, count); /* Both tx vq and rx socket were polled here */ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true); @@ -1013,7 +1031,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, /* This is a multi-buffer version of vhost_get_desc, that works if * vq has read descriptors only. - * @vq - the relevant virtqueue + * @nvq - the relevant vhost_net virtqueue * @datalen - data length we'll be reading * @iovcount - returned count of io vectors we fill * @log - vhost log @@ -1021,14 +1039,17 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, * @quota - headcount quota, 1 for big buffer * returns number of buffer heads allocated, negative on error */ -static int get_rx_bufs(struct vhost_virtqueue *vq, +static int get_rx_bufs(struct vhost_net_virtqueue *nvq, struct vring_used_elem *heads, + u16 *nheads, int datalen, unsigned *iovcount, struct vhost_log *log, unsigned *log_num, unsigned int quota) { + struct vhost_virtqueue *vq = &nvq->vq; + bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER); unsigned int out, in; int seg = 0; int headcount = 0; @@ -1065,14 +1086,16 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, nlogs += *log_num; log += *log_num; } - heads[headcount].id = cpu_to_vhost32(vq, d); len = iov_length(vq->iov + seg, in); - heads[headcount].len = cpu_to_vhost32(vq, len); - datalen -= len; + if (!in_order) { + heads[headcount].id = cpu_to_vhost32(vq, d); + heads[headcount].len = cpu_to_vhost32(vq, len); + } ++headcount; + datalen -= len; seg += in; } - heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); + *iovcount = seg; if (unlikely(log)) *log_num = nlogs; @@ -1082,6 +1105,15 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, r = UIO_MAXIOV + 1; goto err; } + + if (!in_order) + heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); + else { + heads[0].len = cpu_to_vhost32(vq, len + datalen); + heads[0].id = cpu_to_vhost32(vq, d); + nheads[0] = headcount; + } + return headcount; err: vhost_discard_vq_desc(vq, headcount); @@ -1094,6 +1126,8 @@ static void handle_rx(struct vhost_net *net) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_virtqueue *vq = &nvq->vq; + bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER); + unsigned int count = 0; unsigned in, log; 
struct vhost_log *vq_log; struct msghdr msg = { @@ -1141,12 +1175,13 @@ static void handle_rx(struct vhost_net *net) do { sock_len = vhost_net_rx_peek_head_len(net, sock->sk, - &busyloop_intr); + &busyloop_intr, count); if (!sock_len) break; sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; - headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, + headcount = get_rx_bufs(nvq, vq->heads + count, + vq->nheads + count, vhost_len, &in, vq_log, &log, likely(mergeable) ? UIO_MAXIOV : 1); /* On error, stop handling until the next kick. */ @@ -1222,8 +1257,11 @@ static void handle_rx(struct vhost_net *net) goto out; } nvq->done_idx += headcount; - if (nvq->done_idx > VHOST_NET_BATCH) - vhost_net_signal_used(nvq); + count += in_order ? 1 : headcount; + if (nvq->done_idx > VHOST_NET_BATCH) { + vhost_net_signal_used(nvq, count); + count = 0; + } if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len, vq->iov, in); @@ -1235,7 +1273,7 @@ static void handle_rx(struct vhost_net *net) else if (!sock_len) vhost_net_enable_vq(net, vq); out: - vhost_net_signal_used(nvq); + vhost_net_signal_used(nvq, count); mutex_unlock(&vq->mutex); } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index c12a0d4e6386..abf51332a5c5 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -71,7 +71,7 @@ static int vhost_scsi_set_inline_sg_cnt(const char *buf, if (ret) return ret; - if (ret > VHOST_SCSI_PREALLOC_SGLS) { + if (cnt > VHOST_SCSI_PREALLOC_SGLS) { pr_err("Max inline_sg_cnt is %u\n", VHOST_SCSI_PREALLOC_SGLS); return -EINVAL; } @@ -152,7 +152,7 @@ struct vhost_scsi_nexus { struct vhost_scsi_tpg { /* Vhost port target portal group tag for TCM */ u16 tport_tpgt; - /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ + /* Used to track number of TPG Port/Lun Links wrt to explicit I_T Nexus shutdown */ int tv_tpg_port_count; /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ int tv_tpg_vhost_count; @@ -311,12 +311,12 @@ static void vhost_scsi_init_inflight(struct vhost_scsi *vs, mutex_lock(&vq->mutex); - /* store old infight */ + /* store old inflight */ idx = vs->vqs[i].inflight_idx; if (old_inflight) old_inflight[i] = &vs->vqs[i].inflights[idx]; - /* setup new infight */ + /* setup new inflight */ vs->vqs[i].inflight_idx = idx ^ 1; new_inflight = &vs->vqs[i].inflights[idx ^ 1]; kref_init(&new_inflight->kref); @@ -1226,10 +1226,8 @@ vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc, /* validated at handler entry */ vs_tpg = vhost_vq_get_backend(vq); tpg = READ_ONCE(vs_tpg[*vc->target]); - if (unlikely(!tpg)) { - vq_err(vq, "Target 0x%x does not exist\n", *vc->target); + if (unlikely(!tpg)) goto out; - } } if (tpgp) @@ -1249,7 +1247,7 @@ vhost_scsi_setup_resp_iovs(struct vhost_scsi_cmd *cmd, struct iovec *in_iovs, if (!in_iovs_cnt) return 0; /* - * Initiator's normally just put the virtio_scsi_cmd_resp in the first + * Initiators normally just put the virtio_scsi_cmd_resp in the first * iov, but just in case they wedged in some data with it we check for * greater than or equal to the response struct. 
*/ @@ -1457,7 +1455,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) cmd = vhost_scsi_get_cmd(vq, tag); if (IS_ERR(cmd)) { ret = PTR_ERR(cmd); - vq_err(vq, "vhost_scsi_get_tag failed %dd\n", ret); + vq_err(vq, "vhost_scsi_get_tag failed %d\n", ret); goto err; } cmd->tvc_vq = vq; @@ -2609,7 +2607,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, return -ENOMEM; } /* - * Since we are running in 'demo mode' this call with generate a + * Since we are running in 'demo mode' this call will generate a * struct se_node_acl for the vhost_scsi struct se_portal_group with * the SCSI Initiator port name of the passed configfs group 'name'. */ @@ -2915,7 +2913,7 @@ static ssize_t vhost_scsi_wwn_version_show(struct config_item *item, char *page) { return sysfs_emit(page, "TCM_VHOST fabric module %s on %s/%s" - "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, + " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, utsname()->machine); } @@ -2983,13 +2981,13 @@ out_vhost_scsi_deregister: vhost_scsi_deregister(); out: return ret; -}; +} static void vhost_scsi_exit(void) { target_unregister_template(&vhost_scsi_ops); vhost_scsi_deregister(); -}; +} MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); MODULE_ALIAS("tcm_vhost"); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 1094256a943c..23286e4d7b49 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/kthread.h> +#include <linux/cgroup.h> #include <linux/module.h> #include <linux/sort.h> #include <linux/sched/mm.h> @@ -41,6 +42,13 @@ static int max_iotlb_entries = 2048; module_param(max_iotlb_entries, int, 0444); MODULE_PARM_DESC(max_iotlb_entries, "Maximum number of iotlb entries. (default: 2048)"); +static bool fork_from_owner_default = VHOST_FORK_OWNER_TASK; + +#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL +module_param(fork_from_owner_default, bool, 0444); +MODULE_PARM_DESC(fork_from_owner_default, + "Set task mode as the default (default: Y)"); +#endif enum { VHOST_MEMORY_F_LOG = 0x1, @@ -242,7 +250,7 @@ static void vhost_worker_queue(struct vhost_worker *worker, * test_and_set_bit() implies a memory barrier. 
*/ llist_add(&work->node, &worker->work_list); - vhost_task_wake(worker->vtsk); + worker->ops->wakeup(worker); } } @@ -364,6 +372,7 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->avail = NULL; vq->used = NULL; vq->last_avail_idx = 0; + vq->next_avail_head = 0; vq->avail_idx = 0; vq->last_used_idx = 0; vq->signalled_used = 0; @@ -388,6 +397,44 @@ static void vhost_vq_reset(struct vhost_dev *dev, __vhost_vq_meta_reset(vq); } +static int vhost_run_work_kthread_list(void *data) +{ + struct vhost_worker *worker = data; + struct vhost_work *work, *work_next; + struct vhost_dev *dev = worker->dev; + struct llist_node *node; + + kthread_use_mm(dev->mm); + + for (;;) { + /* mb paired w/ kthread_stop */ + set_current_state(TASK_INTERRUPTIBLE); + + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); + break; + } + node = llist_del_all(&worker->work_list); + if (!node) + schedule(); + + node = llist_reverse_order(node); + /* make sure flag is seen after deletion */ + smp_wmb(); + llist_for_each_entry_safe(work, work_next, node, node) { + clear_bit(VHOST_WORK_QUEUED, &work->flags); + __set_current_state(TASK_RUNNING); + kcov_remote_start_common(worker->kcov_handle); + work->fn(work); + kcov_remote_stop(); + cond_resched(); + } + } + kthread_unuse_mm(dev->mm); + + return 0; +} + static bool vhost_run_work_list(void *data) { struct vhost_worker *worker = data; @@ -455,6 +502,8 @@ static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) vq->log = NULL; kfree(vq->heads); vq->heads = NULL; + kfree(vq->nheads); + vq->nheads = NULL; } /* Helper to allocate iovec buffers for all vqs. */ @@ -472,7 +521,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) GFP_KERNEL); vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), GFP_KERNEL); - if (!vq->indirect || !vq->log || !vq->heads) + vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads), + GFP_KERNEL); + if (!vq->indirect || !vq->log || !vq->heads || !vq->nheads) goto err_nomem; } return 0; @@ -552,6 +603,7 @@ void vhost_dev_init(struct vhost_dev *dev, dev->byte_weight = byte_weight; dev->use_worker = use_worker; dev->msg_handler = msg_handler; + dev->fork_owner = fork_from_owner_default; init_waitqueue_head(&dev->wait); INIT_LIST_HEAD(&dev->read_list); INIT_LIST_HEAD(&dev->pending_list); @@ -581,6 +633,46 @@ long vhost_dev_check_owner(struct vhost_dev *dev) } EXPORT_SYMBOL_GPL(vhost_dev_check_owner); +struct vhost_attach_cgroups_struct { + struct vhost_work work; + struct task_struct *owner; + int ret; +}; + +static void vhost_attach_cgroups_work(struct vhost_work *work) +{ + struct vhost_attach_cgroups_struct *s; + + s = container_of(work, struct vhost_attach_cgroups_struct, work); + s->ret = cgroup_attach_task_all(s->owner, current); +} + +static int vhost_attach_task_to_cgroups(struct vhost_worker *worker) +{ + struct vhost_attach_cgroups_struct attach; + int saved_cnt; + + attach.owner = current; + + vhost_work_init(&attach.work, vhost_attach_cgroups_work); + vhost_worker_queue(worker, &attach.work); + + mutex_lock(&worker->mutex); + + /* + * Bypass attachment_cnt check in __vhost_worker_flush: + * Temporarily change it to INT_MAX to bypass the check + */ + saved_cnt = worker->attachment_cnt; + worker->attachment_cnt = INT_MAX; + __vhost_worker_flush(worker); + worker->attachment_cnt = saved_cnt; + + mutex_unlock(&worker->mutex); + + return attach.ret; +} + /* Caller should have device mutex */ bool vhost_dev_has_owner(struct vhost_dev *dev) { @@ -594,10 +686,10 @@ static void vhost_attach_mm(struct 
vhost_dev *dev) if (dev->use_worker) { dev->mm = get_task_mm(current); } else { - /* vDPA device does not use worker thead, so there's - * no need to hold the address space for mm. This help + /* vDPA device does not use worker thread, so there's + * no need to hold the address space for mm. This helps * to avoid deadlock in the case of mmap() which may - * held the refcnt of the file and depends on release + * hold the refcnt of the file and depends on release * method to remove vma. */ dev->mm = current->mm; @@ -626,7 +718,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev, WARN_ON(!llist_empty(&worker->work_list)); xa_erase(&dev->worker_xa, worker->id); - vhost_task_stop(worker->vtsk); + worker->ops->stop(worker); kfree(worker); } @@ -649,42 +741,115 @@ static void vhost_workers_free(struct vhost_dev *dev) xa_destroy(&dev->worker_xa); } +static void vhost_task_wakeup(struct vhost_worker *worker) +{ + return vhost_task_wake(worker->vtsk); +} + +static void vhost_kthread_wakeup(struct vhost_worker *worker) +{ + wake_up_process(worker->kthread_task); +} + +static void vhost_task_do_stop(struct vhost_worker *worker) +{ + return vhost_task_stop(worker->vtsk); +} + +static void vhost_kthread_do_stop(struct vhost_worker *worker) +{ + kthread_stop(worker->kthread_task); +} + +static int vhost_task_worker_create(struct vhost_worker *worker, + struct vhost_dev *dev, const char *name) +{ + struct vhost_task *vtsk; + u32 id; + int ret; + + vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed, + worker, name); + if (IS_ERR(vtsk)) + return PTR_ERR(vtsk); + + worker->vtsk = vtsk; + vhost_task_start(vtsk); + ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL); + if (ret < 0) { + vhost_task_do_stop(worker); + return ret; + } + worker->id = id; + return 0; +} + +static int vhost_kthread_worker_create(struct vhost_worker *worker, + struct vhost_dev *dev, const char *name) +{ + struct task_struct *task; + u32 id; + int ret; + + task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name); + if (IS_ERR(task)) + return PTR_ERR(task); + + worker->kthread_task = task; + wake_up_process(task); + ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL); + if (ret < 0) + goto stop_worker; + + ret = vhost_attach_task_to_cgroups(worker); + if (ret) + goto stop_worker; + + worker->id = id; + return 0; + +stop_worker: + vhost_kthread_do_stop(worker); + return ret; +} + +static const struct vhost_worker_ops kthread_ops = { + .create = vhost_kthread_worker_create, + .stop = vhost_kthread_do_stop, + .wakeup = vhost_kthread_wakeup, +}; + +static const struct vhost_worker_ops vhost_task_ops = { + .create = vhost_task_worker_create, + .stop = vhost_task_do_stop, + .wakeup = vhost_task_wakeup, +}; + static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev) { struct vhost_worker *worker; - struct vhost_task *vtsk; char name[TASK_COMM_LEN]; int ret; - u32 id; + const struct vhost_worker_ops *ops = dev->fork_owner ? 
&vhost_task_ops : + &kthread_ops; worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT); if (!worker) return NULL; worker->dev = dev; + worker->ops = ops; snprintf(name, sizeof(name), "vhost-%d", current->pid); - vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed, - worker, name); - if (IS_ERR(vtsk)) - goto free_worker; - mutex_init(&worker->mutex); init_llist_head(&worker->work_list); worker->kcov_handle = kcov_common_handle(); - worker->vtsk = vtsk; - - vhost_task_start(vtsk); - - ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL); + ret = ops->create(worker, dev, name); if (ret < 0) - goto stop_worker; - worker->id = id; + goto free_worker; return worker; -stop_worker: - vhost_task_stop(vtsk); free_worker: kfree(worker); return NULL; @@ -731,7 +896,7 @@ static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq, * We don't want to call synchronize_rcu for every vq during setup * because it will slow down VM startup. If we haven't done * VHOST_SET_VRING_KICK and not done the driver specific - * SET_ENDPOINT/RUNNUNG then we can skip the sync since there will + * SET_ENDPOINT/RUNNING then we can skip the sync since there will * not be any works queued for scsi and net. */ mutex_lock(&vq->mutex); @@ -865,6 +1030,14 @@ long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl, switch (ioctl) { /* dev worker ioctls */ case VHOST_NEW_WORKER: + /* + * vhost_tasks will account for worker threads under the parent's + * NPROC value but kthreads do not. To avoid userspace overflowing + * the system with worker threads fork_owner must be true. + */ + if (!dev->fork_owner) + return -EFAULT; + ret = vhost_new_worker(dev, &state); if (!ret && copy_to_user(argp, &state, sizeof(state))) ret = -EFAULT; @@ -982,6 +1155,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem) vhost_dev_cleanup(dev); + dev->fork_owner = fork_from_owner_default; dev->umem = umem; /* We don't need VQ locks below since vhost_dev_cleanup makes sure * VQs aren't running. @@ -1990,14 +2164,15 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg break; } if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { - vq->last_avail_idx = s.num & 0xffff; + vq->next_avail_head = vq->last_avail_idx = + s.num & 0xffff; vq->last_used_idx = (s.num >> 16) & 0xffff; } else { if (s.num > 0xffff) { r = -EINVAL; break; } - vq->last_avail_idx = s.num; + vq->next_avail_head = vq->last_avail_idx = s.num; } /* Forget the cached index value. 
*/ vq->avail_idx = vq->last_avail_idx; @@ -2135,6 +2310,45 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) goto done; } +#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL + if (ioctl == VHOST_SET_FORK_FROM_OWNER) { + /* Only allow modification before owner is set */ + if (vhost_dev_has_owner(d)) { + r = -EBUSY; + goto done; + } + u8 fork_owner_val; + + if (get_user(fork_owner_val, (u8 __user *)argp)) { + r = -EFAULT; + goto done; + } + if (fork_owner_val != VHOST_FORK_OWNER_TASK && + fork_owner_val != VHOST_FORK_OWNER_KTHREAD) { + r = -EINVAL; + goto done; + } + d->fork_owner = !!fork_owner_val; + r = 0; + goto done; + } + if (ioctl == VHOST_GET_FORK_FROM_OWNER) { + u8 fork_owner_val = d->fork_owner; + + if (fork_owner_val != VHOST_FORK_OWNER_TASK && + fork_owner_val != VHOST_FORK_OWNER_KTHREAD) { + r = -EINVAL; + goto done; + } + if (put_user(fork_owner_val, (u8 __user *)argp)) { + r = -EFAULT; + goto done; + } + r = 0; + goto done; + } +#endif + /* You must be the owner to do anything else */ r = vhost_dev_check_owner(d); if (r) @@ -2590,11 +2804,12 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num) { + bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER); struct vring_desc desc; unsigned int i, head, found = 0; u16 last_avail_idx = vq->last_avail_idx; __virtio16 ring_head; - int ret, access; + int ret, access, c = 0; if (vq->avail_idx == vq->last_avail_idx) { ret = vhost_get_avail_idx(vq); @@ -2605,17 +2820,21 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, return vq->num; } - /* Grab the next descriptor number they're advertising, and increment - * the index we've seen. */ - if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) { - vq_err(vq, "Failed to read head: idx %d address %p\n", - last_avail_idx, - &vq->avail->ring[last_avail_idx % vq->num]); - return -EFAULT; + if (in_order) + head = vq->next_avail_head & (vq->num - 1); + else { + /* Grab the next descriptor number they're + * advertising, and increment the index we've seen. */ + if (unlikely(vhost_get_avail_head(vq, &ring_head, + last_avail_idx))) { + vq_err(vq, "Failed to read head: idx %d address %p\n", + last_avail_idx, + &vq->avail->ring[last_avail_idx % vq->num]); + return -EFAULT; + } + head = vhost16_to_cpu(vq, ring_head); } - head = vhost16_to_cpu(vq, ring_head); - /* If their number is silly, that's an error. */ if (unlikely(head >= vq->num)) { vq_err(vq, "Guest says index %u > %u is available", @@ -2658,6 +2877,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, "in indirect descriptor at idx %d\n", i); return ret; } + ++c; continue; } @@ -2693,10 +2913,12 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, } *out_num += ret; } + ++c; } while ((i = next_desc(vq, &desc)) != -1); /* On success, increment avail index. */ vq->last_avail_idx++; + vq->next_avail_head += c; /* Assume notifications from guest are disabled at this point, * if they aren't we would need to update avail_event index. */ @@ -2720,8 +2942,9 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) cpu_to_vhost32(vq, head), cpu_to_vhost32(vq, len) }; + u16 nheads = 1; - return vhost_add_used_n(vq, &heads, 1); + return vhost_add_used_n(vq, &heads, &nheads, 1); } EXPORT_SYMBOL_GPL(vhost_add_used); @@ -2757,10 +2980,9 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, return 0; } -/* After we've used one of their buffers, we tell them about it. 
We'll then - * want to notify the guest, using eventfd. */ -int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, - unsigned count) +static int vhost_add_used_n_ooo(struct vhost_virtqueue *vq, + struct vring_used_elem *heads, + unsigned count) { int start, n, r; @@ -2773,7 +2995,72 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, heads += n; count -= n; } - r = __vhost_add_used_n(vq, heads, count); + return __vhost_add_used_n(vq, heads, count); +} + +static int vhost_add_used_n_in_order(struct vhost_virtqueue *vq, + struct vring_used_elem *heads, + const u16 *nheads, + unsigned count) +{ + vring_used_elem_t __user *used; + u16 old, new = vq->last_used_idx; + int start, i; + + if (!nheads) + return -EINVAL; + + start = vq->last_used_idx & (vq->num - 1); + used = vq->used->ring + start; + + for (i = 0; i < count; i++) { + if (vhost_put_used(vq, &heads[i], start, 1)) { + vq_err(vq, "Failed to write used"); + return -EFAULT; + } + start += nheads[i]; + new += nheads[i]; + if (start >= vq->num) + start -= vq->num; + } + + if (unlikely(vq->log_used)) { + /* Make sure data is seen before log. */ + smp_wmb(); + /* Log used ring entry write. */ + log_used(vq, ((void __user *)used - (void __user *)vq->used), + (vq->num - start) * sizeof *used); + if (start + count > vq->num) + log_used(vq, 0, + (start + count - vq->num) * sizeof *used); + } + + old = vq->last_used_idx; + vq->last_used_idx = new; + /* If the driver never bothers to signal in a very long while, + * used index might wrap around. If that happens, invalidate + * signalled_used index we stored. TODO: make sure driver + * signals at least once in 2^16 and remove this. */ + if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) + vq->signalled_used_valid = false; + return 0; +} + +/* After we've used one of their buffers, we tell them about it. We'll then + * want to notify the guest, using eventfd. */ +int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, + u16 *nheads, unsigned count) +{ + bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER); + int r; + + if (!in_order || !nheads) + r = vhost_add_used_n_ooo(vq, heads, count); + else + r = vhost_add_used_n_in_order(vq, heads, nheads, count); + + if (r < 0) + return r; /* Make sure buffer is written before we update index. 
*/ smp_wmb(); @@ -2853,14 +3140,16 @@ EXPORT_SYMBOL_GPL(vhost_add_used_and_signal); /* multi-buffer version of vhost_add_used_and_signal */ void vhost_add_used_and_signal_n(struct vhost_dev *dev, struct vhost_virtqueue *vq, - struct vring_used_elem *heads, unsigned count) + struct vring_used_elem *heads, + u16 *nheads, + unsigned count) { - vhost_add_used_n(vq, heads, count); + vhost_add_used_n(vq, heads, nheads, count); vhost_signal(dev, vq); } EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); -/* return true if we're sure that avaiable ring is empty */ +/* return true if we're sure that available ring is empty */ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) { int r; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index d1aed35c4b07..621a6d9a8791 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -26,7 +26,18 @@ struct vhost_work { unsigned long flags; }; +struct vhost_worker; +struct vhost_dev; + +struct vhost_worker_ops { + int (*create)(struct vhost_worker *worker, struct vhost_dev *dev, + const char *name); + void (*stop)(struct vhost_worker *worker); + void (*wakeup)(struct vhost_worker *worker); +}; + struct vhost_worker { + struct task_struct *kthread_task; struct vhost_task *vtsk; struct vhost_dev *dev; /* Used to serialize device wide flushing with worker swapping. */ @@ -36,6 +47,7 @@ struct vhost_worker { u32 id; int attachment_cnt; bool killed; + const struct vhost_worker_ops *ops; }; /* Poll a file (eventfd or socket) */ @@ -103,6 +115,8 @@ struct vhost_virtqueue { * Values are limited to 0x7fff, and the high bit is used as * a wrap counter when using VIRTIO_F_RING_PACKED. */ u16 last_avail_idx; + /* Next avail ring head when VIRTIO_F_IN_ORDER is negotiated */ + u16 next_avail_head; /* Caches available index value from user. */ u16 avail_idx; @@ -129,6 +143,7 @@ struct vhost_virtqueue { struct iovec iotlb_iov[64]; struct iovec *indirect; struct vring_used_elem *heads; + u16 *nheads; /* Protected by virtqueue mutex. */ struct vhost_iotlb *umem; struct vhost_iotlb *iotlb; @@ -176,6 +191,16 @@ struct vhost_dev { int byte_weight; struct xarray worker_xa; bool use_worker; + /* + * If fork_owner is true, we use vhost_tasks to create + * the worker so all settings/limits like cgroups, NPROC, + * scheduler, etc. are inherited from the owner. If false, + * we use kthreads and only attach to the same cgroups + * as the owner for compat with older kernels. 
+ * The default value is set by fork_from_owner_default + */ + bool fork_owner; int (*msg_handler)(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg); }; @@ -213,11 +238,12 @@ bool vhost_vq_is_setup(struct vhost_virtqueue *vq); int vhost_vq_init_access(struct vhost_virtqueue *); int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len); int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads, - unsigned count); + u16 *nheads, unsigned count); void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *, unsigned int id, int len); void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *, - struct vring_used_elem *heads, unsigned count); + struct vring_used_elem *heads, u16 *nheads, + unsigned count); void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *); void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *); bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index bbce65452701..9f27c3f6091b 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -780,22 +780,6 @@ ssize_t vringh_iov_push_user(struct vringh_iov *wiov, EXPORT_SYMBOL(vringh_iov_push_user); /** - * vringh_abandon_user - we've decided not to handle the descriptor(s). - * @vrh: the vring. - * @num: the number of descriptors to put back (ie. num - * vringh_get_user() to undo). - * - * The next vringh_get_user() will return the old descriptor(s) again. - */ -void vringh_abandon_user(struct vringh *vrh, unsigned int num) -{ - /* We only update vring_avail_event(vr) when we want to be notified, - * so we haven't changed that yet. */ - vrh->last_avail_idx -= num; -} -EXPORT_SYMBOL(vringh_abandon_user); - -/** * vringh_complete_user - we've finished with descriptor, publish it. * @vrh: the vring. * @head: the head as filled in by vringh_getdesc_user. @@ -900,20 +884,6 @@ static inline int putused_kern(const struct vringh *vrh, return 0; } -static inline int xfer_kern(const struct vringh *vrh, void *src, - void *dst, size_t len) -{ - memcpy(dst, src, len); - return 0; -} - -static inline int kern_xfer(const struct vringh *vrh, void *dst, - void *src, size_t len) -{ - memcpy(dst, src, len); - return 0; -} - /** * vringh_init_kern - initialize a vringh for a kernelspace vring. * @vrh: the vringh to initialize. @@ -999,51 +969,6 @@ int vringh_getdesc_kern(struct vringh *vrh, EXPORT_SYMBOL(vringh_getdesc_kern); /** - * vringh_iov_pull_kern - copy bytes from vring_iov. - * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume) - * @dst: the place to copy. - * @len: the maximum length to copy. - * - * Returns the bytes copied <= len or a negative errno. - */ -ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len) -{ - return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern); -} -EXPORT_SYMBOL(vringh_iov_pull_kern); - -/** - * vringh_iov_push_kern - copy bytes into vring_iov. - * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume) - * @src: the place to copy from. - * @len: the maximum length to copy. - * - * Returns the bytes copied <= len or a negative errno. - */ -ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, - const void *src, size_t len) -{ - return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer); -} -EXPORT_SYMBOL(vringh_iov_push_kern); - -/** - * vringh_abandon_kern - we've decided not to handle the descriptor(s). - * @vrh: the vring. 
- * @num: the number of descriptors to put back (ie. num - * vringh_get_kern() to undo). - * - * The next vringh_get_kern() will return the old descriptor(s) again. - */ -void vringh_abandon_kern(struct vringh *vrh, unsigned int num) -{ - /* We only update vring_avail_event(vr) when we want to be notified, - * so we haven't changed that yet. */ - vrh->last_avail_idx -= num; -} -EXPORT_SYMBOL(vringh_abandon_kern); - -/** * vringh_complete_kern - we've finished with descriptor, publish it. * @vrh: the vring. * @head: the head as filled in by vringh_getdesc_kern. @@ -1535,23 +1460,6 @@ ssize_t vringh_iov_push_iotlb(struct vringh *vrh, EXPORT_SYMBOL(vringh_iov_push_iotlb); /** - * vringh_abandon_iotlb - we've decided not to handle the descriptor(s). - * @vrh: the vring. - * @num: the number of descriptors to put back (ie. num - * vringh_get_iotlb() to undo). - * - * The next vringh_get_iotlb() will return the old descriptor(s) again. - */ -void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num) -{ - /* We only update vring_avail_event(vr) when we want to be notified, - * so we haven't changed that yet. - */ - vrh->last_avail_idx -= num; -} -EXPORT_SYMBOL(vringh_abandon_iotlb); - -/** * vringh_complete_iotlb - we've finished with descriptor, publish it. * @vrh: the vring. * @head: the head as filled in by vringh_getdesc_iotlb. @@ -1572,32 +1480,6 @@ int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len) EXPORT_SYMBOL(vringh_complete_iotlb); /** - * vringh_notify_enable_iotlb - we want to know if something changes. - * @vrh: the vring. - * - * This always enables notifications, but returns false if there are - * now more buffers available in the vring. - */ -bool vringh_notify_enable_iotlb(struct vringh *vrh) -{ - return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb); -} -EXPORT_SYMBOL(vringh_notify_enable_iotlb); - -/** - * vringh_notify_disable_iotlb - don't tell us if something changes. - * @vrh: the vring. - * - * This is our normal running state: we disable and then only enable when - * we're going to sleep. - */ -void vringh_notify_disable_iotlb(struct vringh *vrh) -{ - __vringh_notify_disable(vrh, putu16_iotlb); -} -EXPORT_SYMBOL(vringh_notify_disable_iotlb); - -/** * vringh_need_notify_iotlb - must we tell the other side about used buffers? * @vrh: the vring we've called vringh_complete_iotlb() on. 
* diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 802153e23073..ae01457ea2cd 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -344,6 +344,10 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, len = iov_length(vq->iov, out); + if (len < VIRTIO_VSOCK_SKB_HEADROOM || + len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM) + return NULL; + /* len contains both payload and hdr */ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL); if (!skb) @@ -367,18 +371,15 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq, return skb; /* The pkt is too big or the length in the header is invalid */ - if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE || - payload_len + sizeof(*hdr) > len) { + if (payload_len + sizeof(*hdr) > len) { kfree_skb(skb); return NULL; } - virtio_vsock_skb_rx_put(skb); + virtio_vsock_skb_put(skb, payload_len); - nbytes = copy_from_iter(skb->data, payload_len, &iov_iter); - if (nbytes != payload_len) { - vq_err(vq, "Expected %zu byte payload, got %zu bytes\n", - payload_len, nbytes); + if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) { + vq_err(vq, "Failed to copy %zu byte payload\n", payload_len); kfree_skb(skb); return NULL; } diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 5c48788cdbec..a09eb4d62f82 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(virtio_config_changed); /** * virtio_config_driver_disable - disable config change reporting by drivers - * @dev: the device to reset + * @dev: the device to disable * * This is only allowed to be called by a driver and disabling can't * be nested. @@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(virtio_config_driver_disable); /** * virtio_config_driver_enable - enable config change reporting by drivers - * @dev: the device to reset + * @dev: the device to enable * * This is only allowed to be called by a driver and enabling can't * be nested. @@ -512,7 +512,7 @@ out: * On error, the caller must call put_device on &@dev->dev (and not kfree), * as another code path may have obtained a reference to @dev. 
* - * Returns: 0 on suceess, -error on failure + * Returns: 0 on success, -error on failure */ int register_virtio_device(struct virtio_device *dev) { @@ -536,6 +536,7 @@ int register_virtio_device(struct virtio_device *dev) goto out_ida_remove; spin_lock_init(&dev->config_lock); + dev->config_driver_disabled = false; dev->config_core_enabled = false; dev->config_change_pending = false; diff --git a/drivers/virtio/virtio_dma_buf.c b/drivers/virtio/virtio_dma_buf.c index 3fe1d03b0645..95c10632f84a 100644 --- a/drivers/virtio/virtio_dma_buf.c +++ b/drivers/virtio/virtio_dma_buf.c @@ -36,6 +36,8 @@ EXPORT_SYMBOL(virtio_dma_buf_export); /** * virtio_dma_buf_attach - mandatory attach callback for virtio dma-bufs + * @dma_buf: [in] buffer to attach + * @attach: [in] attachment structure */ int virtio_dma_buf_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 5d78c2d572ab..b152a1eca05a 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -65,7 +65,6 @@ #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/virtio.h> #include <linux/virtio_config.h> #include <uapi/linux/virtio_mmio.h> @@ -88,22 +87,8 @@ struct virtio_mmio_device { void __iomem *base; unsigned long version; - - /* a list of queues so we can dispatch IRQs */ - spinlock_t lock; - struct list_head virtqueues; -}; - -struct virtio_mmio_vq_info { - /* the actual virtqueue */ - struct virtqueue *vq; - - /* the list node for the virtqueues list */ - struct list_head node; }; - - /* Configuration interface */ static u64 vm_get_features(struct virtio_device *vdev) @@ -300,9 +285,8 @@ static bool vm_notify_with_data(struct virtqueue *vq) static irqreturn_t vm_interrupt(int irq, void *opaque) { struct virtio_mmio_device *vm_dev = opaque; - struct virtio_mmio_vq_info *info; + struct virtqueue *vq; unsigned long status; - unsigned long flags; irqreturn_t ret = IRQ_NONE; /* Read and acknowledge interrupts */ @@ -315,10 +299,8 @@ static irqreturn_t vm_interrupt(int irq, void *opaque) } if (likely(status & VIRTIO_MMIO_INT_VRING)) { - spin_lock_irqsave(&vm_dev->lock, flags); - list_for_each_entry(info, &vm_dev->virtqueues, node) - ret |= vring_interrupt(irq, info->vq); - spin_unlock_irqrestore(&vm_dev->lock, flags); + virtio_device_for_each_vq(&vm_dev->vdev, vq) + ret |= vring_interrupt(irq, vq); } return ret; @@ -329,14 +311,8 @@ static irqreturn_t vm_interrupt(int irq, void *opaque) static void vm_del_vq(struct virtqueue *vq) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); - struct virtio_mmio_vq_info *info = vq->priv; - unsigned long flags; unsigned int index = vq->index; - spin_lock_irqsave(&vm_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vm_dev->lock, flags); - /* Select and deactivate the queue */ writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); if (vm_dev->version == 1) { @@ -347,8 +323,6 @@ static void vm_del_vq(struct virtqueue *vq) } vring_del_virtqueue(vq); - - kfree(info); } static void vm_del_vqs(struct virtio_device *vdev) @@ -375,9 +349,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); bool (*notify)(struct virtqueue *vq); - struct virtio_mmio_vq_info *info; struct virtqueue *vq; - unsigned long flags; unsigned int num; int err; @@ -399,13 +371,6 @@ static struct virtqueue *vm_setup_vq(struct 
virtio_device *vdev, unsigned int in goto error_available; } - /* Allocate and fill out our active queue description */ - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) { - err = -ENOMEM; - goto error_kmalloc; - } - num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX); if (num == 0) { err = -ENOENT; @@ -463,13 +428,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int in writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); } - vq->priv = info; - info->vq = vq; - - spin_lock_irqsave(&vm_dev->lock, flags); - list_add(&info->node, &vm_dev->virtqueues); - spin_unlock_irqrestore(&vm_dev->lock, flags); - return vq; error_bad_pfn: @@ -481,8 +439,6 @@ error_new_virtqueue: writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); } - kfree(info); -error_kmalloc: error_available: return ERR_PTR(err); } @@ -627,8 +583,6 @@ static int virtio_mmio_probe(struct platform_device *pdev) vm_dev->vdev.dev.release = virtio_mmio_release_dev; vm_dev->vdev.config = &virtio_mmio_config_ops; vm_dev->pdev = pdev; - INIT_LIST_HEAD(&vm_dev->virtqueues); - spin_lock_init(&vm_dev->lock); vm_dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(vm_dev->base)) { diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 4397392bfef0..f5062061c408 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2296,6 +2296,10 @@ static inline int virtqueue_add(struct virtqueue *_vq, * at the same time (except where noted). * * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + * + * NB: ENOSPC is a special code that is only returned on an attempt to add a + * buffer to a full VQ. It indicates that some buffers are outstanding and that + * the operation can be retried after some buffers have been used. 
*/ int virtqueue_add_sgs(struct virtqueue *_vq, struct scatterlist *sgs[], diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index a7b297dae489..657b07a60788 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -28,19 +28,6 @@ struct virtio_vdpa_device { struct virtio_device vdev; struct vdpa_device *vdpa; u64 features; - - /* The lock to protect virtqueue list */ - spinlock_t lock; - /* List of virtio_vdpa_vq_info */ - struct list_head virtqueues; -}; - -struct virtio_vdpa_vq_info { - /* the actual virtqueue */ - struct virtqueue *vq; - - /* the list node for the virtqueues list */ - struct list_head node; }; static inline struct virtio_vdpa_device * @@ -135,9 +122,9 @@ static irqreturn_t virtio_vdpa_config_cb(void *private) static irqreturn_t virtio_vdpa_virtqueue_cb(void *private) { - struct virtio_vdpa_vq_info *info = private; + struct virtqueue *vq = private; - return vring_interrupt(0, info->vq); + return vring_interrupt(0, vq); } static struct virtqueue * @@ -145,18 +132,15 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index, void (*callback)(struct virtqueue *vq), const char *name, bool ctx) { - struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev); struct vdpa_device *vdpa = vd_get_vdpa(vdev); struct device *dma_dev; const struct vdpa_config_ops *ops = vdpa->config; - struct virtio_vdpa_vq_info *info; bool (*notify)(struct virtqueue *vq) = virtio_vdpa_notify; struct vdpa_callback cb; struct virtqueue *vq; u64 desc_addr, driver_addr, device_addr; /* Assume split virtqueue, switch to packed if necessary */ struct vdpa_vq_state state = {0}; - unsigned long flags; u32 align, max_num, min_num = 1; bool may_reduce_num = true; int err; @@ -179,10 +163,6 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index, if (ops->get_vq_ready(vdpa, index)) return ERR_PTR(-ENOENT); - /* Allocate and fill out our active queue description */ - info = kmalloc(sizeof(*info), GFP_KERNEL); - if (!info) - return ERR_PTR(-ENOMEM); if (ops->get_vq_size) max_num = ops->get_vq_size(vdpa, index); else @@ -217,7 +197,7 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index, /* Setup virtqueue callback */ cb.callback = callback ? 
virtio_vdpa_virtqueue_cb : NULL; - cb.private = info; + cb.private = vq; cb.trigger = NULL; ops->set_vq_cb(vdpa, index, &cb); ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq)); @@ -248,13 +228,6 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index, ops->set_vq_ready(vdpa, index, 1); - vq->priv = info; - info->vq = vq; - - spin_lock_irqsave(&vd_dev->lock, flags); - list_add(&info->node, &vd_dev->virtqueues); - spin_unlock_irqrestore(&vd_dev->lock, flags); - return vq; err_vq: @@ -263,7 +236,6 @@ error_new_virtqueue: ops->set_vq_ready(vdpa, index, 0); /* VDPA driver should make sure vq is stopeed here */ WARN_ON(ops->get_vq_ready(vdpa, index)); - kfree(info); return ERR_PTR(err); } @@ -272,20 +244,12 @@ static void virtio_vdpa_del_vq(struct virtqueue *vq) struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev); struct vdpa_device *vdpa = vd_dev->vdpa; const struct vdpa_config_ops *ops = vdpa->config; - struct virtio_vdpa_vq_info *info = vq->priv; unsigned int index = vq->index; - unsigned long flags; - - spin_lock_irqsave(&vd_dev->lock, flags); - list_del(&info->node); - spin_unlock_irqrestore(&vd_dev->lock, flags); /* Select and deactivate the queue (best effort) */ ops->set_vq_ready(vdpa, index, 0); vring_del_virtqueue(vq); - - kfree(info); } static void virtio_vdpa_del_vqs(struct virtio_device *vdev) @@ -502,8 +466,6 @@ static int virtio_vdpa_probe(struct vdpa_device *vdpa) vd_dev->vdev.dev.release = virtio_vdpa_release_dev; vd_dev->vdev.config = &virtio_vdpa_config_ops; vd_dev->vdpa = vdpa; - INIT_LIST_HEAD(&vd_dev->virtqueues); - spin_lock_init(&vd_dev->lock); vd_dev->vdev.id.device = ops->get_device_id(vdpa); if (vd_dev->vdev.id.device == 0) diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 26efca9ae0e7..c3fbb6068c52 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -644,6 +644,8 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) } else { wdd->timeout = DW_WDT_DEFAULT_SECONDS; watchdog_init_timeout(wdd, 0, dev); + /* Limit timeout value to hardware constraints. 
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 26efca9ae0e7..c3fbb6068c52 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -644,6 +644,8 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) } else { wdd->timeout = DW_WDT_DEFAULT_SECONDS; watchdog_init_timeout(wdd, 0, dev); + /* Limit timeout value to hardware constraints. */ + dw_wdt_set_timeout(wdd, wdd->timeout); } platform_set_drvdata(pdev, dw_wdt); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 9ab769aa0244..4ab3405ef8e6 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -577,7 +577,11 @@ static int iTCO_wdt_probe(struct platform_device *pdev) /* Check that the heartbeat value is within its range; if not, reset to the default */ if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) { - iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); + ret = iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT); + if (ret != 0) { + dev_err(dev, "Failed to set watchdog timeout (%d)\n", WATCHDOG_TIMEOUT); + return ret; + } dev_info(dev, "timeout value out of range, using %d\n", WATCHDOG_TIMEOUT); heartbeat = WATCHDOG_TIMEOUT; diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index a1e23dce8810..3b8488c86a2f 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -22,11 +22,13 @@ #include <linux/bits.h> #include <linux/dmi.h> +#include <linux/errno.h> #include <linux/init.h> #include <linux/io.h> -#include <linux/kernel.h> +#include <linux/ioport.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/printk.h> #include <linux/types.h> #include <linux/watchdog.h> diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index c0b2a9c5250d..97bcd32bade5 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c @@ -300,7 +300,7 @@ static void rwdt_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int __maybe_unused rwdt_suspend(struct device *dev) +static int rwdt_suspend(struct device *dev) { struct rwdt_priv *priv = dev_get_drvdata(dev); @@ -310,7 +310,7 @@ static int __maybe_unused rwdt_suspend(struct device *dev) return 0; } -static int __maybe_unused rwdt_resume(struct device *dev) +static int rwdt_resume(struct device *dev) { struct rwdt_priv *priv = dev_get_drvdata(dev); @@ -320,7 +320,7 @@ static int __maybe_unused rwdt_resume(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume); static const struct of_device_id rwdt_ids[] = { { .compatible = "renesas,rcar-gen2-wdt", }, @@ -334,7 +334,7 @@ static struct platform_driver rwdt_driver = { .driver = { .name = "renesas_wdt", .of_match_table = rwdt_ids, - .pm = &rwdt_pm_ops, + .pm = pm_sleep_ptr(&rwdt_pm_ops), }, .probe = rwdt_probe, .remove = rwdt_remove,
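The renesas_wdt hunks above are the standard conversion from __maybe_unused callbacks plus SIMPLE_DEV_PM_OPS to DEFINE_SIMPLE_DEV_PM_OPS plus pm_sleep_ptr(): the ops pointer evaluates to NULL when CONFIG_PM_SLEEP is disabled, and the then-unreferenced callbacks are discarded by the compiler without needing attribute annotations. A generic sketch of the pattern, with hypothetical foo_* names rather than the driver's:

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restart the hardware */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when !CONFIG_PM_SLEEP, so no __maybe_unused needed */
		.pm = pm_sleep_ptr(&foo_pm_ops),
	},
};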
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index d1f9ce4100a8..be7d7db47591 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/of.h> -#include <linux/of_address.h> +#include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/types.h> @@ -214,7 +214,6 @@ static int rti_wdt_probe(struct platform_device *pdev) struct rti_wdt_device *wdt; struct clk *clk; u32 last_ping = 0; - struct device_node *node; u32 reserved_mem_size; struct resource res; u32 *vaddr; @@ -299,15 +298,8 @@ static int rti_wdt_probe(struct platform_device *pdev) } } - node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); - if (node) { - ret = of_address_to_resource(node, 0, &res); - of_node_put(node); - if (ret) { - dev_err(dev, "No memory address assigned to the region.\n"); - goto err_iomap; - } - + ret = of_reserved_mem_region_to_resource(pdev->dev.of_node, 0, &res); + if (!ret) { /* * If reserved memory is defined for the watchdog reset cause, * read out the Power-on (PON) reason and pass it to bootstatus. diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index 5f23913ce3b4..6ce1bfb39064 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -75,11 +75,17 @@ #define SBSA_GWDT_VERSION_MASK 0xF #define SBSA_GWDT_VERSION_SHIFT 16 +#define SBSA_GWDT_IMPL_MASK 0x7FF +#define SBSA_GWDT_IMPL_SHIFT 0 +#define SBSA_GWDT_IMPL_MEDIATEK 0x426 + /** * struct sbsa_gwdt - Internal representation of the SBSA GWDT * @wdd: kernel watchdog_device structure * @clk: store the System Counter clock frequency, in Hz. * @version: store the architecture version + * @need_ws0_race_workaround: + * indicate whether to adjust wdd->timeout to avoid a race with WS0 * @refresh_base: Virtual address of the watchdog refresh frame * @control_base: Virtual address of the watchdog control frame */ @@ -87,6 +93,7 @@ struct sbsa_gwdt { struct watchdog_device wdd; u32 clk; int version; + bool need_ws0_race_workaround; void __iomem *refresh_base; void __iomem *control_base; }; @@ -161,6 +168,31 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd, */ sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt); + /* + * Some watchdog hardware has a race condition where it will ignore + * sbsa_gwdt_keepalive() if it is called at the exact moment that a + * timeout occurs and WS0 is being asserted. Unfortunately, the default + * behavior of the watchdog core is very likely to trigger this race + * when action=0 because it programs WOR to be half of the desired + * timeout, and watchdog_next_keepalive() chooses the exact same time to + * send keepalive pings. + * + * This triggers a race where sbsa_gwdt_keepalive() can be called right + * as WS0 is being asserted, and affected hardware will ignore that + * write and continue to assert WS0. After another (timeout / 2) + * seconds, the same race happens again. If the driver wins, the + * explicit refresh will reset WS0 to false, but if the hardware wins, + * WS1 is asserted and the system resets. + * + * Avoid the problem by scheduling keepalive heartbeats one second later + * than the WOR timeout. + * + * This workaround might not be needed in a future revision of the + * hardware. + */ + if (gwdt->need_ws0_race_workaround) + wdd->min_hw_heartbeat_ms = timeout * 500 + 1000; + return 0; } @@ -202,12 +234,15 @@ static int sbsa_gwdt_keepalive(struct watchdog_device *wdd) static void sbsa_gwdt_get_version(struct watchdog_device *wdd) { struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd); - int ver; + int iidr, ver, impl; - ver = readl(gwdt->control_base + SBSA_GWDT_W_IIDR); - ver = (ver >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK; + iidr = readl(gwdt->control_base + SBSA_GWDT_W_IIDR); + ver = (iidr >> SBSA_GWDT_VERSION_SHIFT) & SBSA_GWDT_VERSION_MASK; + impl = (iidr >> SBSA_GWDT_IMPL_SHIFT) & SBSA_GWDT_IMPL_MASK; gwdt->version = ver; + gwdt->need_ws0_race_workaround = + !action && (impl == SBSA_GWDT_IMPL_MEDIATEK); } static int sbsa_gwdt_start(struct watchdog_device *wdd) @@ -299,6 +334,15 @@ static int sbsa_gwdt_probe(struct platform_device *pdev) else wdd->max_hw_heartbeat_ms = GENMASK_ULL(47, 0) / gwdt->clk * 1000; + if (gwdt->need_ws0_race_workaround) { + /* + * A timeout of 3 seconds means that WOR will be set to 1.5 + * seconds and the heartbeat will be scheduled every 2.5 + * seconds. + */ + wdd->min_timeout = 3; + } + status = readl(cf_base + SBSA_GWDT_WCS); if (status & SBSA_GWDT_WCS_WS1) { dev_warn(dev, "System reset by WDT.\n");
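To make the sbsa_gwdt workaround arithmetic concrete: for a user timeout of T seconds the core programs WOR to T/2, so min_hw_heartbeat_ms = T * 500 + 1000 places each keepalive one full second after WOR expiry instead of exactly on it. An illustrative helper (not from the patch; the function name is a stand-in):

static unsigned int ws0_safe_min_heartbeat_ms(unsigned int timeout_s)
{
	/*
	 * WOR expires at timeout_s * 500 ms (half the timeout); spacing
	 * keepalives one second beyond that keeps a refresh from ever
	 * landing exactly as WS0 asserts.
	 */
	return timeout_s * 500 + 1000;
}

With the minimum timeout of 3 seconds enforced in probe, this yields WOR = 1500 ms and keepalives no closer than 2500 ms apart, matching the numbers in the comment above.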
diff --git a/drivers/watchdog/watchdog_core.h b/drivers/watchdog/watchdog_core.h index 5b35a8439e26..ab825d9f9248 100644 --- a/drivers/watchdog/watchdog_core.h +++ b/drivers/watchdog/watchdog_core.h @@ -24,8 +24,14 @@ * This material is provided "AS-IS" and at no charge. */ -#include <linux/hrtimer.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/hrtimer_types.h> +#include <linux/init.h> #include <linux/kthread.h> +#include <linux/mutex_types.h> +#include <linux/types.h> +#include <linux/watchdog.h> #define MAX_DOGS 32 /* Maximum number of watchdog devices */ diff --git a/drivers/watchdog/watchdog_pretimeout.c b/drivers/watchdog/watchdog_pretimeout.c index e5295c990fa1..2526436dc74d 100644 --- a/drivers/watchdog/watchdog_pretimeout.c +++ b/drivers/watchdog/watchdog_pretimeout.c @@ -7,6 +7,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/types.h> #include <linux/watchdog.h> #include "watchdog_core.h" diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c index fcc1ba02e75b..5c6e3fa001d8 100644 --- a/drivers/watchdog/ziirave_wdt.c +++ b/drivers/watchdog/ziirave_wdt.c @@ -302,6 +302,9 @@ static int ziirave_firm_verify(struct watchdog_device *wdd, const u16 len = be16_to_cpu(rec->len); const u32 addr = be32_to_cpu(rec->addr); + if (len > sizeof(data)) + return -EINVAL; + if (ziirave_firm_addr_readonly(addr)) continue;
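The ziirave_wdt hunk above rejects firmware records whose payload length exceeds the fixed staging buffer before anything is copied. A hedged sketch of the same validate-before-copy pattern — the struct, buffer, and function names here are stand-ins for illustration, not the driver's types:

struct fw_rec_hdr {
	__be16 len;	/* payload length, big-endian on the wire */
	__be32 addr;	/* target address */
};

static int example_check_record(const struct fw_rec_hdr *rec,
				const u8 *payload, u8 *buf, size_t buf_size)
{
	const u16 len = be16_to_cpu(rec->len);

	/* Reject oversized records up front rather than overflowing buf. */
	if (len > buf_size)
		return -EINVAL;

	memcpy(buf, payload, len);
	return 0;
}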