Diffstat (limited to 'drivers/hwtracing')
66 files changed, 3353 insertions, 1044 deletions
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index 06f0a7594169..ecd7086a5b83 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -133,6 +133,18 @@ config CORESIGHT_STM To compile this driver as a module, choose M here: the module will be called coresight-stm. +config CORESIGHT_CTCU + tristate "CoreSight TMC Control Unit driver" + depends on CORESIGHT_LINK_AND_SINK_TMC + help + This driver provides support for CoreSight TMC Control Unit + that hosts miscellaneous configuration registers. This is + primarily used for controlling the behaviors of the TMC + ETR device. + + To compile this driver as a module, choose M here: the + module will be called coresight-ctcu. + config CORESIGHT_CPU_DEBUG tristate "CoreSight CPU Debug driver" depends on ARM || ARM64 diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile index 4ba478211b31..8e62c3150aeb 100644 --- a/drivers/hwtracing/coresight/Makefile +++ b/drivers/hwtracing/coresight/Makefile @@ -25,7 +25,7 @@ subdir-ccflags-y += $(condflags) obj-$(CONFIG_CORESIGHT) += coresight.o coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \ coresight-sysfs.o coresight-syscfg.o coresight-config.o \ - coresight-cfg-preload.o coresight-cfg-afdo.o \ + coresight-cfg-preload.o coresight-cfg-afdo.o coresight-cfg-pstop.o \ coresight-syscfg-configfs.o coresight-trace-id.o obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \ @@ -51,3 +51,5 @@ coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \ coresight-cti-sysfs.o obj-$(CONFIG_ULTRASOC_SMB) += ultrasoc-smb.o obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o +obj-$(CONFIG_CORESIGHT_CTCU) += coresight-ctcu.o +coresight-ctcu-y := coresight-ctcu-core.o diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index 3949ded0d4fa..fa170c966bc3 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -7,11 +7,13 @@ * Author: Suzuki K Poulose <suzuki.poulose@arm.com> */ +#include <linux/acpi.h> #include <linux/amba/bus.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include "coresight-catu.h" @@ -267,7 +269,7 @@ catu_init_sg_table(struct device *catu_dev, int node, * Each table can address upto 1MB and we can have * CATU_PAGES_PER_SYSPAGE tables in a system page. 
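/*
 * A standalone sketch (not kernel code) of why the nr_tpages rounding below
 * is changed: each CATU table covers 1MB and a system page holds
 * CATU_PAGES_PER_SYSPAGE tables, so one table page spans
 * CATU_PAGES_PER_SYSPAGE MB. Assuming 64K system pages (16 tables per page),
 * the old expression truncates small buffers down to zero table pages.
 */
#include <stdio.h>

#define SZ_1M                   (1UL << 20)
#define CATU_PAGES_PER_SYSPAGE  16UL    /* assumed: 64K system page / 4K CATU page */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long size = 2 * SZ_1M; /* a 2MB trace buffer */
        unsigned long old = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
        unsigned long new = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M);

        printf("old=%lu new=%lu\n", old, new); /* prints old=0 new=1 */
        return 0;
}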
*/ - nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE; + nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M); catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages, size >> PAGE_SHIFT, pages); if (IS_ERR(catu_table)) @@ -502,28 +504,20 @@ static const struct coresight_ops catu_ops = { .helper_ops = &catu_helper_ops, }; -static int catu_probe(struct amba_device *adev, const struct amba_id *id) +static int __catu_probe(struct device *dev, struct resource *res) { int ret = 0; u32 dma_mask; - struct catu_drvdata *drvdata; + struct catu_drvdata *drvdata = dev_get_drvdata(dev); struct coresight_desc catu_desc; struct coresight_platform_data *pdata = NULL; - struct device *dev = &adev->dev; void __iomem *base; catu_desc.name = coresight_alloc_device_name(&catu_devs, dev); if (!catu_desc.name) return -ENOMEM; - drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) { - ret = -ENOMEM; - goto out; - } - - dev_set_drvdata(dev, drvdata); - base = devm_ioremap_resource(dev, &adev->res); + base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) { ret = PTR_ERR(base); goto out; @@ -567,20 +561,40 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id) drvdata->csdev = coresight_register(&catu_desc); if (IS_ERR(drvdata->csdev)) ret = PTR_ERR(drvdata->csdev); - else - pm_runtime_put(&adev->dev); out: return ret; } -static void catu_remove(struct amba_device *adev) +static int catu_probe(struct amba_device *adev, const struct amba_id *id) { - struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev); + struct catu_drvdata *drvdata; + int ret; + + drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + amba_set_drvdata(adev, drvdata); + ret = __catu_probe(&adev->dev, &adev->res); + if (!ret) + pm_runtime_put(&adev->dev); + + return ret; +} + +static void __catu_remove(struct device *dev) +{ + struct catu_drvdata *drvdata = dev_get_drvdata(dev); coresight_unregister(drvdata->csdev); } -static struct amba_id catu_ids[] = { +static void catu_remove(struct amba_device *adev) +{ + __catu_remove(&adev->dev); +} + +static const struct amba_id catu_ids[] = { CS_AMBA_ID(0x000bb9ee), {}, }; @@ -590,7 +604,6 @@ MODULE_DEVICE_TABLE(amba, catu_ids); static struct amba_driver catu_driver = { .drv = { .name = "coresight-catu", - .owner = THIS_MODULE, .suppress_bind_attrs = true, }, .probe = catu_probe, @@ -598,13 +611,98 @@ static struct amba_driver catu_driver = { .id_table = catu_ids, }; +static int catu_platform_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct catu_drvdata *drvdata; + int ret = 0; + + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev); + if (IS_ERR(drvdata->pclk)) + return -ENODEV; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + dev_set_drvdata(&pdev->dev, drvdata); + ret = __catu_probe(&pdev->dev, res); + pm_runtime_put(&pdev->dev); + if (ret) { + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); + } + + return ret; +} + +static void catu_platform_remove(struct platform_device *pdev) +{ + struct catu_drvdata *drvdata = dev_get_drvdata(&pdev->dev); + + if (WARN_ON(!drvdata)) + return; + + __catu_remove(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + 
clk_put(drvdata->pclk); +} + +#ifdef CONFIG_PM +static int catu_runtime_suspend(struct device *dev) +{ + struct catu_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); + return 0; +} + +static int catu_runtime_resume(struct device *dev) +{ + struct catu_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_prepare_enable(drvdata->pclk); + return 0; +} +#endif + +static const struct dev_pm_ops catu_dev_pm_ops = { + SET_RUNTIME_PM_OPS(catu_runtime_suspend, catu_runtime_resume, NULL) +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id catu_acpi_ids[] = { + {"ARMHC9CA", 0, 0, 0}, /* ARM CoreSight CATU */ + {}, +}; + +MODULE_DEVICE_TABLE(acpi, catu_acpi_ids); +#endif + +static struct platform_driver catu_platform_driver = { + .probe = catu_platform_probe, + .remove = catu_platform_remove, + .driver = { + .name = "coresight-catu-platform", + .acpi_match_table = ACPI_PTR(catu_acpi_ids), + .suppress_bind_attrs = true, + .pm = &catu_dev_pm_ops, + }, +}; + static int __init catu_init(void) { int ret; - ret = amba_driver_register(&catu_driver); - if (ret) - pr_info("Error registering catu driver\n"); + ret = coresight_init_driver("catu", &catu_driver, &catu_platform_driver); tmc_etr_set_catu_ops(&etr_catu_buf_ops); return ret; } @@ -612,7 +710,7 @@ static int __init catu_init(void) static void __exit catu_exit(void) { tmc_etr_remove_catu_ops(); - amba_driver_unregister(&catu_driver); + coresight_remove_driver(&catu_driver, &catu_platform_driver); } module_init(catu_init); diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h index 442e034bbfba..141feac1c14b 100644 --- a/drivers/hwtracing/coresight/coresight-catu.h +++ b/drivers/hwtracing/coresight/coresight-catu.h @@ -61,6 +61,7 @@ #define CATU_IRQEN_OFF 0x0 struct catu_drvdata { + struct clk *pclk; void __iomem *base; struct coresight_device *csdev; int irq; diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.c b/drivers/hwtracing/coresight/coresight-cfg-preload.c index e237a4edfa09..4980e68483c5 100644 --- a/drivers/hwtracing/coresight/coresight-cfg-preload.c +++ b/drivers/hwtracing/coresight/coresight-cfg-preload.c @@ -13,6 +13,7 @@ static struct cscfg_feature_desc *preload_feats[] = { #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) &strobe_etm4x, + &gen_etrig_etm4x, #endif NULL }; @@ -20,6 +21,7 @@ static struct cscfg_feature_desc *preload_feats[] = { static struct cscfg_config_desc *preload_cfgs[] = { #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) &afdo_etm4x, + &pstop_etm4x, #endif NULL }; diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.h b/drivers/hwtracing/coresight/coresight-cfg-preload.h index 21299e175477..291ba530a6a5 100644 --- a/drivers/hwtracing/coresight/coresight-cfg-preload.h +++ b/drivers/hwtracing/coresight/coresight-cfg-preload.h @@ -10,4 +10,6 @@ #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) extern struct cscfg_feature_desc strobe_etm4x; extern struct cscfg_config_desc afdo_etm4x; +extern struct cscfg_feature_desc gen_etrig_etm4x; +extern struct cscfg_config_desc pstop_etm4x; #endif diff --git a/drivers/hwtracing/coresight/coresight-cfg-pstop.c b/drivers/hwtracing/coresight/coresight-cfg-pstop.c new file mode 100644 index 000000000000..c2bfbd07bfaf --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-cfg-pstop.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2023 Marvell. 
+ * Based on coresight-cfg-afdo.c + */ + +#include "coresight-config.h" + +/* ETMv4 includes and features */ +#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) +#include "coresight-etm4x-cfg.h" + +/* preload configurations and features */ + +/* preload in features for ETMv4 */ + +/* panic_stop feature */ +static struct cscfg_parameter_desc gen_etrig_params[] = { + { + .name = "address", + .value = (u64)panic, + }, +}; + +static struct cscfg_regval_desc gen_etrig_regs[] = { + /* resource selector */ + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCRSCTLRn(2), + .hw_info = ETM4_CFG_RES_SEL, + .val32 = 0x40001, + }, + /* single address comparator */ + { + .type = CS_CFG_REG_TYPE_RESOURCE | CS_CFG_REG_TYPE_VAL_64BIT | + CS_CFG_REG_TYPE_VAL_PARAM, + .offset = TRCACVRn(0), + .val32 = 0x0, + }, + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCACATRn(0), + .val64 = 0xf00, + }, + /* Driver external output[0] with comparator out */ + { + .type = CS_CFG_REG_TYPE_RESOURCE, + .offset = TRCEVENTCTL0R, + .val32 = 0x2, + }, + /* end of regs */ +}; + +struct cscfg_feature_desc gen_etrig_etm4x = { + .name = "gen_etrig", + .description = "Generate external trigger on address match\n" + "parameter \'address\': address of kernel address\n", + .match_flags = CS_CFG_MATCH_CLASS_SRC_ETM4, + .nr_params = ARRAY_SIZE(gen_etrig_params), + .params_desc = gen_etrig_params, + .nr_regs = ARRAY_SIZE(gen_etrig_regs), + .regs_desc = gen_etrig_regs, +}; + +/* create a panic stop configuration */ + +/* the total number of parameters in used features */ +#define PSTOP_NR_PARAMS ARRAY_SIZE(gen_etrig_params) + +static const char *pstop_ref_names[] = { + "gen_etrig", +}; + +struct cscfg_config_desc pstop_etm4x = { + .name = "panicstop", + .description = "Stop ETM on kernel panic\n", + .nr_feat_refs = ARRAY_SIZE(pstop_ref_names), + .feat_ref_names = pstop_ref_names, + .nr_total_params = PSTOP_NR_PARAMS, +}; + +/* end of ETM4x configurations */ +#endif /* IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X) */ diff --git a/drivers/hwtracing/coresight/coresight-config.c b/drivers/hwtracing/coresight/coresight-config.c index 4723bf7402a2..4f72ae71b696 100644 --- a/drivers/hwtracing/coresight/coresight-config.c +++ b/drivers/hwtracing/coresight/coresight-config.c @@ -76,10 +76,10 @@ static int cscfg_set_on_enable(struct cscfg_feature_csdev *feat_csdev) unsigned long flags; int i; - spin_lock_irqsave(feat_csdev->drv_spinlock, flags); + raw_spin_lock_irqsave(feat_csdev->drv_spinlock, flags); for (i = 0; i < feat_csdev->nr_regs; i++) cscfg_set_reg(&feat_csdev->regs_csdev[i]); - spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags); + raw_spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags); dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s", feat_csdev->feat_desc->name, "set on enable"); return 0; @@ -91,10 +91,10 @@ static void cscfg_save_on_disable(struct cscfg_feature_csdev *feat_csdev) unsigned long flags; int i; - spin_lock_irqsave(feat_csdev->drv_spinlock, flags); + raw_spin_lock_irqsave(feat_csdev->drv_spinlock, flags); for (i = 0; i < feat_csdev->nr_regs; i++) cscfg_save_reg(&feat_csdev->regs_csdev[i]); - spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags); + raw_spin_unlock_irqrestore(feat_csdev->drv_spinlock, flags); dev_dbg(&feat_csdev->csdev->dev, "Feature %s: %s", feat_csdev->feat_desc->name, "save on disable"); } diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h index 6ba013975741..b9ebc9fcfb7f 100644 --- a/drivers/hwtracing/coresight/coresight-config.h +++ 
b/drivers/hwtracing/coresight/coresight-config.h @@ -206,7 +206,7 @@ struct cscfg_feature_csdev { const struct cscfg_feature_desc *feat_desc; struct coresight_device *csdev; struct list_head node; - spinlock_t *drv_spinlock; + raw_spinlock_t *drv_spinlock; int nr_params; struct cscfg_parameter_csdev *params_csdev; int nr_regs; diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index b83613e34289..fb43ef6a3b1f 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -19,10 +19,12 @@ #include <linux/property.h> #include <linux/delay.h> #include <linux/pm_runtime.h> +#include <linux/panic_notifier.h> #include "coresight-etm-perf.h" #include "coresight-priv.h" #include "coresight-syscfg.h" +#include "coresight-trace-id.h" /* * Mutex used to lock all sysfs enable and disable actions and loading and @@ -75,22 +77,54 @@ struct coresight_device *coresight_get_percpu_sink(int cpu) } EXPORT_SYMBOL_GPL(coresight_get_percpu_sink); +static struct coresight_device *coresight_get_source(struct coresight_path *path) +{ + struct coresight_device *csdev; + + if (!path) + return NULL; + + csdev = list_first_entry(&path->path_list, struct coresight_node, link)->csdev; + if (!coresight_is_device_source(csdev)) + return NULL; + + return csdev; +} + +/** + * coresight_blocks_source - checks whether the connection matches the source + * of path if connection is bound to specific source. + * @src: The source device of the trace path + * @conn: The connection of one outport + * + * Return false if the connection doesn't have a source binded or source of the + * path matches the source binds to connection. + */ +static bool coresight_blocks_source(struct coresight_device *src, + struct coresight_connection *conn) +{ + return conn->filter_src_fwnode && (conn->filter_src_dev != src); +} + static struct coresight_connection * -coresight_find_out_connection(struct coresight_device *src_dev, - struct coresight_device *dest_dev) +coresight_find_out_connection(struct coresight_device *csdev, + struct coresight_device *out_dev, + struct coresight_device *trace_src) { int i; struct coresight_connection *conn; - for (i = 0; i < src_dev->pdata->nr_outconns; i++) { - conn = src_dev->pdata->out_conns[i]; - if (conn->dest_dev == dest_dev) + for (i = 0; i < csdev->pdata->nr_outconns; i++) { + conn = csdev->pdata->out_conns[i]; + if (coresight_blocks_source(trace_src, conn)) + continue; + if (conn->dest_dev == out_dev) return conn; } - dev_err(&src_dev->dev, - "couldn't find output connection, src_dev: %s, dest_dev: %s\n", - dev_name(&src_dev->dev), dev_name(&dest_dev->dev)); + dev_err(&csdev->dev, + "couldn't find output connection, csdev: %s, out_dev: %s\n", + dev_name(&csdev->dev), dev_name(&out_dev->dev)); return ERR_PTR(-ENODEV); } @@ -251,7 +285,8 @@ static void coresight_disable_sink(struct coresight_device *csdev) static int coresight_enable_link(struct coresight_device *csdev, struct coresight_device *parent, - struct coresight_device *child) + struct coresight_device *child, + struct coresight_device *source) { int link_subtype; struct coresight_connection *inconn, *outconn; @@ -259,8 +294,8 @@ static int coresight_enable_link(struct coresight_device *csdev, if (!parent || !child) return -EINVAL; - inconn = coresight_find_out_connection(parent, csdev); - outconn = coresight_find_out_connection(csdev, child); + inconn = coresight_find_out_connection(parent, csdev, source); + outconn = 
coresight_find_out_connection(csdev, child, source); link_subtype = csdev->subtype.link_subtype; if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && IS_ERR(inconn)) @@ -273,15 +308,16 @@ static int coresight_enable_link(struct coresight_device *csdev, static void coresight_disable_link(struct coresight_device *csdev, struct coresight_device *parent, - struct coresight_device *child) + struct coresight_device *child, + struct coresight_device *source) { struct coresight_connection *inconn, *outconn; if (!parent || !child) return; - inconn = coresight_find_out_connection(parent, csdev); - outconn = coresight_find_out_connection(csdev, child); + inconn = coresight_find_out_connection(parent, csdev, source); + outconn = coresight_find_out_connection(csdev, child, source); link_ops(csdev)->disable(csdev, inconn, outconn); } @@ -297,12 +333,12 @@ static int coresight_enable_helper(struct coresight_device *csdev, return helper_ops(csdev)->enable(csdev, mode, data); } -static void coresight_disable_helper(struct coresight_device *csdev) +static void coresight_disable_helper(struct coresight_device *csdev, void *data) { - helper_ops(csdev)->disable(csdev, NULL); + helper_ops(csdev)->disable(csdev, data); } -static void coresight_disable_helpers(struct coresight_device *csdev) +static void coresight_disable_helpers(struct coresight_device *csdev, void *data) { int i; struct coresight_device *helper; @@ -310,7 +346,7 @@ static void coresight_disable_helpers(struct coresight_device *csdev) for (i = 0; i < csdev->pdata->nr_outconns; ++i) { helper = csdev->pdata->out_conns[i]->dest_dev; if (helper && coresight_is_helper(helper)) - coresight_disable_helper(helper); + coresight_disable_helper(helper, data); } } @@ -327,7 +363,7 @@ static void coresight_disable_helpers(struct coresight_device *csdev) void coresight_disable_source(struct coresight_device *csdev, void *data) { source_ops(csdev)->disable(csdev, data); - coresight_disable_helpers(csdev); + coresight_disable_helpers(csdev, NULL); } EXPORT_SYMBOL_GPL(coresight_disable_source); @@ -336,16 +372,16 @@ EXPORT_SYMBOL_GPL(coresight_disable_source); * @nd in the list. If @nd is NULL, all the components, except the SOURCE are * disabled. 
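/*
 * A minimal standalone sketch (not the kernel code) of the filter-source rule
 * applied by coresight_blocks_source() in the hunks above: a connection only
 * blocks a trace source when the port is bound to a specific source
 * (filter_src_fwnode set) and that bound source is not the one driving the
 * current path. An unbound port never blocks; a bound but not-yet-resolved
 * port (filter_src_dev still NULL) blocks every source until the orphan
 * fixup resolves it.
 */
#include <stdbool.h>
#include <stddef.h>

struct example_conn {
        void *filter_src_fwnode;        /* non-NULL: port is tied to one source */
        void *filter_src_dev;           /* resolved source device, or NULL */
};

bool example_blocks_source(void *src, const struct example_conn *conn)
{
        return conn->filter_src_fwnode && (conn->filter_src_dev != src);
}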
*/ -static void coresight_disable_path_from(struct list_head *path, +static void coresight_disable_path_from(struct coresight_path *path, struct coresight_node *nd) { u32 type; struct coresight_device *csdev, *parent, *child; if (!nd) - nd = list_first_entry(path, struct coresight_node, link); + nd = list_first_entry(&path->path_list, struct coresight_node, link); - list_for_each_entry_continue(nd, path, link) { + list_for_each_entry_continue(nd, &path->path_list, link) { csdev = nd->csdev; type = csdev->type; @@ -375,18 +411,19 @@ static void coresight_disable_path_from(struct list_head *path, case CORESIGHT_DEV_TYPE_LINK: parent = list_prev_entry(nd, link)->csdev; child = list_next_entry(nd, link)->csdev; - coresight_disable_link(csdev, parent, child); + coresight_disable_link(csdev, parent, child, + coresight_get_source(path)); break; default: break; } /* Disable all helpers adjacent along the path last */ - coresight_disable_helpers(csdev); + coresight_disable_helpers(csdev, path); } } -void coresight_disable_path(struct list_head *path) +void coresight_disable_path(struct coresight_path *path) { coresight_disable_path_from(path, NULL); } @@ -411,20 +448,22 @@ static int coresight_enable_helpers(struct coresight_device *csdev, return 0; } -int coresight_enable_path(struct list_head *path, enum cs_mode mode, +int coresight_enable_path(struct coresight_path *path, enum cs_mode mode, void *sink_data) { int ret = 0; u32 type; struct coresight_node *nd; struct coresight_device *csdev, *parent, *child; + struct coresight_device *source; - list_for_each_entry_reverse(nd, path, link) { + source = coresight_get_source(path); + list_for_each_entry_reverse(nd, &path->path_list, link) { csdev = nd->csdev; type = csdev->type; /* Enable all helpers adjacent to the path first */ - ret = coresight_enable_helpers(csdev, mode, sink_data); + ret = coresight_enable_helpers(csdev, mode, path); if (ret) goto err; /* @@ -456,7 +495,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode, case CORESIGHT_DEV_TYPE_LINK: parent = list_prev_entry(nd, link)->csdev; child = list_next_entry(nd, link)->csdev; - ret = coresight_enable_link(csdev, parent, child); + ret = coresight_enable_link(csdev, parent, child, source); if (ret) goto err; break; @@ -472,38 +511,41 @@ err: goto out; } -struct coresight_device *coresight_get_sink(struct list_head *path) +struct coresight_device *coresight_get_sink(struct coresight_path *path) { struct coresight_device *csdev; if (!path) return NULL; - csdev = list_last_entry(path, struct coresight_node, link)->csdev; + csdev = list_last_entry(&path->path_list, struct coresight_node, link)->csdev; if (csdev->type != CORESIGHT_DEV_TYPE_SINK && csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) return NULL; return csdev; } +EXPORT_SYMBOL_GPL(coresight_get_sink); + +u32 coresight_get_sink_id(struct coresight_device *csdev) +{ + if (!csdev->ea) + return 0; + + /* + * See function etm_perf_add_symlink_sink() to know where + * this comes from. + */ + return (u32) (unsigned long) csdev->ea->var; +} static int coresight_sink_by_id(struct device *dev, const void *data) { struct coresight_device *csdev = to_coresight_device(dev); - unsigned long hash; if (csdev->type == CORESIGHT_DEV_TYPE_SINK || - csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { - - if (!csdev->ea) - return 0; - /* - * See function etm_perf_add_symlink_sink() to know where - * this comes from. 
- */ - hash = (unsigned long)csdev->ea->var; - - if ((u32)hash == *(u32 *)data) + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + if (coresight_get_sink_id(csdev) == *(u32 *)data) return 1; } @@ -614,9 +656,54 @@ static void coresight_drop_device(struct coresight_device *csdev) } } +/* + * coresight device will read their existing or alloc a trace ID, if their trace_id + * callback is set. + * + * Return 0 if the trace_id callback is not set. + * Return the result of the trace_id callback if it is set. The return value + * will be the trace_id if successful, and an error number if it fails. + */ +static int coresight_get_trace_id(struct coresight_device *csdev, + enum cs_mode mode, + struct coresight_device *sink) +{ + if (coresight_ops(csdev)->trace_id) + return coresight_ops(csdev)->trace_id(csdev, mode, sink); + + return 0; +} + +/* + * Call this after creating the path and before enabling it. This leaves + * the trace ID set on the path, or it remains 0 if it couldn't be assigned. + */ +void coresight_path_assign_trace_id(struct coresight_path *path, + enum cs_mode mode) +{ + struct coresight_device *sink = coresight_get_sink(path); + struct coresight_node *nd; + int trace_id; + + list_for_each_entry(nd, &path->path_list, link) { + /* Assign a trace ID to the path for the first device that wants to do it */ + trace_id = coresight_get_trace_id(nd->csdev, mode, sink); + + /* + * 0 in this context is that it didn't want to assign so keep searching. + * Non 0 is either success or fail. + */ + if (trace_id != 0) { + path->trace_id = trace_id; + return; + } + } +} + /** * _coresight_build_path - recursively build a path from a @csdev to a sink. * @csdev: The device to start from. + * @source: The trace source device of the path. * @sink: The final sink we want in this path. * @path: The list to add devices to. * @@ -626,8 +713,9 @@ static void coresight_drop_device(struct coresight_device *csdev) * the source is the first device and the sink the last one. 
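/*
 * A rough sketch (hypothetical caller, error handling trimmed) of how the
 * reworked coresight_path API in this patch fits together: build a path, let
 * coresight_path_assign_trace_id() hand a trace ID to the first device that
 * wants one, then enable. A real caller keeps the path and later runs
 * coresight_disable_path() and coresight_release_path().
 */
#include <linux/coresight.h>
#include <linux/err.h>
#include "coresight-priv.h"             /* coresight_build_path() and friends */
#include "coresight-trace-id.h"         /* IS_VALID_CS_TRACE_ID() */

static int example_start_trace(struct coresight_device *source,
                               struct coresight_device *sink, void *sink_data)
{
        struct coresight_path *path;
        int ret;

        path = coresight_build_path(source, sink);
        if (IS_ERR(path))
                return PTR_ERR(path);

        coresight_path_assign_trace_id(path, CS_MODE_SYSFS);
        if (!IS_VALID_CS_TRACE_ID(path->trace_id)) {
                coresight_release_path(path);
                return -EINVAL;
        }

        ret = coresight_enable_path(path, CS_MODE_SYSFS, sink_data);
        if (ret)
                coresight_release_path(path);
        return ret;
}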
*/ static int _coresight_build_path(struct coresight_device *csdev, + struct coresight_device *source, struct coresight_device *sink, - struct list_head *path) + struct coresight_path *path) { int i, ret; bool found = false; @@ -639,7 +727,7 @@ static int _coresight_build_path(struct coresight_device *csdev, if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) && sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) { - if (_coresight_build_path(sink, sink, path) == 0) { + if (_coresight_build_path(sink, source, sink, path) == 0) { found = true; goto out; } @@ -650,8 +738,12 @@ static int _coresight_build_path(struct coresight_device *csdev, struct coresight_device *child_dev; child_dev = csdev->pdata->out_conns[i]->dest_dev; + + if (coresight_blocks_source(source, csdev->pdata->out_conns[i])) + continue; + if (child_dev && - _coresight_build_path(child_dev, sink, path) == 0) { + _coresight_build_path(child_dev, source, sink, path) == 0) { found = true; break; } @@ -676,27 +768,27 @@ out: return -ENOMEM; node->csdev = csdev; - list_add(&node->link, path); + list_add(&node->link, &path->path_list); return 0; } -struct list_head *coresight_build_path(struct coresight_device *source, +struct coresight_path *coresight_build_path(struct coresight_device *source, struct coresight_device *sink) { - struct list_head *path; + struct coresight_path *path; int rc; if (!sink) return ERR_PTR(-EINVAL); - path = kzalloc(sizeof(struct list_head), GFP_KERNEL); + path = kzalloc(sizeof(struct coresight_path), GFP_KERNEL); if (!path) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(path); + INIT_LIST_HEAD(&path->path_list); - rc = _coresight_build_path(source, sink, path); + rc = _coresight_build_path(source, source, sink, path); if (rc) { kfree(path); return ERR_PTR(rc); @@ -712,12 +804,12 @@ struct list_head *coresight_build_path(struct coresight_device *source, * Go through all the elements of a path and 1) removed it from the list and * 2) free the memory allocated for each node. */ -void coresight_release_path(struct list_head *path) +void coresight_release_path(struct coresight_path *path) { struct coresight_device *csdev; struct coresight_node *nd, *next; - list_for_each_entry_safe(nd, next, path, link) { + list_for_each_entry_safe(nd, next, &path->path_list, link) { csdev = nd->csdev; coresight_drop_device(csdev); @@ -902,6 +994,7 @@ static void coresight_device_release(struct device *dev) struct coresight_device *csdev = to_coresight_device(dev); fwnode_handle_put(csdev->dev.fwnode); + free_percpu(csdev->perf_sink_id_map.cpu_map); kfree(csdev); } @@ -924,6 +1017,16 @@ static int coresight_orphan_match(struct device *dev, void *data) for (i = 0; i < src_csdev->pdata->nr_outconns; i++) { conn = src_csdev->pdata->out_conns[i]; + /* Fix filter source device before skip the port */ + if (conn->filter_src_fwnode && !conn->filter_src_dev) { + if (dst_csdev && + (conn->filter_src_fwnode == dst_csdev->dev.fwnode) && + !WARN_ON_ONCE(!coresight_is_device_source(dst_csdev))) + conn->filter_src_dev = dst_csdev; + else + still_orphan = true; + } + /* Skip the port if it's already connected. 
*/ if (conn->dest_dev) continue; @@ -974,18 +1077,40 @@ static int coresight_fixup_orphan_conns(struct coresight_device *csdev) csdev, coresight_orphan_match); } +static int coresight_clear_filter_source(struct device *dev, void *data) +{ + int i; + struct coresight_device *source = data; + struct coresight_device *csdev = to_coresight_device(dev); + + for (i = 0; i < csdev->pdata->nr_outconns; ++i) { + if (csdev->pdata->out_conns[i]->filter_src_dev == source) + csdev->pdata->out_conns[i]->filter_src_dev = NULL; + } + return 0; +} + /* coresight_remove_conns - Remove other device's references to this device */ static void coresight_remove_conns(struct coresight_device *csdev) { int i, j; struct coresight_connection *conn; + if (coresight_is_device_source(csdev)) + bus_for_each_dev(&coresight_bustype, NULL, csdev, + coresight_clear_filter_source); + /* * Remove the input connection references from the destination device * for each output connection. */ for (i = 0; i < csdev->pdata->nr_outconns; i++) { conn = csdev->pdata->out_conns[i]; + if (conn->filter_src_fwnode) { + conn->filter_src_dev = NULL; + fwnode_handle_put(conn->filter_src_fwnode); + } + if (!conn->dest_dev) continue; @@ -1014,18 +1139,20 @@ static void coresight_remove_conns(struct coresight_device *csdev) } /** - * coresight_timeout - loop until a bit has changed to a specific register - * state. + * coresight_timeout_action - loop until a bit has changed to a specific register + * state, with a callback after every trial. * @csa: coresight device access for the device * @offset: Offset of the register from the base of the device. * @position: the position of the bit of interest. * @value: the value the bit should have. + * @cb: Call back after each trial. * * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if * TIMEOUT_US has elapsed, which ever happens first. */ -int coresight_timeout(struct csdev_access *csa, u32 offset, - int position, int value) +int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb) { int i; u32 val; @@ -1041,7 +1168,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, if (!(val & BIT(position))) return 0; } - + if (cb) + cb(csa, offset, position, value); /* * Delay is arbitrary - the specification doesn't say how long * we are expected to wait. 
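/*
 * A hedged usage sketch for coresight_timeout_action() added above: it
 * behaves like coresight_timeout(), but the callback runs after every failed
 * poll so a driver can keep prodding the hardware while it waits.
 * EXAMPLE_STATUS, EXAMPLE_CTL and bit 2 are hypothetical register offsets,
 * not taken from any real CoreSight component.
 */
#include <linux/coresight.h>

#define EXAMPLE_STATUS  0x00c   /* hypothetical status register */
#define EXAMPLE_CTL     0x010   /* hypothetical control register */

static void example_retry_cb(struct csdev_access *csa, u32 offset,
                             int position, int value)
{
        /* re-request a manual flush each time the ready bit is still clear */
        csdev_access_relaxed_write32(csa, 0x1, EXAMPLE_CTL);
}

static int example_wait_ready(struct csdev_access *csa)
{
        /* wait for bit 2 of EXAMPLE_STATUS to read as 1, nudging in between */
        return coresight_timeout_action(csa, EXAMPLE_STATUS, 2, 1,
                                        example_retry_cb);
}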
Extra check required to make sure @@ -1053,6 +1181,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, return -EAGAIN; } +EXPORT_SYMBOL_GPL(coresight_timeout_action); + +int coresight_timeout(struct csdev_access *csa, u32 offset, + int position, int value) +{ + return coresight_timeout_action(csa, offset, position, value, NULL); +} EXPORT_SYMBOL_GPL(coresight_timeout); u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset) @@ -1159,6 +1294,16 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev)); dev_set_name(&csdev->dev, "%s", desc->name); + if (csdev->type == CORESIGHT_DEV_TYPE_SINK || + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + raw_spin_lock_init(&csdev->perf_sink_id_map.lock); + csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t); + if (!csdev->perf_sink_id_map.cpu_map) { + kfree(csdev); + ret = -ENOMEM; + goto err_out; + } + } /* * Make sure the device registration and the connection fixup * are synchronised, so that we don't see uninitialised devices @@ -1365,6 +1510,36 @@ const struct bus_type coresight_bustype = { .name = "coresight", }; +static int coresight_panic_sync(struct device *dev, void *data) +{ + int mode; + struct coresight_device *csdev; + + /* Run through panic sync handlers for all enabled devices */ + csdev = container_of(dev, struct coresight_device, dev); + mode = coresight_get_mode(csdev); + + if ((mode == CS_MODE_SYSFS) || (mode == CS_MODE_PERF)) { + if (panic_ops(csdev)) + panic_ops(csdev)->sync(csdev); + } + + return 0; +} + +static int coresight_panic_cb(struct notifier_block *self, + unsigned long v, void *p) +{ + bus_for_each_dev(&coresight_bustype, NULL, NULL, + coresight_panic_sync); + + return 0; +} + +static struct notifier_block coresight_notifier = { + .notifier_call = coresight_panic_cb, +}; + static int __init coresight_init(void) { int ret; @@ -1377,11 +1552,20 @@ static int __init coresight_init(void) if (ret) goto exit_bus_unregister; + /* Register function to be called for panic */ + ret = atomic_notifier_chain_register(&panic_notifier_list, + &coresight_notifier); + if (ret) + goto exit_perf; + /* initialise the coresight syscfg API */ ret = cscfg_init(); if (!ret) return 0; + atomic_notifier_chain_unregister(&panic_notifier_list, + &coresight_notifier); +exit_perf: etm_perf_exit(); exit_bus_unregister: bus_unregister(&coresight_bustype); @@ -1391,6 +1575,8 @@ exit_bus_unregister: static void __exit coresight_exit(void) { cscfg_exit(); + atomic_notifier_chain_unregister(&panic_notifier_list, + &coresight_notifier); etm_perf_exit(); bus_unregister(&coresight_bustype); } @@ -1398,6 +1584,67 @@ static void __exit coresight_exit(void) module_init(coresight_init); module_exit(coresight_exit); +int coresight_init_driver(const char *drv, struct amba_driver *amba_drv, + struct platform_driver *pdev_drv) +{ + int ret; + + ret = amba_driver_register(amba_drv); + if (ret) { + pr_err("%s: error registering AMBA driver\n", drv); + return ret; + } + + ret = platform_driver_register(pdev_drv); + if (!ret) + return 0; + + pr_err("%s: error registering platform driver\n", drv); + amba_driver_unregister(amba_drv); + return ret; +} +EXPORT_SYMBOL_GPL(coresight_init_driver); + +void coresight_remove_driver(struct amba_driver *amba_drv, + struct platform_driver *pdev_drv) +{ + amba_driver_unregister(amba_drv); + platform_driver_unregister(pdev_drv); +} +EXPORT_SYMBOL_GPL(coresight_remove_driver); + +int coresight_etm_get_trace_id(struct 
coresight_device *csdev, enum cs_mode mode, + struct coresight_device *sink) +{ + int cpu, trace_id; + + if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE || !source_ops(csdev)->cpu_id) + return -EINVAL; + + cpu = source_ops(csdev)->cpu_id(csdev); + switch (mode) { + case CS_MODE_SYSFS: + trace_id = coresight_trace_id_get_cpu_id(cpu); + break; + case CS_MODE_PERF: + if (WARN_ON(!sink)) + return -EINVAL; + + trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map); + break; + default: + trace_id = -EINVAL; + break; + } + + if (!IS_VALID_CS_TRACE_ID(trace_id)) + dev_err(&csdev->dev, + "Failed to allocate trace ID on CPU%d\n", cpu); + + return trace_id; +} +EXPORT_SYMBOL_GPL(coresight_etm_get_trace_id); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>"); MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>"); diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index 1874df7c6a73..342c3aaf414d 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -4,6 +4,7 @@ * * Author: Leo Yan <leo.yan@linaro.org> */ +#include <linux/acpi.h> #include <linux/amba/bus.h> #include <linux/coresight.h> #include <linux/cpu.h> @@ -18,6 +19,7 @@ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/panic_notifier.h> +#include <linux/platform_device.h> #include <linux/pm_qos.h> #include <linux/slab.h> #include <linux/smp.h> @@ -84,6 +86,7 @@ #define DEBUG_WAIT_TIMEOUT 32000 struct debug_drvdata { + struct clk *pclk; void __iomem *base; struct device *dev; int cpu; @@ -557,18 +560,12 @@ static void debug_func_exit(void) debugfs_remove_recursive(debug_debugfs_dir); } -static int debug_probe(struct amba_device *adev, const struct amba_id *id) +static int __debug_probe(struct device *dev, struct resource *res) { + struct debug_drvdata *drvdata = dev_get_drvdata(dev); void __iomem *base; - struct device *dev = &adev->dev; - struct debug_drvdata *drvdata; - struct resource *res = &adev->res; int ret; - drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) - return -ENOMEM; - drvdata->cpu = coresight_get_cpu(dev); if (drvdata->cpu < 0) return drvdata->cpu; @@ -579,10 +576,7 @@ static int debug_probe(struct amba_device *adev, const struct amba_id *id) return -EBUSY; } - drvdata->dev = &adev->dev; - amba_set_drvdata(adev, drvdata); - - /* Validity for the resource is already checked by the AMBA core */ + drvdata->dev = dev; base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -629,10 +623,21 @@ err: return ret; } -static void debug_remove(struct amba_device *adev) +static int debug_probe(struct amba_device *adev, const struct amba_id *id) { - struct device *dev = &adev->dev; - struct debug_drvdata *drvdata = amba_get_drvdata(adev); + struct debug_drvdata *drvdata; + + drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + amba_set_drvdata(adev, drvdata); + return __debug_probe(&adev->dev, &adev->res); +} + +static void __debug_remove(struct device *dev) +{ + struct debug_drvdata *drvdata = dev_get_drvdata(dev); per_cpu(debug_drvdata, drvdata->cpu) = NULL; @@ -646,6 +651,11 @@ static void debug_remove(struct amba_device *adev) debug_func_exit(); } +static void debug_remove(struct amba_device *adev) +{ + __debug_remove(&adev->dev); +} + static const struct amba_cs_uci_id uci_id_debug[] = { { /* CPU Debug UCI data */ @@ -677,7 
+687,102 @@ static struct amba_driver debug_driver = { .id_table = debug_ids, }; -module_amba_driver(debug_driver); +static int debug_platform_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct debug_drvdata *drvdata; + int ret = 0; + + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev); + if (IS_ERR(drvdata->pclk)) + return -ENODEV; + + dev_set_drvdata(&pdev->dev, drvdata); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = __debug_probe(&pdev->dev, res); + if (ret) { + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); + } + return ret; +} + +static void debug_platform_remove(struct platform_device *pdev) +{ + struct debug_drvdata *drvdata = dev_get_drvdata(&pdev->dev); + + if (WARN_ON(!drvdata)) + return; + + __debug_remove(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id debug_platform_ids[] = { + {"ARMHC503", 0, 0, 0}, /* ARM CoreSight Debug */ + {}, +}; +MODULE_DEVICE_TABLE(acpi, debug_platform_ids); +#endif + +#ifdef CONFIG_PM +static int debug_runtime_suspend(struct device *dev) +{ + struct debug_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); + return 0; +} + +static int debug_runtime_resume(struct device *dev) +{ + struct debug_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_prepare_enable(drvdata->pclk); + return 0; +} +#endif + +static const struct dev_pm_ops debug_dev_pm_ops = { + SET_RUNTIME_PM_OPS(debug_runtime_suspend, debug_runtime_resume, NULL) +}; + +static struct platform_driver debug_platform_driver = { + .probe = debug_platform_probe, + .remove = debug_platform_remove, + .driver = { + .name = "coresight-debug-platform", + .acpi_match_table = ACPI_PTR(debug_platform_ids), + .suppress_bind_attrs = true, + .pm = &debug_dev_pm_ops, + }, +}; + +static int __init debug_init(void) +{ + return coresight_init_driver("debug", &debug_driver, &debug_platform_driver); +} + +static void __exit debug_exit(void) +{ + coresight_remove_driver(&debug_driver, &debug_platform_driver); +} +module_init(debug_init); +module_exit(debug_exit); MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>"); MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver"); diff --git a/drivers/hwtracing/coresight/coresight-ctcu-core.c b/drivers/hwtracing/coresight/coresight-ctcu-core.c new file mode 100644 index 000000000000..c6bafc96db96 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-ctcu-core.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/coresight.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> + +#include "coresight-ctcu.h" +#include "coresight-priv.h" + +DEFINE_CORESIGHT_DEVLIST(ctcu_devs, "ctcu"); + +#define ctcu_writel(drvdata, val, offset) __raw_writel((val), drvdata->base + offset) +#define ctcu_readl(drvdata, offset) __raw_readl(drvdata->base + offset) + +/* + * The TMC Coresight Control Unit utilizes four ATID registers to control the data + * filter function based on the trace ID for each TMC ETR sink. The length of each + * ATID register is 32 bits. Therefore, an ETR device has a 128-bit long field + * in CTCU. Each trace ID is represented by one bit in that filed. + * e.g. ETR0ATID0 layout, set bit 5 for traceid 5 + * bit5 + * ------------------------------------------------------ + * | |28| |24| |20| |16| |12| |8| 1|4| |0| + * ------------------------------------------------------ + * + * e.g. ETR0: + * 127 0 from ATID_offset for ETR0ATID0 + * ------------------------- + * |ATID3|ATID2|ATID1|ATID0| + */ +#define CTCU_ATID_REG_OFFSET(traceid, atid_offset) \ + ((traceid / 32) * 4 + atid_offset) + +#define CTCU_ATID_REG_BIT(traceid) (traceid % 32) +#define CTCU_ATID_REG_SIZE 0x10 +#define CTCU_ETR0_ATID0 0xf8 +#define CTCU_ETR1_ATID0 0x108 + +static const struct ctcu_etr_config sa8775p_etr_cfgs[] = { + { + .atid_offset = CTCU_ETR0_ATID0, + .port_num = 0, + }, + { + .atid_offset = CTCU_ETR1_ATID0, + .port_num = 1, + }, +}; + +static const struct ctcu_config sa8775p_cfgs = { + .etr_cfgs = sa8775p_etr_cfgs, + .num_etr_config = ARRAY_SIZE(sa8775p_etr_cfgs), +}; + +static void ctcu_program_atid_register(struct ctcu_drvdata *drvdata, u32 reg_offset, + u8 bit, bool enable) +{ + u32 val; + + CS_UNLOCK(drvdata->base); + val = ctcu_readl(drvdata, reg_offset); + if (enable) + val |= BIT(bit); + else + val &= ~BIT(bit); + + ctcu_writel(drvdata, val, reg_offset); + CS_LOCK(drvdata->base); +} + +/* + * __ctcu_set_etr_traceid: Set bit in the ATID register based on trace ID when enable is true. + * Reset the bit of the ATID register based on trace ID when enable is false. + * + * @csdev: coresight_device of CTCU. + * @traceid: trace ID of the source tracer. + * @port_num: port number connected to TMC ETR sink. + * @enable: True for set bit and false for reset bit. + * + * Returns 0 indicates success. Non-zero result means failure. 
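/*
 * A worked example (standalone sketch that mirrors the macros above rather
 * than reusing the driver): trace ID 40 on the sa8775p ETR0 bank, whose
 * ATID0 register sits at 0xf8, lands in ATID1 (offset 0xfc) bit 8.
 */
#include <stdio.h>

#define CTCU_ETR0_ATID0                 0xf8
#define CTCU_ATID_REG_OFFSET(tid, base) (((tid) / 32) * 4 + (base))
#define CTCU_ATID_REG_BIT(tid)          ((tid) % 32)

int main(void)
{
        unsigned int traceid = 40;

        printf("reg 0x%x, bit %u\n",
               CTCU_ATID_REG_OFFSET(traceid, CTCU_ETR0_ATID0),
               CTCU_ATID_REG_BIT(traceid));     /* prints: reg 0xfc, bit 8 */
        return 0;
}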
+ */ +static int __ctcu_set_etr_traceid(struct coresight_device *csdev, u8 traceid, int port_num, + bool enable) +{ + struct ctcu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + u32 atid_offset, reg_offset; + u8 refcnt, bit; + + atid_offset = drvdata->atid_offset[port_num]; + if (atid_offset == 0) + return -EINVAL; + + bit = CTCU_ATID_REG_BIT(traceid); + reg_offset = CTCU_ATID_REG_OFFSET(traceid, atid_offset); + if (reg_offset - atid_offset > CTCU_ATID_REG_SIZE) + return -EINVAL; + + guard(raw_spinlock_irqsave)(&drvdata->spin_lock); + refcnt = drvdata->traceid_refcnt[port_num][traceid]; + /* Only program the atid register when the refcnt value is 1 or 0 */ + if ((enable && !refcnt++) || (!enable && !--refcnt)) + ctcu_program_atid_register(drvdata, reg_offset, bit, enable); + + drvdata->traceid_refcnt[port_num][traceid] = refcnt; + + return 0; +} + +/* + * Searching the sink device from helper's view in case there are multiple helper devices + * connected to the sink device. + */ +static int ctcu_get_active_port(struct coresight_device *sink, struct coresight_device *helper) +{ + struct coresight_platform_data *pdata = helper->pdata; + int i; + + for (i = 0; i < pdata->nr_inconns; ++i) { + if (pdata->in_conns[i]->src_dev == sink) + return pdata->in_conns[i]->dest_port; + } + + return -EINVAL; +} + +static int ctcu_set_etr_traceid(struct coresight_device *csdev, struct coresight_path *path, + bool enable) +{ + struct coresight_device *sink = coresight_get_sink(path); + u8 traceid = path->trace_id; + int port_num; + + if ((sink == NULL) || !IS_VALID_CS_TRACE_ID(traceid)) { + dev_err(&csdev->dev, "Invalid sink device or trace ID\n"); + return -EINVAL; + } + + port_num = ctcu_get_active_port(sink, csdev); + if (port_num < 0) + return -EINVAL; + + dev_dbg(&csdev->dev, "traceid is %d\n", traceid); + + return __ctcu_set_etr_traceid(csdev, traceid, port_num, enable); +} + +static int ctcu_enable(struct coresight_device *csdev, enum cs_mode mode, void *data) +{ + struct coresight_path *path = (struct coresight_path *)data; + + return ctcu_set_etr_traceid(csdev, path, true); +} + +static int ctcu_disable(struct coresight_device *csdev, void *data) +{ + struct coresight_path *path = (struct coresight_path *)data; + + return ctcu_set_etr_traceid(csdev, path, false); +} + +static const struct coresight_ops_helper ctcu_helper_ops = { + .enable = ctcu_enable, + .disable = ctcu_disable, +}; + +static const struct coresight_ops ctcu_ops = { + .helper_ops = &ctcu_helper_ops, +}; + +static int ctcu_probe(struct platform_device *pdev) +{ + const struct ctcu_etr_config *etr_cfg; + struct coresight_platform_data *pdata; + struct coresight_desc desc = { 0 }; + struct device *dev = &pdev->dev; + const struct ctcu_config *cfgs; + struct ctcu_drvdata *drvdata; + void __iomem *base; + int i; + + desc.name = coresight_alloc_device_name(&ctcu_devs, dev); + if (!desc.name) + return -ENOMEM; + + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + pdata = coresight_get_platform_data(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + dev->platform_data = pdata; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(base)) + return PTR_ERR(base); + + drvdata->apb_clk = coresight_get_enable_apb_pclk(dev); + if (IS_ERR(drvdata->apb_clk)) + return -ENODEV; + + cfgs = of_device_get_match_data(dev); + if (cfgs) { + if (cfgs->num_etr_config <= ETR_MAX_NUM) { + for (i = 0; i < cfgs->num_etr_config; i++) { + etr_cfg = &cfgs->etr_cfgs[i]; + 
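/*
 * A standalone sketch (not the driver code) of the refcount gate used by
 * __ctcu_set_etr_traceid() above: the ATID bit is only touched on the
 * 0 -> 1 enable transition and the 1 -> 0 disable transition, so nested
 * users of the same trace ID on one port share a single programmed bit.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int refcnt;

static void example_set_traceid(bool enable)
{
        if ((enable && !refcnt++) || (!enable && !--refcnt))
                printf("program hardware: %s\n", enable ? "set" : "clear");
        else
                printf("refcount only, now %u\n", refcnt);
}

int main(void)
{
        example_set_traceid(true);      /* program hardware: set   */
        example_set_traceid(true);      /* refcount only, now 2    */
        example_set_traceid(false);     /* refcount only, now 1    */
        example_set_traceid(false);     /* program hardware: clear */
        return 0;
}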
drvdata->atid_offset[i] = etr_cfg->atid_offset; + } + } + } + + drvdata->base = base; + drvdata->dev = dev; + platform_set_drvdata(pdev, drvdata); + + desc.type = CORESIGHT_DEV_TYPE_HELPER; + desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CTCU; + desc.pdata = pdata; + desc.dev = dev; + desc.ops = &ctcu_ops; + desc.access = CSDEV_ACCESS_IOMEM(base); + + drvdata->csdev = coresight_register(&desc); + if (IS_ERR(drvdata->csdev)) { + if (!IS_ERR_OR_NULL(drvdata->apb_clk)) + clk_put(drvdata->apb_clk); + + return PTR_ERR(drvdata->csdev); + } + + return 0; +} + +static void ctcu_remove(struct platform_device *pdev) +{ + struct ctcu_drvdata *drvdata = platform_get_drvdata(pdev); + + coresight_unregister(drvdata->csdev); +} + +static int ctcu_platform_probe(struct platform_device *pdev) +{ + int ret; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = ctcu_probe(pdev); + pm_runtime_put(&pdev->dev); + if (ret) + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static void ctcu_platform_remove(struct platform_device *pdev) +{ + struct ctcu_drvdata *drvdata = platform_get_drvdata(pdev); + + if (WARN_ON(!drvdata)) + return; + + ctcu_remove(pdev); + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->apb_clk)) + clk_put(drvdata->apb_clk); +} + +#ifdef CONFIG_PM +static int ctcu_runtime_suspend(struct device *dev) +{ + struct ctcu_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk)) + clk_disable_unprepare(drvdata->apb_clk); + + return 0; +} + +static int ctcu_runtime_resume(struct device *dev) +{ + struct ctcu_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk)) + clk_prepare_enable(drvdata->apb_clk); + + return 0; +} +#endif + +static const struct dev_pm_ops ctcu_dev_pm_ops = { + SET_RUNTIME_PM_OPS(ctcu_runtime_suspend, ctcu_runtime_resume, NULL) +}; + +static const struct of_device_id ctcu_match[] = { + {.compatible = "qcom,sa8775p-ctcu", .data = &sa8775p_cfgs}, + {} +}; + +static struct platform_driver ctcu_driver = { + .probe = ctcu_platform_probe, + .remove = ctcu_platform_remove, + .driver = { + .name = "coresight-ctcu", + .of_match_table = ctcu_match, + .pm = &ctcu_dev_pm_ops, + .suppress_bind_attrs = true, + }, +}; +module_platform_driver(ctcu_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CoreSight TMC Control Unit driver"); diff --git a/drivers/hwtracing/coresight/coresight-ctcu.h b/drivers/hwtracing/coresight/coresight-ctcu.h new file mode 100644 index 000000000000..e9594c38dd91 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-ctcu.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _CORESIGHT_CTCU_H +#define _CORESIGHT_CTCU_H +#include "coresight-trace-id.h" + +/* Maximum number of supported ETR devices for a single CTCU. */ +#define ETR_MAX_NUM 2 + +/** + * struct ctcu_etr_config + * @atid_offset: offset to the ATID0 Register. + * @port_num: in-port number of CTCU device that connected to ETR. 
+ */ +struct ctcu_etr_config { + const u32 atid_offset; + const u32 port_num; +}; + +struct ctcu_config { + const struct ctcu_etr_config *etr_cfgs; + int num_etr_config; +}; + +struct ctcu_drvdata { + void __iomem *base; + struct clk *apb_clk; + struct device *dev; + struct coresight_device *csdev; + raw_spinlock_t spin_lock; + u32 atid_offset[ETR_MAX_NUM]; + /* refcnt for each traceid of each sink */ + u8 traceid_refcnt[ETR_MAX_NUM][CORESIGHT_TRACE_ID_RES_TOP]; +}; + +#endif diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index e805617020d0..80f6265e3740 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -93,7 +93,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata) unsigned long flags; int rc = 0; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* no need to do anything if enabled or unpowered*/ if (config->hw_enabled || !config->hw_powered) @@ -108,7 +108,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata) config->hw_enabled = true; drvdata->config.enable_req_count++; - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return rc; cti_state_unchanged: @@ -116,7 +116,7 @@ cti_state_unchanged: /* cannot enable due to error */ cti_err_not_enabled: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return rc; } @@ -125,7 +125,7 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata) { struct cti_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->hw_powered = true; /* no need to do anything if no enable request */ @@ -138,12 +138,12 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata) cti_write_all_hw_regs(drvdata); config->hw_enabled = true; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return; /* did not re-enable due to no claim / no request */ cti_hp_not_enabled: - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); } /* disable hardware */ @@ -153,7 +153,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata) struct coresight_device *csdev = drvdata->csdev; int ret = 0; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* don't allow negative refcounts, return an error */ if (!drvdata->config.enable_req_count) { @@ -177,12 +177,12 @@ static int cti_disable_hw(struct cti_drvdata *drvdata) coresight_disclaim_device_unlocked(csdev); CS_LOCK(drvdata->base); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return ret; /* not disabled this call */ cti_not_disabled: - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return ret; } @@ -198,11 +198,11 @@ void cti_write_intack(struct device *dev, u32 ackval) struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); struct cti_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* write if enabled */ if (cti_active(config)) cti_write_single_reg(drvdata, CTIINTACK, ackval); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); } /* @@ -369,7 +369,7 @@ int cti_channel_trig_op(struct device *dev, enum cti_chan_op op, reg_offset = (direction == CTI_TRIG_IN ? 
CTIINEN(trigger_idx) : CTIOUTEN(trigger_idx)); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* read - modify write - the trigger / channel enable value */ reg_value = direction == CTI_TRIG_IN ? config->ctiinen[trigger_idx] : @@ -388,7 +388,7 @@ int cti_channel_trig_op(struct device *dev, enum cti_chan_op op, /* write through if enabled */ if (cti_active(config)) cti_write_single_reg(drvdata, reg_offset, reg_value); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return 0; } @@ -406,7 +406,7 @@ int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op, chan_bitmask = BIT(channel_idx); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); reg_value = config->ctigate; switch (op) { case CTI_GATE_CHAN_ENABLE: @@ -426,7 +426,7 @@ int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op, if (cti_active(config)) cti_write_single_reg(drvdata, CTIGATE, reg_value); } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return err; } @@ -445,7 +445,7 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op, chan_bitmask = BIT(channel_idx); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); reg_value = config->ctiappset; switch (op) { case CTI_CHAN_SET: @@ -473,7 +473,7 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op, if ((err == 0) && cti_active(config)) cti_write_single_reg(drvdata, reg_offset, reg_value); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return err; } @@ -676,7 +676,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu)) return NOTIFY_BAD; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); switch (cmd) { case CPU_PM_ENTER: @@ -716,7 +716,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, } cti_notify_exit: - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return notify_res; } @@ -743,11 +743,11 @@ static int cti_dying_cpu(unsigned int cpu) if (!drvdata) return 0; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); drvdata->config.hw_powered = false; if (drvdata->config.hw_enabled) coresight_disclaim_device(drvdata->csdev); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return 0; } @@ -888,7 +888,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id) drvdata->ctidev.ctm_id = 0; INIT_LIST_HEAD(&drvdata->ctidev.trig_cons); - spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock); /* initialise CTI driver config values */ cti_set_default_config(dev, drvdata); @@ -982,7 +982,6 @@ MODULE_DEVICE_TABLE(amba, cti_ids); static struct amba_driver cti_driver = { .drv = { .name = "coresight-cti", - .owner = THIS_MODULE, .suppress_bind_attrs = true, }, .probe = cti_probe, diff --git a/drivers/hwtracing/coresight/coresight-cti-platform.c b/drivers/hwtracing/coresight/coresight-cti-platform.c index ccef04f27f12..d0ae10bf6128 100644 --- a/drivers/hwtracing/coresight/coresight-cti-platform.c +++ b/drivers/hwtracing/coresight/coresight-cti-platform.c @@ -416,20 +416,16 @@ static int cti_plat_create_impdef_connections(struct device *dev, struct cti_drvdata *drvdata) { int rc = 0; - struct fwnode_handle *fwnode = dev_fwnode(dev); - struct fwnode_handle *child = NULL; - if (IS_ERR_OR_NULL(fwnode)) + if (IS_ERR_OR_NULL(dev_fwnode(dev))) return -EINVAL; - fwnode_for_each_child_node(fwnode, 
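/*
 * A hedged sketch (hypothetical driver and node name) of the
 * device_for_each_child_node_scoped() pattern this hunk converts to: the
 * child handle is declared by the macro and released automatically, so an
 * early break or return no longer needs fwnode_handle_put().
 */
#include <linux/device.h>
#include <linux/property.h>

static int example_count_conn_nodes(struct device *dev)
{
        int n = 0;

        device_for_each_child_node_scoped(dev, child) {
                if (fwnode_name_eq(child, "example-conns"))
                        n++;
        }

        return n;
}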
child) { + device_for_each_child_node_scoped(dev, child) { if (cti_plat_node_name_eq(child, CTI_DT_CONNS)) - rc = cti_plat_create_connection(dev, drvdata, - child); + rc = cti_plat_create_connection(dev, drvdata, child); if (rc != 0) break; } - fwnode_handle_put(child); return rc; } diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c index d25dd2737b49..572b80ee96fb 100644 --- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c @@ -84,11 +84,11 @@ static ssize_t enable_show(struct device *dev, bool enabled, powered; struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); enable_req = drvdata->config.enable_req_count; powered = drvdata->config.hw_powered; enabled = drvdata->config.hw_enabled; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); if (powered) return sprintf(buf, "%d\n", enabled); @@ -134,9 +134,9 @@ static ssize_t powered_show(struct device *dev, bool powered; struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); powered = drvdata->config.hw_powered; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return sprintf(buf, "%d\n", powered); } @@ -181,10 +181,10 @@ static ssize_t coresight_cti_reg_show(struct device *dev, u32 val = 0; pm_runtime_get_sync(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); if (drvdata->config.hw_powered) val = readl_relaxed(drvdata->base + cti_attr->off); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); pm_runtime_put_sync(dev->parent); return sysfs_emit(buf, "0x%x\n", val); } @@ -202,10 +202,10 @@ static __maybe_unused ssize_t coresight_cti_reg_store(struct device *dev, return -EINVAL; pm_runtime_get_sync(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); if (drvdata->config.hw_powered) cti_write_single_reg(drvdata, cti_attr->off, val); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); pm_runtime_put_sync(dev->parent); return size; } @@ -264,7 +264,7 @@ static ssize_t cti_reg32_show(struct device *dev, char *buf, struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); struct cti_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); if ((reg_offset >= 0) && cti_active(config)) { CS_UNLOCK(drvdata->base); val = readl_relaxed(drvdata->base + reg_offset); @@ -274,7 +274,7 @@ static ssize_t cti_reg32_show(struct device *dev, char *buf, } else if (pcached_val) { val = *pcached_val; } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return sprintf(buf, "%#x\n", val); } @@ -293,7 +293,7 @@ static ssize_t cti_reg32_store(struct device *dev, const char *buf, if (kstrtoul(buf, 0, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* local store */ if (pcached_val) *pcached_val = (u32)val; @@ -301,7 +301,7 @@ static ssize_t cti_reg32_store(struct device *dev, const char *buf, /* write through if offset and enabled */ if ((reg_offset >= 0) && cti_active(config)) cti_write_single_reg(drvdata, reg_offset, val); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } @@ -349,9 +349,9 @@ static ssize_t inout_sel_store(struct device *dev, if (val > (CTIINOUTEN_MAX - 1)) return -EINVAL; - 
spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); drvdata->config.ctiinout_sel = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(inout_sel); @@ -364,10 +364,10 @@ static ssize_t inen_show(struct device *dev, int index; struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); index = drvdata->config.ctiinout_sel; val = drvdata->config.ctiinen[index]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return sprintf(buf, "%#lx\n", val); } @@ -383,14 +383,14 @@ static ssize_t inen_store(struct device *dev, if (kstrtoul(buf, 0, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); index = config->ctiinout_sel; config->ctiinen[index] = val; /* write through if enabled */ if (cti_active(config)) cti_write_single_reg(drvdata, CTIINEN(index), val); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(inen); @@ -403,10 +403,10 @@ static ssize_t outen_show(struct device *dev, int index; struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); index = drvdata->config.ctiinout_sel; val = drvdata->config.ctiouten[index]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return sprintf(buf, "%#lx\n", val); } @@ -422,14 +422,14 @@ static ssize_t outen_store(struct device *dev, if (kstrtoul(buf, 0, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); index = config->ctiinout_sel; config->ctiouten[index] = val; /* write through if enabled */ if (cti_active(config)) cti_write_single_reg(drvdata, CTIOUTEN(index), val); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(outen); @@ -463,7 +463,7 @@ static ssize_t appclear_store(struct device *dev, if (kstrtoul(buf, 0, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* a 1'b1 in appclr clears down the same bit in appset*/ config->ctiappset &= ~val; @@ -471,7 +471,7 @@ static ssize_t appclear_store(struct device *dev, /* write through if enabled */ if (cti_active(config)) cti_write_single_reg(drvdata, CTIAPPCLEAR, val); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_WO(appclear); @@ -487,12 +487,12 @@ static ssize_t apppulse_store(struct device *dev, if (kstrtoul(buf, 0, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* write through if enabled */ if (cti_active(config)) cti_write_single_reg(drvdata, CTIAPPPULSE, val); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_WO(apppulse); @@ -681,9 +681,9 @@ static ssize_t trig_filter_enable_show(struct device *dev, u32 val; struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val = drvdata->config.trig_filter_enable; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return sprintf(buf, "%d\n", val); } @@ -697,9 +697,9 @@ static ssize_t trig_filter_enable_store(struct device *dev, if (kstrtoul(buf, 0, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); drvdata->config.trig_filter_enable = !!val; - 
spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(trig_filter_enable); @@ -728,7 +728,7 @@ static ssize_t chan_xtrigs_reset_store(struct device *dev, struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); struct cti_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* clear the CTI trigger / channel programming registers */ for (i = 0; i < config->nr_trig_max; i++) { @@ -747,7 +747,7 @@ static ssize_t chan_xtrigs_reset_store(struct device *dev, if (cti_active(config)) cti_write_all_hw_regs(drvdata); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_WO(chan_xtrigs_reset); @@ -768,9 +768,9 @@ static ssize_t chan_xtrigs_sel_store(struct device *dev, if (val > (drvdata->config.nr_ctm_channels - 1)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); drvdata->config.xtrig_rchan_sel = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } @@ -781,9 +781,9 @@ static ssize_t chan_xtrigs_sel_show(struct device *dev, unsigned long val; struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val = drvdata->config.xtrig_rchan_sel; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return sprintf(buf, "%ld\n", val); } @@ -838,12 +838,12 @@ static ssize_t print_chan_list(struct device *dev, unsigned long inuse_bits = 0, chan_mask; /* scan regs to get bitmap of channels in use. */ - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); for (i = 0; i < config->nr_trig_max; i++) { inuse_bits |= config->ctiinen[i]; inuse_bits |= config->ctiouten[i]; } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); /* inverse bits if printing free channels */ if (!inuse) diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h index cb9ee616d01f..16e310e7e9d4 100644 --- a/drivers/hwtracing/coresight/coresight-cti.h +++ b/drivers/hwtracing/coresight/coresight-cti.h @@ -175,7 +175,7 @@ struct cti_drvdata { void __iomem *base; struct coresight_device *csdev; struct cti_device ctidev; - spinlock_t spinlock; + raw_spinlock_t spinlock; struct cti_config config; struct list_head node; void (*csdev_release)(struct device *dev); diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c index ac70c0b491be..aaa92b5081e3 100644 --- a/drivers/hwtracing/coresight/coresight-dummy.c +++ b/drivers/hwtracing/coresight/coresight-dummy.c @@ -11,18 +11,24 @@ #include <linux/pm_runtime.h> #include "coresight-priv.h" +#include "coresight-trace-id.h" struct dummy_drvdata { struct device *dev; struct coresight_device *csdev; + u8 traceid; }; DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source"); DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink"); static int dummy_source_enable(struct coresight_device *csdev, - struct perf_event *event, enum cs_mode mode) + struct perf_event *event, enum cs_mode mode, + __maybe_unused struct coresight_path *path) { + if (!coresight_take_mode(csdev, mode)) + return -EBUSY; + dev_dbg(csdev->dev.parent, "Dummy source enabled\n"); return 0; @@ -31,9 +37,20 @@ static int dummy_source_enable(struct coresight_device *csdev, static void dummy_source_disable(struct coresight_device *csdev, struct perf_event *event) { + coresight_set_mode(csdev, 
CS_MODE_DISABLED); dev_dbg(csdev->dev.parent, "Dummy source disabled\n"); } +static int dummy_source_trace_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode, + __maybe_unused struct coresight_device *sink) +{ + struct dummy_drvdata *drvdata; + + drvdata = dev_get_drvdata(csdev->dev.parent); + + return drvdata->traceid; +} + static int dummy_sink_enable(struct coresight_device *csdev, enum cs_mode mode, void *data) { @@ -55,7 +72,8 @@ static const struct coresight_ops_source dummy_source_ops = { }; static const struct coresight_ops dummy_source_cs_ops = { - .source_ops = &dummy_source_ops, + .trace_id = dummy_source_trace_id, + .source_ops = &dummy_source_ops, }; static const struct coresight_ops_sink dummy_sink_ops = { @@ -67,6 +85,32 @@ static const struct coresight_ops dummy_sink_cs_ops = { .sink_ops = &dummy_sink_ops, }; +/* User can get the trace id of dummy source from this node. */ +static ssize_t traceid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long val; + struct dummy_drvdata *drvdata = dev_get_drvdata(dev->parent); + + val = drvdata->traceid; + return sysfs_emit(buf, "%#lx\n", val); +} +static DEVICE_ATTR_RO(traceid); + +static struct attribute *coresight_dummy_attrs[] = { + &dev_attr_traceid.attr, + NULL, +}; + +static const struct attribute_group coresight_dummy_group = { + .attrs = coresight_dummy_attrs, +}; + +static const struct attribute_group *coresight_dummy_groups[] = { + &coresight_dummy_group, + NULL, +}; + static int dummy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -74,6 +118,11 @@ static int dummy_probe(struct platform_device *pdev) struct coresight_platform_data *pdata; struct dummy_drvdata *drvdata; struct coresight_desc desc = { 0 }; + int ret = 0, trace_id = 0; + + drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; if (of_device_is_compatible(node, "arm,coresight-dummy-source")) { @@ -85,6 +134,26 @@ static int dummy_probe(struct platform_device *pdev) desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS; desc.ops = &dummy_source_cs_ops; + desc.groups = coresight_dummy_groups; + + ret = coresight_get_static_trace_id(dev, &trace_id); + if (!ret) { + /* Get the static id if id is set in device tree. */ + ret = coresight_trace_id_get_static_system_id(trace_id); + if (ret < 0) { + dev_err(dev, "Fail to get static id.\n"); + return ret; + } + } else { + /* Get next available id if id is not set in device tree. 
*/ + trace_id = coresight_trace_id_get_system_id(); + if (trace_id < 0) { + ret = trace_id; + return ret; + } + } + drvdata->traceid = (u8)trace_id; + } else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) { desc.name = coresight_alloc_device_name(&sink_devs, dev); if (!desc.name) @@ -99,27 +168,35 @@ } pdata = coresight_get_platform_data(dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); + if (IS_ERR(pdata)) { + ret = PTR_ERR(pdata); + goto free_id; + } pdev->dev.platform_data = pdata; - drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) - return -ENOMEM; - drvdata->dev = &pdev->dev; platform_set_drvdata(pdev, drvdata); desc.pdata = pdev->dev.platform_data; desc.dev = &pdev->dev; drvdata->csdev = coresight_register(&desc); - if (IS_ERR(drvdata->csdev)) { + ret = PTR_ERR(drvdata->csdev); + goto free_id; + } pm_runtime_enable(dev); dev_dbg(dev, "Dummy device initialized\n"); - return 0; + ret = 0; + goto out; + +free_id: + if (IS_VALID_CS_TRACE_ID(drvdata->traceid)) + coresight_trace_id_put_system_id(drvdata->traceid); + +out: + return ret; } static void dummy_remove(struct platform_device *pdev) @@ -127,6 +204,8 @@ static void dummy_remove(struct platform_device *pdev) struct dummy_drvdata *drvdata = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; + if (IS_VALID_CS_TRACE_ID(drvdata->traceid)) + coresight_trace_id_put_system_id(drvdata->traceid); pm_runtime_disable(dev); coresight_unregister(drvdata->csdev); } @@ -139,7 +218,7 @@ static const struct of_device_id dummy_match[] = { static struct platform_driver dummy_driver = { .probe = dummy_probe, - .remove_new = dummy_remove, + .remove = dummy_remove, .driver = { .name = "coresight-dummy", .of_match_table = dummy_match, diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 3aab182b562f..7948597d483d 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -84,7 +84,7 @@ struct etb_drvdata { struct clk *atclk; struct coresight_device *csdev; struct miscdevice miscdev; - spinlock_t spinlock; + raw_spinlock_t spinlock; local_t reading; pid_t pid; u8 *buf; @@ -145,7 +145,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev) unsigned long flags; struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* Don't mess with perf sessions. */ if (coresight_get_mode(csdev) == CS_MODE_PERF) { @@ -163,7 +163,7 @@ static int etb_enable_sysfs(struct coresight_device *csdev) csdev->refcnt++; out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; } @@ -176,7 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) struct perf_output_handle *handle = data; struct cs_buffers *buf = etm_perf_sink_config(handle); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* No need to continue if the component is already in use by sysFS. 
*/ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) { @@ -219,7 +219,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) } out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; } @@ -352,11 +352,11 @@ static int etb_disable(struct coresight_device *csdev) struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); unsigned long flags; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); csdev->refcnt--; if (csdev->refcnt) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EBUSY; } @@ -366,7 +366,7 @@ static int etb_disable(struct coresight_device *csdev) /* Dissociate from monitored process. */ drvdata->pid = -1; coresight_set_mode(csdev, CS_MODE_DISABLED); - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); dev_dbg(&csdev->dev, "ETB disabled\n"); return 0; @@ -443,7 +443,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev, capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* Don't do anything if another tracer is using this sink */ if (csdev->refcnt != 1) @@ -566,7 +566,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev, __etb_enable_hw(drvdata); CS_LOCK(drvdata->base); out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return to_read; } @@ -587,13 +587,13 @@ static void etb_dump(struct etb_drvdata *drvdata) { unsigned long flags; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) { __etb_disable_hw(drvdata); etb_dump_hw(drvdata); __etb_enable_hw(drvdata); } - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); dev_dbg(&drvdata->csdev->dev, "ETB dumped\n"); } @@ -652,7 +652,6 @@ static const struct file_operations etb_fops = { .open = etb_open, .read = etb_read, .release = etb_release, - .llseek = no_llseek, }; static struct attribute *coresight_etb_mgmt_attrs[] = { @@ -747,7 +746,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) drvdata->base = base; desc.access = CSDEV_ACCESS_IOMEM(base); - spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock); drvdata->buffer_depth = etb_get_buffer_depth(drvdata); @@ -844,7 +843,6 @@ MODULE_DEVICE_TABLE(amba, etb_ids); static struct amba_driver etb_driver = { .drv = { .name = "coresight-etb10", - .owner = THIS_MODULE, .pm = &etb_dev_pm_ops, .suppress_bind_attrs = true, diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index c0c60e6a1703..f4cccd68e625 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -136,13 +136,13 @@ static const struct attribute_group *etm_pmu_attr_groups[] = { NULL, }; -static inline struct list_head ** +static inline struct coresight_path ** etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu) { return per_cpu_ptr(data->path, cpu); } -static inline struct list_head * +static inline struct coresight_path * etm_event_cpu_path(struct etm_event_data *data, int cpu) { return 
*etm_event_cpu_path_ptr(data, cpu); @@ -226,18 +226,27 @@ static void free_event_data(struct work_struct *work) cscfg_deactivate_config(event_data->cfg_hash); for_each_cpu(cpu, mask) { - struct list_head **ppath; + struct coresight_path **ppath; ppath = etm_event_cpu_path_ptr(event_data, cpu); - if (!(IS_ERR_OR_NULL(*ppath))) + if (!(IS_ERR_OR_NULL(*ppath))) { + struct coresight_device *sink = coresight_get_sink(*ppath); + + /* + * Mark perf event as done for trace id allocator, but don't call + * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions + * never free trace IDs to ensure that the ID associated with a CPU + * cannot change during their and other's concurrent sessions. Instead, + * a refcount is used so that the last event to call + * coresight_trace_id_perf_stop() frees all IDs. + */ + coresight_trace_id_perf_stop(&sink->perf_sink_id_map); + coresight_release_path(*ppath); + } *ppath = NULL; - coresight_trace_id_put_cpu_id(cpu); } - /* mark perf event as done for trace id allocator */ - coresight_trace_id_perf_stop(); - free_percpu(event_data->path); kfree(event_data); } @@ -267,7 +276,7 @@ static void *alloc_event_data(int cpu) * unused memory when dealing with single CPU trace scenarios is small * compared to the cost of searching through an optimized array. */ - event_data->path = alloc_percpu(struct list_head *); + event_data->path = alloc_percpu(struct coresight_path *); if (!event_data->path) { kfree(event_data); @@ -308,7 +317,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, { u32 id, cfg_hash; int cpu = event->cpu; - int trace_id; cpumask_t *mask; struct coresight_device *sink = NULL; struct coresight_device *user_sink = NULL, *last_sink = NULL; @@ -325,9 +333,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, sink = user_sink = coresight_get_sink_by_id(id); } - /* tell the trace ID allocator that a perf event is starting up */ - coresight_trace_id_perf_start(); - /* check if user wants a coresight configuration selected */ cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32); if (cfg_hash) { @@ -346,7 +351,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, * CPUs, we can handle it and fail the session. 
*/ for_each_cpu(cpu, mask) { - struct list_head *path; + struct coresight_path *path; struct coresight_device *csdev; csdev = per_cpu(csdev_src, cpu); @@ -401,13 +406,14 @@ static void *etm_setup_aux(struct perf_event *event, void **pages, } /* ensure we can allocate a trace ID for this CPU */ - trace_id = coresight_trace_id_get_cpu_id(cpu); - if (!IS_VALID_CS_TRACE_ID(trace_id)) { + coresight_path_assign_trace_id(path, CS_MODE_PERF); + if (!IS_VALID_CS_TRACE_ID(path->trace_id)) { cpumask_clear_cpu(cpu, mask); coresight_release_path(path); continue; } + coresight_trace_id_perf_start(&sink->perf_sink_id_map); *etm_event_cpu_path_ptr(event_data, cpu) = path; } @@ -451,7 +457,7 @@ static void etm_event_start(struct perf_event *event, int flags) struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); struct perf_output_handle *handle = &ctxt->handle; struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); - struct list_head *path; + struct coresight_path *path; u64 hw_id; if (!csdev) @@ -495,7 +501,7 @@ static void etm_event_start(struct perf_event *event, int flags) goto fail_end_stop; /* Finally enable the tracer */ - if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) + if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF, path)) goto fail_disable_path; /* @@ -504,10 +510,14 @@ static void etm_event_start(struct perf_event *event, int flags) */ if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) { cpumask_set_cpu(cpu, &event_data->aux_hwid_done); - hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK, - CS_AUX_HW_ID_CURR_VERSION); - hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, - coresight_trace_id_read_cpu_id(cpu)); + + hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK, + CS_AUX_HW_ID_MAJOR_VERSION); + hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK, + CS_AUX_HW_ID_MINOR_VERSION); + hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, path->trace_id); + hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink)); + perf_report_aux_output_id(event, hw_id); } @@ -543,7 +553,7 @@ static void etm_event_stop(struct perf_event *event, int mode) struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt); struct perf_output_handle *handle = &ctxt->handle; struct etm_event_data *event_data; - struct list_head *path; + struct coresight_path *path; /* * If we still have access to the event_data via handle, diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h index bebbadee2ceb..5febbcdb8696 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.h +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h @@ -59,10 +59,9 @@ struct etm_event_data { cpumask_t aux_hwid_done; void *snk_config; u32 cfg_hash; - struct list_head * __percpu *path; + struct coresight_path * __percpu *path; }; -#if IS_ENABLED(CONFIG_CORESIGHT) int etm_perf_symlink(struct coresight_device *csdev, bool link); int etm_perf_add_symlink_sink(struct coresight_device *csdev); void etm_perf_del_symlink_sink(struct coresight_device *csdev); @@ -77,23 +76,6 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle) int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc); void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc); -#else -static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) -{ return -EINVAL; } -int etm_perf_add_symlink_sink(struct coresight_device *csdev) -{ return -EINVAL; } -void etm_perf_del_symlink_sink(struct coresight_device *csdev) {} -static inline 
void *etm_perf_sink_config(struct perf_output_handle *handle) -{ - return NULL; -} -int etm_perf_add_symlink_cscfg(struct device *dev, - struct cscfg_config_desc *config_desc) -{ return -EINVAL; } -void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {} - -#endif /* CONFIG_CORESIGHT */ - int __init etm_perf_init(void); void etm_perf_exit(void); diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h index e02c3ea972c9..171f1384f7c0 100644 --- a/drivers/hwtracing/coresight/coresight-etm.h +++ b/drivers/hwtracing/coresight/coresight-etm.h @@ -284,6 +284,5 @@ extern const struct attribute_group *coresight_etm_groups[]; void etm_set_default(struct etm_config *config); void etm_config_trace_mode(struct etm_config *config); struct etm_config *get_etm_config(struct etm_drvdata *drvdata); -int etm_read_alloc_trace_id(struct etm_drvdata *drvdata); void etm_release_trace_id(struct etm_drvdata *drvdata); #endif diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c index 9d5c1391ffb1..8927bfaf3af2 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c @@ -455,64 +455,29 @@ static int etm_cpu_id(struct coresight_device *csdev) return drvdata->cpu; } -int etm_read_alloc_trace_id(struct etm_drvdata *drvdata) -{ - int trace_id; - - /* - * This will allocate a trace ID to the cpu, - * or return the one currently allocated. - * - * trace id function has its own lock - */ - trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu); - if (IS_VALID_CS_TRACE_ID(trace_id)) - drvdata->traceid = (u8)trace_id; - else - dev_err(&drvdata->csdev->dev, - "Failed to allocate trace ID for %s on CPU%d\n", - dev_name(&drvdata->csdev->dev), drvdata->cpu); - return trace_id; -} - void etm_release_trace_id(struct etm_drvdata *drvdata) { coresight_trace_id_put_cpu_id(drvdata->cpu); } static int etm_enable_perf(struct coresight_device *csdev, - struct perf_event *event) + struct perf_event *event, + struct coresight_path *path) { struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - int trace_id; if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) return -EINVAL; /* Configure the tracer based on the session's specifics */ etm_parse_event_config(drvdata, event); - - /* - * perf allocates cpu ids as part of _setup_aux() - device needs to use - * the allocated ID. This reads the current version without allocation. 
- * - * This does not use the trace id lock to prevent lock_dep issues - * with perf locks - we know the ID cannot change until perf shuts down - * the session - */ - trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); - if (!IS_VALID_CS_TRACE_ID(trace_id)) { - dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", - dev_name(&drvdata->csdev->dev), drvdata->cpu); - return -EINVAL; - } - drvdata->traceid = (u8)trace_id; + drvdata->traceid = path->trace_id; /* And enable it */ return etm_enable_hw(drvdata); } -static int etm_enable_sysfs(struct coresight_device *csdev) +static int etm_enable_sysfs(struct coresight_device *csdev, struct coresight_path *path) { struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etm_enable_arg arg = { }; @@ -520,10 +485,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev) spin_lock(&drvdata->spinlock); - /* sysfs needs to allocate and set a trace ID */ - ret = etm_read_alloc_trace_id(drvdata); - if (ret < 0) - goto unlock_enable_sysfs; + drvdata->traceid = path->trace_id; /* * Configure the ETM only if the CPU is online. If it isn't online @@ -544,7 +506,6 @@ static int etm_enable_sysfs(struct coresight_device *csdev) if (ret) etm_release_trace_id(drvdata); -unlock_enable_sysfs: spin_unlock(&drvdata->spinlock); if (!ret) @@ -553,7 +514,7 @@ unlock_enable_sysfs: } static int etm_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, struct coresight_path *path) { int ret; struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); @@ -565,10 +526,10 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event, switch (mode) { case CS_MODE_SYSFS: - ret = etm_enable_sysfs(csdev); + ret = etm_enable_sysfs(csdev, path); break; case CS_MODE_PERF: - ret = etm_enable_perf(csdev, event); + ret = etm_enable_perf(csdev, event, path); break; default: ret = -EINVAL; @@ -703,6 +664,7 @@ static const struct coresight_ops_source etm_source_ops = { }; static const struct coresight_ops etm_cs_ops = { + .trace_id = coresight_etm_get_trace_id, .source_ops = &etm_source_ops, }; @@ -1008,7 +970,6 @@ MODULE_DEVICE_TABLE(amba, etm_ids); static struct amba_driver etm_driver = { .drv = { .name = "coresight-etm3x", - .owner = THIS_MODULE, .pm = &etm_dev_pm_ops, .suppress_bind_attrs = true, }, diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c index 68c644be9813..b9006451f515 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c @@ -1190,10 +1190,9 @@ static DEVICE_ATTR_RO(cpu); static ssize_t traceid_show(struct device *dev, struct device_attribute *attr, char *buf) { - int trace_id; struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); + int trace_id = coresight_etm_get_trace_id(drvdata->csdev, CS_MODE_SYSFS, NULL); - trace_id = etm_read_alloc_trace_id(drvdata); if (trace_id < 0) return trace_id; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index c2ca4a02dfce..2b8f10463840 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -6,6 +6,7 @@ #include <linux/acpi.h> #include <linux/bitops.h> #include <linux/kernel.h> +#include <linux/kvm_host.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/types.h> @@ -23,7 +24,6 @@ #include <linux/cpu_pm.h> #include 
<linux/coresight.h> #include <linux/coresight-pmu.h> -#include <linux/pm_wakeup.h> #include <linux/amba/bus.h> #include <linux/seq_file.h> #include <linux/uaccess.h> @@ -232,25 +232,6 @@ static int etm4_cpu_id(struct coresight_device *csdev) return drvdata->cpu; } -int etm4_read_alloc_trace_id(struct etmv4_drvdata *drvdata) -{ - int trace_id; - - /* - * This will allocate a trace ID to the cpu, - * or return the one currently allocated. - * The trace id function has its own lock - */ - trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu); - if (IS_VALID_CS_TRACE_ID(trace_id)) - drvdata->trcid = (u8)trace_id; - else - dev_err(&drvdata->csdev->dev, - "Failed to allocate trace ID for %s on CPU%d\n", - dev_name(&drvdata->csdev->dev), drvdata->cpu); - return trace_id; -} - void etm4_release_trace_id(struct etmv4_drvdata *drvdata) { coresight_trace_id_put_cpu_id(drvdata->cpu); @@ -268,10 +249,28 @@ struct etm4_enable_arg { */ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata) { + u64 trfcr; + /* If the CPU doesn't support FEAT_TRF, nothing to do */ if (!drvdata->trfcr) return; - cpu_prohibit_trace(); + + trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE); + + write_trfcr(trfcr); + kvm_tracing_set_el1_configuration(trfcr); +} + +static u64 etm4x_get_kern_user_filter(struct etmv4_drvdata *drvdata) +{ + u64 trfcr = drvdata->trfcr; + + if (drvdata->config.mode & ETM_MODE_EXCL_KERN) + trfcr &= ~TRFCR_EL1_ExTRE; + if (drvdata->config.mode & ETM_MODE_EXCL_USER) + trfcr &= ~TRFCR_EL1_E0TRE; + + return trfcr; } /* @@ -286,18 +285,28 @@ static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata) */ static void etm4x_allow_trace(struct etmv4_drvdata *drvdata) { - u64 trfcr = drvdata->trfcr; + u64 trfcr, guest_trfcr; /* If the CPU doesn't support FEAT_TRF, nothing to do */ - if (!trfcr) + if (!drvdata->trfcr) return; - if (drvdata->config.mode & ETM_MODE_EXCL_KERN) - trfcr &= ~TRFCR_ELx_ExTRE; - if (drvdata->config.mode & ETM_MODE_EXCL_USER) - trfcr &= ~TRFCR_ELx_E0TRE; + if (drvdata->config.mode & ETM_MODE_EXCL_HOST) + trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE); + else + trfcr = etm4x_get_kern_user_filter(drvdata); write_trfcr(trfcr); + + /* Set filters for guests and pass to KVM */ + if (drvdata->config.mode & ETM_MODE_EXCL_GUEST) + guest_trfcr = drvdata->trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE); + else + guest_trfcr = etm4x_get_kern_user_filter(drvdata); + + /* TRFCR_EL1 doesn't have CX so mask it out. */ + guest_trfcr &= ~TRFCR_EL2_CX; + kvm_tracing_set_el1_configuration(guest_trfcr); } #ifdef CONFIG_ETM4X_IMPDEF_FEATURE @@ -399,6 +408,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata, } #endif /* CONFIG_ETM4X_IMPDEF_FEATURE */ +static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val) +{ + if (!csa->io_mem) + isb(); +} + +/* + * etm4x_wait_status: Poll for TRCSTATR.<pos> == <val>. While using system + * instruction to access the trace unit, each access must be separated by a + * synchronization barrier. 
See ARM IHI0064H.b section "4.3.7 Synchronization of + * register updates", for system instructions section, in "Notes": + * + * "In particular, whenever disabling or enabling the trace unit, a poll of + * TRCSTATR needs explicit synchronization between each read of TRCSTATR" + */ +static int etm4x_wait_status(struct csdev_access *csa, int pos, int val) +{ + if (!csa->io_mem) + return coresight_timeout_action(csa, TRCSTATR, pos, val, + etm4x_sys_ins_barrier); + return coresight_timeout(csa, TRCSTATR, pos, val); +} + static int etm4_enable_hw(struct etmv4_drvdata *drvdata) { int i, rc; @@ -430,7 +462,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb(); /* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); if (drvdata->nr_pe) @@ -523,7 +555,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb(); /* wait for TRCSTATR.IDLE to go back down to '0' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); @@ -655,6 +687,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev, if (attr->exclude_user) config->mode = ETM_MODE_EXCL_USER; + if (attr->exclude_host) + config->mode |= ETM_MODE_EXCL_HOST; + + if (attr->exclude_guest) + config->mode |= ETM_MODE_EXCL_GUEST; + /* Always start from the default config */ etm4_set_default_config(config); @@ -752,9 +790,10 @@ out: } static int etm4_enable_perf(struct coresight_device *csdev, - struct perf_event *event) + struct perf_event *event, + struct coresight_path *path) { - int ret = 0, trace_id; + int ret = 0; struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) { @@ -767,22 +806,7 @@ static int etm4_enable_perf(struct coresight_device *csdev, if (ret) goto out; - /* - * perf allocates cpu ids as part of _setup_aux() - device needs to use - * the allocated ID. This reads the current version without allocation. 
- * - * This does not use the trace id lock to prevent lock_dep issues - * with perf locks - we know the ID cannot change until perf shuts down - * the session - */ - trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); - if (!IS_VALID_CS_TRACE_ID(trace_id)) { - dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", - dev_name(&drvdata->csdev->dev), drvdata->cpu); - ret = -EINVAL; - goto out; - } - drvdata->trcid = (u8)trace_id; + drvdata->trcid = path->trace_id; /* And enable it */ ret = etm4_enable_hw(drvdata); @@ -791,7 +815,7 @@ out: return ret; } -static int etm4_enable_sysfs(struct coresight_device *csdev) +static int etm4_enable_sysfs(struct coresight_device *csdev, struct coresight_path *path) { struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct etm4_enable_arg arg = { }; @@ -806,12 +830,9 @@ static int etm4_enable_sysfs(struct coresight_device *csdev) return ret; } - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); - /* sysfs needs to read and allocate a trace ID */ - ret = etm4_read_alloc_trace_id(drvdata); - if (ret < 0) - goto unlock_sysfs_enable; + drvdata->trcid = path->trace_id; /* * Executing etm4_enable_hw on the cpu whose ETM is being enabled @@ -828,8 +849,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev) if (ret) etm4_release_trace_id(drvdata); -unlock_sysfs_enable: - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); if (!ret) dev_dbg(&csdev->dev, "ETM tracing enabled\n"); @@ -837,7 +857,7 @@ unlock_sysfs_enable: } static int etm4_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, struct coresight_path *path) { int ret; @@ -848,10 +868,10 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event, switch (mode) { case CS_MODE_SYSFS: - ret = etm4_enable_sysfs(csdev); + ret = etm4_enable_sysfs(csdev, path); break; case CS_MODE_PERF: - ret = etm4_enable_perf(csdev, event); + ret = etm4_enable_perf(csdev, event, path); break; default: ret = -EINVAL; @@ -905,10 +925,25 @@ static void etm4_disable_hw(void *info) tsb_csync(); etm4x_relaxed_write32(csa, control, TRCPRGCTLR); + /* + * As recommended by section 4.3.7 ("Synchronization when using system + * instructions to program the trace unit") of ARM IHI 0064H.b, the + * self-hosted trace analyzer must perform a Context synchronization + * event between writing to the TRCPRGCTLR and reading the TRCSTATR. + */ + if (!csa->io_mem) + isb(); + /* wait for TRCSTATR.PMSTABLE to go to '1' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); + /* + * As recommended by section 4.3.7 (Synchronization of register updates) + * of ARM IHI 0064H.b. + */ + isb(); + /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { config->ss_status[i] = @@ -976,7 +1011,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev) * DYING hotplug callback is serviced by the ETM driver. 
*/ cpus_read_lock(); - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* * Executing etm4_disable_hw on the cpu whose ETM is being disabled @@ -984,7 +1019,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev) */ smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); cpus_read_unlock(); /* @@ -1031,6 +1066,7 @@ static const struct coresight_ops_source etm4_source_ops = { }; static const struct coresight_ops etm4_cs_ops = { + .trace_id = coresight_etm_get_trace_id, .source_ops = &etm4_source_ops, }; @@ -1140,9 +1176,9 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata) * tracing at the kernel EL and EL0, forcing to use the * virtual time as the timestamp. */ - trfcr = (TRFCR_ELx_TS_VIRTUAL | - TRFCR_ELx_ExTRE | - TRFCR_ELx_E0TRE); + trfcr = (TRFCR_EL1_TS_VIRTUAL | + TRFCR_EL1_ExTRE | + TRFCR_EL1_E0TRE); /* If we are running at EL2, allow tracing the CONTEXTIDR_EL2. */ if (is_kernel_in_hyp_mode()) @@ -1180,7 +1216,7 @@ static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata) * recorded value for 'drvdata->ccitmin' to workaround * this problem. */ - if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) { + if (is_midr_in_range_list(etm_wrong_ccitmin_cpus)) { if (drvdata->ccitmin == 256) drvdata->ccitmin = 4; } @@ -1240,6 +1276,8 @@ static void etm4_init_arch_data(void *info) drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0); /* QSUPP, bits[16:15] Q element support field */ drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0); + if (drvdata->q_support) + drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT); /* TSSIZE, bits[28:24] Global timestamp size field */ drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0); @@ -1660,13 +1698,13 @@ static int etm4_starting_cpu(unsigned int cpu) if (!etmdrvdata[cpu]) return 0; - spin_lock(&etmdrvdata[cpu]->spinlock); + raw_spin_lock(&etmdrvdata[cpu]->spinlock); if (!etmdrvdata[cpu]->os_unlock) etm4_os_unlock(etmdrvdata[cpu]); if (coresight_get_mode(etmdrvdata[cpu]->csdev)) etm4_enable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); + raw_spin_unlock(&etmdrvdata[cpu]->spinlock); return 0; } @@ -1675,10 +1713,10 @@ static int etm4_dying_cpu(unsigned int cpu) if (!etmdrvdata[cpu]) return 0; - spin_lock(&etmdrvdata[cpu]->spinlock); + raw_spin_lock(&etmdrvdata[cpu]->spinlock); if (coresight_get_mode(etmdrvdata[cpu]->csdev)) etm4_disable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); + raw_spin_unlock(&etmdrvdata[cpu]->spinlock); return 0; } @@ -1708,7 +1746,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) etm4_os_lock(drvdata); /* wait for TRCSTATR.PMSTABLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for PM Stable Status\n"); etm4_os_unlock(drvdata); @@ -1732,16 +1770,14 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcccctlr = etm4x_read32(csa, TRCCCCTLR); state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR); state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR); - state->trcqctlr = etm4x_read32(csa, TRCQCTLR); + if (drvdata->q_filt) + state->trcqctlr = etm4x_read32(csa, TRCQCTLR); state->trcvictlr = etm4x_read32(csa, TRCVICTLR); state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR); state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR); if (drvdata->nr_pe_cmp) state->trcvipcssctlr = 
etm4x_read32(csa, TRCVIPCSSCTLR); - state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR); - state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR); - state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR); for (i = 0; i < drvdata->nrseqstate - 1; i++) state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i)); @@ -1758,7 +1794,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i)); } - for (i = 0; i < drvdata->nr_resource * 2; i++) + /* Resource selector pair 0 is reserved */ + for (i = 2; i < drvdata->nr_resource * 2; i++) state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i)); for (i = 0; i < drvdata->nr_ss_cmp; i++) { @@ -1800,7 +1837,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcpdcr = etm4x_read32(csa, TRCPDCR); /* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); etm4_os_unlock(drvdata); @@ -1843,8 +1880,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) { int i; struct etmv4_save_state *state = drvdata->save_state; - struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base); - struct csdev_access *csa = &tmp_csa; + struct csdev_access *csa = &drvdata->csdev->access; + + if (WARN_ON(!drvdata->csdev)) + return; etm4_cs_unlock(drvdata, csa); etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET); @@ -1863,16 +1902,14 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR); etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR); etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR); - etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR); + if (drvdata->q_filt) + etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR); etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR); etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR); etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR); if (drvdata->nr_pe_cmp) etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR); - etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR); - etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR); - etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR); for (i = 0; i < drvdata->nrseqstate - 1; i++) etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i)); @@ -1889,7 +1926,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i)); } - for (i = 0; i < drvdata->nr_resource * 2; i++) + /* Resource selector pair 0 is reserved */ + for (i = 2; i < drvdata->nr_resource * 2; i++) etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i)); for (i = 0; i < drvdata->nr_ss_cmp; i++) { @@ -2122,7 +2160,7 @@ static int etm4_probe(struct device *dev) return -ENOMEM; } - spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock); drvdata->cpu = coresight_get_cpu(dev); if (drvdata->cpu < 0) @@ -2213,6 +2251,9 @@ static int etm4_probe_platform_dev(struct platform_device *pdev) ret = etm4_probe(&pdev->dev); pm_runtime_put(&pdev->dev); + if (ret) + pm_runtime_disable(&pdev->dev); + return ret; } @@ -2344,7 +2385,6 @@ MODULE_DEVICE_TABLE(amba, etm4_ids); static struct amba_driver etm4x_amba_driver = { .drv = { .name = "coresight-etm4x", - .owner = THIS_MODULE, .suppress_bind_attrs = true, }, .probe = etm4_probe_amba, @@ -2394,7 +2434,7 @@ 
MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids); static struct platform_driver etm4_platform_driver = { .probe = etm4_probe_platform_dev, - .remove_new = etm4_remove_platform_dev, + .remove = etm4_remove_platform_dev, .driver = { .name = "coresight-etm4x", .of_match_table = etm4_sysreg_match, diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index a9f19629f3f8..fdd0956fecb3 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -4,6 +4,7 @@ * Author: Mathieu Poirier <mathieu.poirier@linaro.org> */ +#include <linux/coresight.h> #include <linux/pid_namespace.h> #include <linux/pm_runtime.h> #include <linux/sysfs.h> @@ -174,7 +175,7 @@ static ssize_t reset_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); if (val) config->mode = 0x0; @@ -266,7 +267,7 @@ static ssize_t reset_store(struct device *dev, config->vmid_mask0 = 0x0; config->vmid_mask1 = 0x0; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); /* for sysfs - only release trace id when resetting */ etm4_release_trace_id(drvdata); @@ -300,7 +301,7 @@ static ssize_t mode_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->mode = val & ETMv4_MODE_ALL; if (drvdata->instrp0 == true) { @@ -437,7 +438,7 @@ static ssize_t mode_store(struct device *dev, if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER)) etm4_config_trace_mode(config); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } @@ -466,14 +467,14 @@ static ssize_t pe_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); if (val > drvdata->nr_pe) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EINVAL; } config->pe_sel = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(pe); @@ -501,7 +502,7 @@ static ssize_t event_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); switch (drvdata->nr_event) { case 0x0: /* EVENT0, bits[7:0] */ @@ -522,7 +523,7 @@ static ssize_t event_store(struct device *dev, default: break; } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(event); @@ -550,7 +551,7 @@ static ssize_t event_instren_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* start by clearing all instruction event enable bits */ config->eventctrl1 &= ~TRCEVENTCTL1R_INSTEN_MASK; switch (drvdata->nr_event) { @@ -578,7 +579,7 @@ static ssize_t event_instren_store(struct device *dev, default: break; } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(event_instren); @@ -739,11 +740,11 @@ static ssize_t event_vinst_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val &= TRCVICTLR_EVENT_MASK >> __bf_shf(TRCVICTLR_EVENT_MASK); config->vinst_ctrl &= ~TRCVICTLR_EVENT_MASK; config->vinst_ctrl |= FIELD_PREP(TRCVICTLR_EVENT_MASK, val); - 
spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(event_vinst); @@ -771,13 +772,13 @@ static ssize_t s_exlevel_vinst_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* clear all EXLEVEL_S bits */ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_S_MASK; /* enable instruction tracing for corresponding exception level */ val &= drvdata->s_ex_level; config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_S_MASK); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(s_exlevel_vinst); @@ -806,13 +807,13 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* clear EXLEVEL_NS bits */ config->vinst_ctrl &= ~TRCVICTLR_EXLEVEL_NS_MASK; /* enable instruction tracing for corresponding exception level */ val &= drvdata->ns_ex_level; config->vinst_ctrl |= val << __bf_shf(TRCVICTLR_EXLEVEL_NS_MASK); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(ns_exlevel_vinst); @@ -846,9 +847,9 @@ static ssize_t addr_idx_store(struct device *dev, * Use spinlock to ensure index doesn't change while it gets * dereferenced multiple times within a spinlock block elsewhere. */ - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->addr_idx = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_idx); @@ -862,7 +863,7 @@ static ssize_t addr_instdatatype_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; val = FIELD_GET(TRCACATRn_TYPE_MASK, config->addr_acc[idx]); len = scnprintf(buf, PAGE_SIZE, "%s\n", @@ -870,7 +871,7 @@ static ssize_t addr_instdatatype_show(struct device *dev, (val == TRCACATRn_TYPE_DATA_LOAD_ADDR ? "data_load" : (val == TRCACATRn_TYPE_DATA_STORE_ADDR ? 
"data_store" : "data_load_store"))); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return len; } @@ -888,13 +889,13 @@ static ssize_t addr_instdatatype_store(struct device *dev, if (sscanf(buf, "%s", str) != 1) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (!strcmp(str, "instr")) /* TYPE, bits[1:0] */ config->addr_acc[idx] &= ~TRCACATRn_TYPE_MASK; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_instdatatype); @@ -909,14 +910,14 @@ static ssize_t addr_single_show(struct device *dev, struct etmv4_config *config = &drvdata->config; idx = config->addr_idx; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } val = (unsigned long)config->addr_val[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -932,17 +933,17 @@ static ssize_t addr_single_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } config->addr_val[idx] = (u64)val; config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_single); @@ -956,23 +957,23 @@ static ssize_t addr_range_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE && config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } val1 = (unsigned long)config->addr_val[idx]; val2 = (unsigned long)config->addr_val[idx + 1]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); } @@ -995,10 +996,10 @@ static ssize_t addr_range_store(struct device *dev, if (val1 > val2) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (idx % 2 != 0) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } @@ -1006,7 +1007,7 @@ static ssize_t addr_range_store(struct device *dev, config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE && config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } @@ -1023,7 +1024,7 @@ static ssize_t addr_range_store(struct device *dev, exclude = config->mode & ETM_MODE_EXCLUDE; etm4_set_mode_exclude(drvdata, exclude ? 
true : false); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_range); @@ -1037,17 +1038,17 @@ static ssize_t addr_start_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || config->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } val = (unsigned long)config->addr_val[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1063,22 +1064,22 @@ static ssize_t addr_start_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (!drvdata->nr_addr_cmp) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EINVAL; } if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || config->addr_type[idx] == ETM_ADDR_TYPE_START)) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } config->addr_val[idx] = (u64)val; config->addr_type[idx] = ETM_ADDR_TYPE_START; config->vissctlr |= BIT(idx); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_start); @@ -1092,17 +1093,17 @@ static ssize_t addr_stop_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } val = (unsigned long)config->addr_val[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1118,22 +1119,22 @@ static ssize_t addr_stop_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (!drvdata->nr_addr_cmp) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EINVAL; } if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE || config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return -EPERM; } config->addr_val[idx] = (u64)val; config->addr_type[idx] = ETM_ADDR_TYPE_STOP; config->vissctlr |= BIT(idx + 16); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_stop); @@ -1147,14 +1148,14 @@ static ssize_t addr_ctxtype_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* CONTEXTTYPE, bits[3:2] */ val = FIELD_GET(TRCACATRn_CONTEXTTYPE_MASK, config->addr_acc[idx]); len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" : (val == ETM_CTX_CTXID ? "ctxid" : (val == ETM_CTX_VMID ? 
"vmid" : "all"))); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return len; } @@ -1172,7 +1173,7 @@ static ssize_t addr_ctxtype_store(struct device *dev, if (sscanf(buf, "%s", str) != 1) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; if (!strcmp(str, "none")) /* start by clearing context type bits */ @@ -1199,7 +1200,7 @@ static ssize_t addr_ctxtype_store(struct device *dev, if (drvdata->numvmidc) config->addr_acc[idx] |= TRCACATRn_CONTEXTTYPE_VMID; } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_ctxtype); @@ -1213,11 +1214,11 @@ static ssize_t addr_context_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* context ID comparator bits[6:4] */ val = FIELD_GET(TRCACATRn_CONTEXT_MASK, config->addr_acc[idx]); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1238,12 +1239,12 @@ static ssize_t addr_context_store(struct device *dev, drvdata->numcidc : drvdata->numvmidc)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* clear context ID comparator bits[6:4] */ config->addr_acc[idx] &= ~TRCACATRn_CONTEXT_MASK; config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_CONTEXT_MASK); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_context); @@ -1257,10 +1258,10 @@ static ssize_t addr_exlevel_s_ns_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; val = FIELD_GET(TRCACATRn_EXLEVEL_MASK, config->addr_acc[idx]); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1279,12 +1280,12 @@ static ssize_t addr_exlevel_s_ns_store(struct device *dev, if (val & ~(TRCACATRn_EXLEVEL_MASK >> __bf_shf(TRCACATRn_EXLEVEL_MASK))) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */ config->addr_acc[idx] &= ~TRCACATRn_EXLEVEL_MASK; config->addr_acc[idx] |= val << __bf_shf(TRCACATRn_EXLEVEL_MASK); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(addr_exlevel_s_ns); @@ -1307,7 +1308,7 @@ static ssize_t addr_cmp_view_show(struct device *dev, int size = 0; bool exclude = false; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->addr_idx; addr_v = config->addr_val[idx]; addr_ctrl = config->addr_acc[idx]; @@ -1322,7 +1323,7 @@ static ssize_t addr_cmp_view_show(struct device *dev, } exclude = config->viiectlr & BIT(idx / 2 + 16); } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); if (addr_type) { size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx, addr_type_names[addr_type], addr_v); @@ -1366,9 +1367,9 @@ static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev, if (!drvdata->nr_pe_cmp) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); 
config->vipcssctlr = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop); @@ -1402,9 +1403,9 @@ static ssize_t seq_idx_store(struct device *dev, * Use spinlock to ensure index doesn't change while it gets * dereferenced multiple times within a spinlock block elsewhere. */ - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->seq_idx = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(seq_idx); @@ -1448,10 +1449,10 @@ static ssize_t seq_event_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->seq_idx; val = config->seq_ctrl[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1467,11 +1468,11 @@ static ssize_t seq_event_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->seq_idx; /* Seq control has two masks B[15:8] F[7:0] */ config->seq_ctrl[idx] = val & 0xFFFF; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(seq_event); @@ -1535,9 +1536,9 @@ static ssize_t cntr_idx_store(struct device *dev, * Use spinlock to ensure index doesn't change while it gets * dereferenced multiple times within a spinlock block elsewhere. */ - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->cntr_idx = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(cntr_idx); @@ -1551,10 +1552,10 @@ static ssize_t cntrldvr_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->cntr_idx; val = config->cntrldvr[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1572,10 +1573,10 @@ static ssize_t cntrldvr_store(struct device *dev, if (val > ETM_CNTR_MAX_VAL) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->cntr_idx; config->cntrldvr[idx] = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(cntrldvr); @@ -1589,10 +1590,10 @@ static ssize_t cntr_val_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->cntr_idx; val = config->cntr_val[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1610,10 +1611,10 @@ static ssize_t cntr_val_store(struct device *dev, if (val > ETM_CNTR_MAX_VAL) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->cntr_idx; config->cntr_val[idx] = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(cntr_val); @@ -1627,10 +1628,10 @@ static ssize_t cntr_ctrl_show(struct device *dev, struct etmv4_drvdata *drvdata = 
dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->cntr_idx; val = config->cntr_ctrl[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1646,10 +1647,10 @@ static ssize_t cntr_ctrl_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->cntr_idx; config->cntr_ctrl[idx] = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(cntr_ctrl); @@ -1687,9 +1688,9 @@ static ssize_t res_idx_store(struct device *dev, * Use spinlock to ensure index doesn't change while it gets * dereferenced multiple times within a spinlock block elsewhere. */ - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->res_idx = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(res_idx); @@ -1703,10 +1704,10 @@ static ssize_t res_ctrl_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->res_idx; val = config->res_ctrl[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1722,7 +1723,7 @@ static ssize_t res_ctrl_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->res_idx; /* For odd idx pair inversal bit is RES0 */ if (idx % 2 != 0) @@ -1732,7 +1733,7 @@ static ssize_t res_ctrl_store(struct device *dev, TRCRSCTLRn_INV | TRCRSCTLRn_GROUP_MASK | TRCRSCTLRn_SELECT_MASK); - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(res_ctrl); @@ -1761,9 +1762,9 @@ static ssize_t sshot_idx_store(struct device *dev, if (val >= drvdata->nr_ss_cmp) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->ss_idx = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(sshot_idx); @@ -1776,9 +1777,9 @@ static ssize_t sshot_ctrl_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val = config->ss_ctrl[config->ss_idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1794,12 +1795,12 @@ static ssize_t sshot_ctrl_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->ss_idx; config->ss_ctrl[idx] = FIELD_PREP(TRCSSCCRn_SAC_ARC_RST_MASK, val); /* must clear bit 31 in related status register on programming */ config->ss_status[idx] &= ~TRCSSCSRn_STATUS; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(sshot_ctrl); @@ -1811,9 +1812,9 @@ static ssize_t sshot_status_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); 
+ raw_spin_lock(&drvdata->spinlock); val = config->ss_status[config->ss_idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } static DEVICE_ATTR_RO(sshot_status); @@ -1826,9 +1827,9 @@ static ssize_t sshot_pe_ctrl_show(struct device *dev, struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); struct etmv4_config *config = &drvdata->config; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val = config->ss_pe_cmp[config->ss_idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1844,12 +1845,12 @@ static ssize_t sshot_pe_ctrl_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->ss_idx; config->ss_pe_cmp[idx] = FIELD_PREP(TRCSSPCICRn_PC_MASK, val); /* must clear bit 31 in related status register on programming */ config->ss_status[idx] &= ~TRCSSCSRn_STATUS; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(sshot_pe_ctrl); @@ -1883,9 +1884,9 @@ static ssize_t ctxid_idx_store(struct device *dev, * Use spinlock to ensure index doesn't change while it gets * dereferenced multiple times within a spinlock block elsewhere. */ - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->ctxid_idx = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(ctxid_idx); @@ -1906,10 +1907,10 @@ static ssize_t ctxid_pid_show(struct device *dev, if (task_active_pid_ns(current) != &init_pid_ns) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->ctxid_idx; val = (unsigned long)config->ctxid_pid[idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -1944,10 +1945,10 @@ static ssize_t ctxid_pid_store(struct device *dev, if (kstrtoul(buf, 16, &pid)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); idx = config->ctxid_idx; config->ctxid_pid[idx] = (u64)pid; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(ctxid_pid); @@ -1967,10 +1968,10 @@ static ssize_t ctxid_masks_show(struct device *dev, if (task_active_pid_ns(current) != &init_pid_ns) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val1 = config->ctxid_mask0; val2 = config->ctxid_mask1; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); } @@ -2003,7 +2004,7 @@ static ssize_t ctxid_masks_store(struct device *dev, if ((drvdata->numcidc > 4) && (nr_inputs != 2)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* * each byte[0..3] controls mask value applied to ctxid * comparator[0..3] @@ -2075,7 +2076,7 @@ static ssize_t ctxid_masks_store(struct device *dev, mask >>= 0x8; } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(ctxid_masks); @@ -2109,9 +2110,9 @@ static ssize_t vmid_idx_store(struct device *dev, * Use spinlock to ensure index doesn't change while it gets * dereferenced multiple times within a spinlock block elsewhere. 
*/ - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->vmid_idx = val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(vmid_idx); @@ -2131,9 +2132,9 @@ static ssize_t vmid_val_show(struct device *dev, if (!task_is_in_init_pid_ns(current)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val = (unsigned long)config->vmid_val[config->vmid_idx]; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } @@ -2161,9 +2162,9 @@ static ssize_t vmid_val_store(struct device *dev, if (kstrtoul(buf, 16, &val)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); config->vmid_val[config->vmid_idx] = (u64)val; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(vmid_val); @@ -2182,10 +2183,10 @@ static ssize_t vmid_masks_show(struct device *dev, if (!task_is_in_init_pid_ns(current)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); val1 = config->vmid_mask0; val2 = config->vmid_mask1; - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2); } @@ -2217,7 +2218,7 @@ static ssize_t vmid_masks_store(struct device *dev, if ((drvdata->numvmidc > 4) && (nr_inputs != 2)) return -EINVAL; - spin_lock(&drvdata->spinlock); + raw_spin_lock(&drvdata->spinlock); /* * each byte[0..3] controls mask value applied to vmid @@ -2290,7 +2291,7 @@ static ssize_t vmid_masks_store(struct device *dev, else mask >>= 0x8; } - spin_unlock(&drvdata->spinlock); + raw_spin_unlock(&drvdata->spinlock); return size; } static DEVICE_ATTR_RW(vmid_masks); @@ -2319,11 +2320,11 @@ static ssize_t ts_source_show(struct device *dev, goto out; } - switch (drvdata->trfcr & TRFCR_ELx_TS_MASK) { - case TRFCR_ELx_TS_VIRTUAL: - case TRFCR_ELx_TS_GUEST_PHYSICAL: - case TRFCR_ELx_TS_PHYSICAL: - val = FIELD_GET(TRFCR_ELx_TS_MASK, drvdata->trfcr); + switch (drvdata->trfcr & TRFCR_EL1_TS_MASK) { + case TRFCR_EL1_TS_VIRTUAL: + case TRFCR_EL1_TS_GUEST_PHYSICAL: + case TRFCR_EL1_TS_PHYSICAL: + val = FIELD_GET(TRFCR_EL1_TS_MASK, drvdata->trfcr); break; default: val = -1; @@ -2402,10 +2403,9 @@ static ssize_t trctraceid_show(struct device *dev, struct device_attribute *attr, char *buf) { - int trace_id; struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); + int trace_id = coresight_etm_get_trace_id(drvdata->csdev, CS_MODE_SYSFS, NULL); - trace_id = etm4_read_alloc_trace_id(drvdata); if (trace_id < 0) return trace_id; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 9ea678bc2e8e..bd7db36ba197 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -43,9 +43,6 @@ #define TRCVIIECTLR 0x084 #define TRCVISSCTLR 0x088 #define TRCVIPCSSCTLR 0x08C -#define TRCVDCTLR 0x0A0 -#define TRCVDSACCTLR 0x0A4 -#define TRCVDARCCTLR 0x0A8 /* Derived resources registers */ #define TRCSEQEVRn(n) (0x100 + (n * 4)) /* n = 0-2 */ #define TRCSEQRSTEVR 0x118 @@ -90,9 +87,6 @@ /* Address Comparator registers n = 0-15 */ #define TRCACVRn(n) (0x400 + (n * 8)) #define TRCACATRn(n) (0x480 + (n * 8)) -/* Data Value Comparator Value registers, n = 0-7 */ -#define TRCDVCVRn(n) (0x500 + (n * 16)) -#define TRCDVCMRn(n) (0x580 + (n * 16)) /* ContextID/Virtual ContextID 
comparators, n = 0-7 */ #define TRCCIDCVRn(n) (0x600 + (n * 8)) #define TRCVMIDCVRn(n) (0x640 + (n * 8)) @@ -141,6 +135,7 @@ #define TRCIDR0_TRCCCI BIT(7) #define TRCIDR0_RETSTACK BIT(9) #define TRCIDR0_NUMEVENT_MASK GENMASK(11, 10) +#define TRCIDR0_QFILT BIT(14) #define TRCIDR0_QSUPP_MASK GENMASK(16, 15) #define TRCIDR0_TSSIZE_MASK GENMASK(28, 24) @@ -272,9 +267,6 @@ /* List of registers accessible via System instructions */ #define ETM4x_ONLY_SYSREG_LIST(op, val) \ CASE_##op((val), TRCPROCSELR) \ - CASE_##op((val), TRCVDCTLR) \ - CASE_##op((val), TRCVDSACCTLR) \ - CASE_##op((val), TRCVDARCCTLR) \ CASE_##op((val), TRCOSLAR) #define ETM_COMMON_SYSREG_LIST(op, val) \ @@ -422,22 +414,6 @@ CASE_##op((val), TRCACATRn(13)) \ CASE_##op((val), TRCACATRn(14)) \ CASE_##op((val), TRCACATRn(15)) \ - CASE_##op((val), TRCDVCVRn(0)) \ - CASE_##op((val), TRCDVCVRn(1)) \ - CASE_##op((val), TRCDVCVRn(2)) \ - CASE_##op((val), TRCDVCVRn(3)) \ - CASE_##op((val), TRCDVCVRn(4)) \ - CASE_##op((val), TRCDVCVRn(5)) \ - CASE_##op((val), TRCDVCVRn(6)) \ - CASE_##op((val), TRCDVCVRn(7)) \ - CASE_##op((val), TRCDVCMRn(0)) \ - CASE_##op((val), TRCDVCMRn(1)) \ - CASE_##op((val), TRCDVCMRn(2)) \ - CASE_##op((val), TRCDVCMRn(3)) \ - CASE_##op((val), TRCDVCMRn(4)) \ - CASE_##op((val), TRCDVCMRn(5)) \ - CASE_##op((val), TRCDVCMRn(6)) \ - CASE_##op((val), TRCDVCMRn(7)) \ CASE_##op((val), TRCCIDCVRn(0)) \ CASE_##op((val), TRCCIDCVRn(1)) \ CASE_##op((val), TRCCIDCVRn(2)) \ @@ -841,7 +817,7 @@ enum etm_impdef_type { * @s_ex_level: Secure ELs where tracing is supported. */ struct etmv4_config { - u32 mode; + u64 mode; u32 pe_sel; u32 cfg; u32 eventctrl0; @@ -907,9 +883,6 @@ struct etmv4_save_state { u32 trcviiectlr; u32 trcvissctlr; u32 trcvipcssctlr; - u32 trcvdctlr; - u32 trcvdsacctlr; - u32 trcvdarcctlr; u32 trcseqevr[ETM_MAX_SEQ_STATES]; u32 trcseqrstevr; @@ -982,6 +955,7 @@ struct etmv4_save_state { * @os_unlock: True if access to management registers is allowed. * @instrp0: Tracing of load and store instructions * as P0 elements is supported. + * @q_filt: Q element filtering support, if Q elements are supported. * @trcbb: Indicates if the trace unit supports branch broadcast tracing. * @trccond: If the trace unit supports conditional * instruction tracing. @@ -1015,7 +989,7 @@ struct etmv4_drvdata { struct clk *pclk; void __iomem *base; struct coresight_device *csdev; - spinlock_t spinlock; + raw_spinlock_t spinlock; int cpu; u8 arch; u8 nr_pe; @@ -1044,6 +1018,7 @@ struct etmv4_drvdata { bool boot_enable; bool os_unlock; bool instrp0; + bool q_filt; bool trcbb; bool trccond; bool retstack; @@ -1091,6 +1066,5 @@ static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata) return drvdata->arch >= ETM_ARCH_ETE; } -int etm4_read_alloc_trace_id(struct etmv4_drvdata *drvdata); void etm4_release_trace_id(struct etmv4_drvdata *drvdata); #endif diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index ef1a0abfee4e..0541712b2bcb 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -36,6 +36,7 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel"); * struct funnel_drvdata - specifics associated to a funnel component * @base: memory mapped base address for this component. * @atclk: optional clock for the core parts of the funnel. + * @pclk: APB clock if present, otherwise NULL * @csdev: component vitals needed by the framework. * @priority: port selection order. * @spinlock: serialize enable/disable operations. 
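
A few hunks above, coresight-etm4x.h widens etmv4_config::mode from u32 to u64. That lines up with coresight-priv.h later in this diff gaining ETM_MODE_EXCL_HOST as BIT(32) and ETM_MODE_EXCL_GUEST as BIT(33): a flag above bit 31 cannot survive a round trip through a 32-bit field. A toy illustration with invented names; the patch itself uses BIT(32), which is fine in a 64-bit kernel, while BIT_ULL() below keeps the sketch well defined everywhere:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_MODE_EXCL_HOST	BIT_ULL(32)	/* same idea as ETM_MODE_EXCL_HOST */

struct cfg_narrow { u32 mode; };
struct cfg_wide   { u64 mode; };

static bool host_excluded_narrow(struct cfg_narrow *c, u64 requested)
{
	c->mode = requested;			/* bit 32 is truncated away here */
	return c->mode & EXAMPLE_MODE_EXCL_HOST;	/* always false */
}

static bool host_excluded_wide(struct cfg_wide *c, u64 requested)
{
	c->mode = requested;			/* bit 32 preserved */
	return c->mode & EXAMPLE_MODE_EXCL_HOST;	/* true when requested has it */
}
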
@@ -43,9 +44,10 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel"); struct funnel_drvdata { void __iomem *base; struct clk *atclk; + struct clk *pclk; struct coresight_device *csdev; unsigned long priority; - spinlock_t spinlock; + raw_spinlock_t spinlock; }; static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port) @@ -83,16 +85,16 @@ static int funnel_enable(struct coresight_device *csdev, unsigned long flags; bool first_enable = false; - spin_lock_irqsave(&drvdata->spinlock, flags); - if (atomic_read(&in->dest_refcnt) == 0) { + raw_spin_lock_irqsave(&drvdata->spinlock, flags); + if (in->dest_refcnt == 0) { if (drvdata->base) rc = dynamic_funnel_enable_hw(drvdata, in->dest_port); if (!rc) first_enable = true; } if (!rc) - atomic_inc(&in->dest_refcnt); - spin_unlock_irqrestore(&drvdata->spinlock, flags); + in->dest_refcnt++; + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); if (first_enable) dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n", @@ -127,13 +129,13 @@ static void funnel_disable(struct coresight_device *csdev, unsigned long flags; bool last_disable = false; - spin_lock_irqsave(&drvdata->spinlock, flags); - if (atomic_dec_return(&in->dest_refcnt) == 0) { + raw_spin_lock_irqsave(&drvdata->spinlock, flags); + if (--in->dest_refcnt == 0) { if (drvdata->base) dynamic_funnel_disable_hw(drvdata, in->dest_port); last_disable = true; } - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); if (last_disable) dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n", @@ -236,6 +238,10 @@ static int funnel_probe(struct device *dev, struct resource *res) return ret; } + drvdata->pclk = coresight_get_enable_apb_pclk(dev); + if (IS_ERR(drvdata->pclk)) + return -ENODEV; + /* * Map the device base for dynamic-funnel, which has been * validated by AMBA core. 
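
In the funnel_enable()/funnel_disable() hunks above, the per-port reference count stops being an atomic_t handled alongside the lock and becomes a plain integer that is only touched while drvdata->spinlock (now a raw spinlock) is held; whether this call was the first enable or the last disable is decided inside the critical section, and only the logging happens after the unlock. The replicator gets the same change further down. A stripped-down sketch of that pattern with invented names, not taken from the patch:

#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_link {
	raw_spinlock_t lock;
	int refcnt;		/* protected by @lock, no atomics needed */
};

/* Hardware is only programmed on the 0 -> 1 transition. */
static int example_link_enable(struct example_link *el)
{
	unsigned long flags;
	bool first_enable = false;
	int rc = 0;

	raw_spin_lock_irqsave(&el->lock, flags);
	if (el->refcnt == 0) {
		/* rc = program_hardware(el); */
		if (!rc)
			first_enable = true;
	}
	if (!rc)
		el->refcnt++;
	raw_spin_unlock_irqrestore(&el->lock, flags);

	if (first_enable)
		pr_debug("link enabled\n");
	return rc;
}

/* Hardware is only disabled on the 1 -> 0 transition. */
static void example_link_disable(struct example_link *el)
{
	unsigned long flags;
	bool last_disable = false;

	raw_spin_lock_irqsave(&el->lock, flags);
	if (--el->refcnt == 0) {
		/* disable_hardware(el); */
		last_disable = true;
	}
	raw_spin_unlock_irqrestore(&el->lock, flags);

	if (last_disable)
		pr_debug("link disabled\n");
}
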
@@ -260,7 +266,7 @@ static int funnel_probe(struct device *dev, struct resource *res) } dev->platform_data = pdata; - spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock); desc.type = CORESIGHT_DEV_TYPE_LINK; desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG; desc.ops = &funnel_cs_ops; @@ -272,12 +278,13 @@ static int funnel_probe(struct device *dev, struct resource *res) goto out_disable_clk; } - pm_runtime_put(dev); ret = 0; out_disable_clk: if (ret && !IS_ERR_OR_NULL(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); + if (ret && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); return ret; } @@ -298,6 +305,9 @@ static int funnel_runtime_suspend(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); + return 0; } @@ -308,6 +318,8 @@ static int funnel_runtime_resume(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_prepare_enable(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_prepare_enable(drvdata->pclk); return 0; } #endif @@ -316,55 +328,61 @@ static const struct dev_pm_ops funnel_dev_pm_ops = { SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL) }; -static int static_funnel_probe(struct platform_device *pdev) +static int funnel_platform_probe(struct platform_device *pdev) { + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); int ret; pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - /* Static funnel do not have programming base */ - ret = funnel_probe(&pdev->dev, NULL); - - if (ret) { - pm_runtime_put_noidle(&pdev->dev); + ret = funnel_probe(&pdev->dev, res); + pm_runtime_put(&pdev->dev); + if (ret) pm_runtime_disable(&pdev->dev); - } return ret; } -static void static_funnel_remove(struct platform_device *pdev) +static void funnel_platform_remove(struct platform_device *pdev) { + struct funnel_drvdata *drvdata = dev_get_drvdata(&pdev->dev); + + if (WARN_ON(!drvdata)) + return; + funnel_remove(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); } -static const struct of_device_id static_funnel_match[] = { +static const struct of_device_id funnel_match[] = { {.compatible = "arm,coresight-static-funnel"}, {} }; -MODULE_DEVICE_TABLE(of, static_funnel_match); +MODULE_DEVICE_TABLE(of, funnel_match); #ifdef CONFIG_ACPI -static const struct acpi_device_id static_funnel_ids[] = { - {"ARMHC9FE", 0, 0, 0}, +static const struct acpi_device_id funnel_acpi_ids[] = { + {"ARMHC9FE", 0, 0, 0}, /* ARM Coresight Static Funnel */ + {"ARMHC9FF", 0, 0, 0}, /* ARM CoreSight Dynamic Funnel */ {}, }; -MODULE_DEVICE_TABLE(acpi, static_funnel_ids); +MODULE_DEVICE_TABLE(acpi, funnel_acpi_ids); #endif -static struct platform_driver static_funnel_driver = { - .probe = static_funnel_probe, - .remove_new = static_funnel_remove, - .driver = { - .name = "coresight-static-funnel", +static struct platform_driver funnel_driver = { + .probe = funnel_platform_probe, + .remove = funnel_platform_remove, + .driver = { + .name = "coresight-funnel", /* THIS_MODULE is taken care of by platform_driver_register() */ - .of_match_table = static_funnel_match, - .acpi_match_table = ACPI_PTR(static_funnel_ids), + .of_match_table = funnel_match, + .acpi_match_table = ACPI_PTR(funnel_acpi_ids), .pm = &funnel_dev_pm_ops, .suppress_bind_attrs = true, }, @@ 
-373,7 +391,13 @@ static struct platform_driver static_funnel_driver = { static int dynamic_funnel_probe(struct amba_device *adev, const struct amba_id *id) { - return funnel_probe(&adev->dev, &adev->res); + int ret; + + ret = funnel_probe(&adev->dev, &adev->res); + if (!ret) + pm_runtime_put(&adev->dev); + + return ret; } static void dynamic_funnel_remove(struct amba_device *adev) @@ -399,7 +423,6 @@ MODULE_DEVICE_TABLE(amba, dynamic_funnel_ids); static struct amba_driver dynamic_funnel_driver = { .drv = { .name = "coresight-dynamic-funnel", - .owner = THIS_MODULE, .pm = &funnel_dev_pm_ops, .suppress_bind_attrs = true, }, @@ -410,27 +433,12 @@ static struct amba_driver dynamic_funnel_driver = { static int __init funnel_init(void) { - int ret; - - ret = platform_driver_register(&static_funnel_driver); - if (ret) { - pr_info("Error registering platform driver\n"); - return ret; - } - - ret = amba_driver_register(&dynamic_funnel_driver); - if (ret) { - pr_info("Error registering amba driver\n"); - platform_driver_unregister(&static_funnel_driver); - } - - return ret; + return coresight_init_driver("funnel", &dynamic_funnel_driver, &funnel_driver); } static void __exit funnel_exit(void) { - platform_driver_unregister(&static_funnel_driver); - amba_driver_unregister(&dynamic_funnel_driver); + coresight_remove_driver(&dynamic_funnel_driver, &funnel_driver); } module_init(funnel_init); diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c index 9d550f5697fa..8192ba3279f0 100644 --- a/drivers/hwtracing/coresight/coresight-platform.c +++ b/drivers/hwtracing/coresight/coresight-platform.c @@ -243,6 +243,27 @@ static int of_coresight_parse_endpoint(struct device *dev, conn.dest_fwnode = fwnode_handle_get(rdev_fwnode); conn.dest_port = rendpoint.port; + /* + * Get the firmware node of the filter source through the + * reference. This could be used to filter the source in + * building path. + */ + conn.filter_src_fwnode = + fwnode_find_reference(&ep->fwnode, "filter-source", 0); + if (IS_ERR(conn.filter_src_fwnode)) { + conn.filter_src_fwnode = NULL; + } else { + conn.filter_src_dev = + coresight_find_csdev_by_fwnode(conn.filter_src_fwnode); + if (conn.filter_src_dev && + !coresight_is_device_source(conn.filter_src_dev)) { + dev_warn(dev, "port %d: Filter handle is not a trace source : %s\n", + conn.src_port, dev_name(&conn.filter_src_dev->dev)); + conn.filter_src_dev = NULL; + conn.filter_src_fwnode = NULL; + } + } + new_conn = coresight_add_out_conn(dev, pdata, &conn); if (IS_ERR_VALUE(new_conn)) { fwnode_handle_put(conn.dest_fwnode); @@ -275,7 +296,7 @@ static int of_get_coresight_platform_data(struct device *dev, */ if (!parent) { /* - * Avoid warnings in of_graph_get_next_endpoint() + * Avoid warnings in for_each_endpoint_of_node() * if the device doesn't have any graph connections */ if (!of_graph_is_present(node)) @@ -286,7 +307,7 @@ static int of_get_coresight_platform_data(struct device *dev, } /* Iterate through each output port to discover topology */ - while ((ep = of_graph_get_next_endpoint(parent, ep))) { + for_each_endpoint_of_node(parent, ep) { /* * Legacy binding mixes input/output ports under the * same parent. 
So, skip the input ports if we are dealing @@ -297,8 +318,10 @@ static int of_get_coresight_platform_data(struct device *dev, continue; ret = of_coresight_parse_endpoint(dev, ep, pdata); - if (ret) + if (ret) { + of_node_put(ep); return ret; + } } return 0; @@ -794,6 +817,12 @@ int coresight_get_cpu(struct device *dev) } EXPORT_SYMBOL_GPL(coresight_get_cpu); +int coresight_get_static_trace_id(struct device *dev, u32 *id) +{ + return fwnode_property_read_u32(dev_fwnode(dev), "arm,static-trace-id", id); +} +EXPORT_SYMBOL_GPL(coresight_get_static_trace_id); + struct coresight_platform_data * coresight_get_platform_data(struct device *dev) { diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index eb365236f9a9..82644aff8d2b 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -13,7 +13,7 @@ #include <linux/pm_runtime.h> extern struct mutex coresight_mutex; -extern struct device_type coresight_dev_type[]; +extern const struct device_type coresight_dev_type[]; /* * Coresight management registers (0xf00-0xfcc) @@ -42,6 +42,9 @@ extern struct device_type coresight_dev_type[]; #define ETM_MODE_EXCL_KERN BIT(30) #define ETM_MODE_EXCL_USER BIT(31) +#define ETM_MODE_EXCL_HOST BIT(32) +#define ETM_MODE_EXCL_GUEST BIT(33) + struct cs_pair_attribute { struct device_attribute attr; u32 lo_off; @@ -129,16 +132,16 @@ static inline void CS_UNLOCK(void __iomem *addr) } while (0); } -void coresight_disable_path(struct list_head *path); -int coresight_enable_path(struct list_head *path, enum cs_mode mode, +void coresight_disable_path(struct coresight_path *path); +int coresight_enable_path(struct coresight_path *path, enum cs_mode mode, void *sink_data); -struct coresight_device *coresight_get_sink(struct list_head *path); +struct coresight_device *coresight_get_sink(struct coresight_path *path); struct coresight_device *coresight_get_sink_by_id(u32 id); struct coresight_device * coresight_find_default_sink(struct coresight_device *csdev); -struct list_head *coresight_build_path(struct coresight_device *csdev, - struct coresight_device *sink); -void coresight_release_path(struct list_head *path); +struct coresight_path *coresight_build_path(struct coresight_device *csdev, + struct coresight_device *sink); +void coresight_release_path(struct coresight_path *path); int coresight_add_sysfs_link(struct coresight_sysfs_link *info); void coresight_remove_sysfs_link(struct coresight_sysfs_link *info); int coresight_create_conns_sysfs_group(struct coresight_device *csdev); @@ -148,6 +151,9 @@ int coresight_make_links(struct coresight_device *orig, struct coresight_device *target); void coresight_remove_links(struct coresight_device *orig, struct coresight_connection *conn); +u32 coresight_get_sink_id(struct coresight_device *csdev); +void coresight_path_assign_trace_id(struct coresight_path *path, + enum cs_mode mode); #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X) extern int etm_readl_cp14(u32 off, unsigned int *val); @@ -222,6 +228,16 @@ static inline void *coresight_get_uci_data(const struct amba_id *id) return uci_id->data; } +static inline void *coresight_get_uci_data_from_amba(const struct amba_id *table, u32 pid) +{ + while (table->mask) { + if ((pid & table->mask) == table->id) + return coresight_get_uci_data(table); + table++; + }; + return NULL; +} + void coresight_release_platform_data(struct coresight_device *csdev, struct device *dev, struct coresight_platform_data *pdata); diff --git 
a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 73452d9dc13b..ee7ee79f6cf7 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -31,6 +31,7 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator"); * @base: memory mapped base address for this component. Also indicates * whether this one is programmable or not. * @atclk: optional clock for the core parts of the replicator. + * @pclk: APB clock if present, otherwise NULL * @csdev: component vitals needed by the framework * @spinlock: serialize enable/disable operations. * @check_idfilter_val: check if the context is lost upon clock removal. @@ -38,8 +39,9 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator"); struct replicator_drvdata { void __iomem *base; struct clk *atclk; + struct clk *pclk; struct coresight_device *csdev; - spinlock_t spinlock; + raw_spinlock_t spinlock; bool check_idfilter_val; }; @@ -123,8 +125,8 @@ static int replicator_enable(struct coresight_device *csdev, unsigned long flags; bool first_enable = false; - spin_lock_irqsave(&drvdata->spinlock, flags); - if (atomic_read(&out->src_refcnt) == 0) { + raw_spin_lock_irqsave(&drvdata->spinlock, flags); + if (out->src_refcnt == 0) { if (drvdata->base) rc = dynamic_replicator_enable(drvdata, in->dest_port, out->src_port); @@ -132,8 +134,8 @@ static int replicator_enable(struct coresight_device *csdev, first_enable = true; } if (!rc) - atomic_inc(&out->src_refcnt); - spin_unlock_irqrestore(&drvdata->spinlock, flags); + out->src_refcnt++; + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); if (first_enable) dev_dbg(&csdev->dev, "REPLICATOR enabled\n"); @@ -177,14 +179,14 @@ static void replicator_disable(struct coresight_device *csdev, unsigned long flags; bool last_disable = false; - spin_lock_irqsave(&drvdata->spinlock, flags); - if (atomic_dec_return(&out->src_refcnt) == 0) { + raw_spin_lock_irqsave(&drvdata->spinlock, flags); + if (--out->src_refcnt == 0) { if (drvdata->base) dynamic_replicator_disable(drvdata, in->dest_port, out->src_port); last_disable = true; } - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); if (last_disable) dev_dbg(&csdev->dev, "REPLICATOR disabled\n"); @@ -243,6 +245,10 @@ static int replicator_probe(struct device *dev, struct resource *res) return ret; } + drvdata->pclk = coresight_get_enable_apb_pclk(dev); + if (IS_ERR(drvdata->pclk)) + return -ENODEV; + /* * Map the device base for dynamic-replicator, which has been * validated by AMBA core @@ -271,7 +277,7 @@ static int replicator_probe(struct device *dev, struct resource *res) } dev->platform_data = pdata; - spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock); desc.type = CORESIGHT_DEV_TYPE_LINK; desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT; desc.ops = &replicator_cs_ops; @@ -285,11 +291,12 @@ static int replicator_probe(struct device *dev, struct resource *res) } replicator_reset(drvdata); - pm_runtime_put(dev); out_disable_clk: if (ret && !IS_ERR_OR_NULL(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); + if (ret && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); return ret; } @@ -301,29 +308,34 @@ static int replicator_remove(struct device *dev) return 0; } -static int static_replicator_probe(struct platform_device *pdev) +static int replicator_platform_probe(struct platform_device *pdev) { + struct resource *res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); int ret; pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - /* Static replicators do not have programming base */ - ret = replicator_probe(&pdev->dev, NULL); - - if (ret) { - pm_runtime_put_noidle(&pdev->dev); + ret = replicator_probe(&pdev->dev, res); + pm_runtime_put(&pdev->dev); + if (ret) pm_runtime_disable(&pdev->dev); - } return ret; } -static void static_replicator_remove(struct platform_device *pdev) +static void replicator_platform_remove(struct platform_device *pdev) { + struct replicator_drvdata *drvdata = dev_get_drvdata(&pdev->dev); + + if (WARN_ON(!drvdata)) + return; + replicator_remove(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); } #ifdef CONFIG_PM @@ -334,6 +346,8 @@ static int replicator_runtime_suspend(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); return 0; } @@ -344,6 +358,8 @@ static int replicator_runtime_resume(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_prepare_enable(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_prepare_enable(drvdata->pclk); return 0; } #endif @@ -353,31 +369,32 @@ static const struct dev_pm_ops replicator_dev_pm_ops = { replicator_runtime_resume, NULL) }; -static const struct of_device_id static_replicator_match[] = { +static const struct of_device_id replicator_match[] = { {.compatible = "arm,coresight-replicator"}, {.compatible = "arm,coresight-static-replicator"}, {} }; -MODULE_DEVICE_TABLE(of, static_replicator_match); +MODULE_DEVICE_TABLE(of, replicator_match); #ifdef CONFIG_ACPI -static const struct acpi_device_id static_replicator_acpi_ids[] = { +static const struct acpi_device_id replicator_acpi_ids[] = { {"ARMHC985", 0, 0, 0}, /* ARM CoreSight Static Replicator */ + {"ARMHC98D", 0, 0, 0}, /* ARM CoreSight Dynamic Replicator */ {} }; -MODULE_DEVICE_TABLE(acpi, static_replicator_acpi_ids); +MODULE_DEVICE_TABLE(acpi, replicator_acpi_ids); #endif -static struct platform_driver static_replicator_driver = { - .probe = static_replicator_probe, - .remove_new = static_replicator_remove, +static struct platform_driver replicator_driver = { + .probe = replicator_platform_probe, + .remove = replicator_platform_remove, .driver = { - .name = "coresight-static-replicator", + .name = "coresight-replicator", /* THIS_MODULE is taken care of by platform_driver_register() */ - .of_match_table = of_match_ptr(static_replicator_match), - .acpi_match_table = ACPI_PTR(static_replicator_acpi_ids), + .of_match_table = of_match_ptr(replicator_match), + .acpi_match_table = ACPI_PTR(replicator_acpi_ids), .pm = &replicator_dev_pm_ops, .suppress_bind_attrs = true, }, @@ -386,7 +403,13 @@ static struct platform_driver static_replicator_driver = { static int dynamic_replicator_probe(struct amba_device *adev, const struct amba_id *id) { - return replicator_probe(&adev->dev, &adev->res); + int ret; + + ret = replicator_probe(&adev->dev, &adev->res); + if (!ret) + pm_runtime_put(&adev->dev); + + return ret; } static void dynamic_replicator_remove(struct amba_device *adev) @@ -406,7 +429,6 @@ static struct amba_driver dynamic_replicator_driver = { .drv = { .name = "coresight-dynamic-replicator", .pm = &replicator_dev_pm_ops, - .owner = THIS_MODULE, .suppress_bind_attrs = true, }, .probe = dynamic_replicator_probe, @@ -416,27 
+438,12 @@ static struct amba_driver dynamic_replicator_driver = { static int __init replicator_init(void) { - int ret; - - ret = platform_driver_register(&static_replicator_driver); - if (ret) { - pr_info("Error registering platform driver\n"); - return ret; - } - - ret = amba_driver_register(&dynamic_replicator_driver); - if (ret) { - pr_info("Error registering amba driver\n"); - platform_driver_unregister(&static_replicator_driver); - } - - return ret; + return coresight_init_driver("replicator", &dynamic_replicator_driver, &replicator_driver); } static void __exit replicator_exit(void) { - platform_driver_unregister(&static_replicator_driver); - amba_driver_unregister(&dynamic_replicator_driver); + coresight_remove_driver(&dynamic_replicator_driver, &replicator_driver); } module_init(replicator_init); diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h index 53840a2c41f2..303d71911870 100644 --- a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h +++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h @@ -21,13 +21,4 @@ static inline void write_trfcr(u64 val) isb(); } -static inline u64 cpu_prohibit_trace(void) -{ - u64 trfcr = read_trfcr(); - - /* Prohibit tracing at EL0 & the kernel EL */ - write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE)); - /* Return the original value of the TRFCR */ - return trfcr; -} #endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */ diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 974d37e5f94c..26f9339f38b9 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -29,6 +29,7 @@ #include <linux/perf_event.h> #include <linux/pm_runtime.h> #include <linux/stm.h> +#include <linux/platform_device.h> #include "coresight-priv.h" #include "coresight-trace-id.h" @@ -115,6 +116,7 @@ DEFINE_CORESIGHT_DEVLIST(stm_devs, "stm"); * struct stm_drvdata - specifics associated to an STM component * @base: memory mapped base address for this component. * @atclk: optional clock for the core parts of the STM. + * @pclk: APB clock if present, otherwise NULL * @csdev: component vitals needed by the framework. * @spinlock: only one at a time pls. * @chs: the channels accociated to this STM. 
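
The replicator_init()/replicator_exit() hunks above, like the funnel ones earlier, replace the hand-rolled "register the platform driver, then the AMBA driver, unregister on failure" sequence with coresight_init_driver()/coresight_remove_driver(), and the STM hunks below receive the same treatment. The structural pattern behind these conversions is a shared probe core with two thin bus front-ends. A sketch of that shape, with every name invented and the device-specific work elided:

#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Shared core; stands in for __stm_probe(), __catu_probe(), funnel_probe() etc. */
static int __example_probe(struct device *dev, struct resource *res)
{
	/* ioremap @res, parse platform data, coresight_register(), ... */
	return 0;
}

/* AMBA front-end: resource and apb_pclk handling come from the AMBA core. */
static int example_amba_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = __example_probe(&adev->dev, &adev->res);

	if (!ret)
		pm_runtime_put(&adev->dev);
	return ret;
}

/* Platform/ACPI front-end: fetch the MMIO resource and drive runtime PM here. */
static int example_platform_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int ret;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = __example_probe(&pdev->dev, res);
	pm_runtime_put(&pdev->dev);
	if (ret)
		pm_runtime_disable(&pdev->dev);
	return ret;
}

static struct amba_driver example_amba_driver = {
	.probe = example_amba_probe,
};

static struct platform_driver example_platform_driver = {
	.probe = example_platform_probe,
};

static int __init example_init(void)
{
	/* One call registers both front-ends and centralises the error handling. */
	return coresight_init_driver("example", &example_amba_driver,
				     &example_platform_driver);
}

static void __exit example_exit(void)
{
	coresight_remove_driver(&example_amba_driver, &example_platform_driver);
}
module_init(example_init);
module_exit(example_exit);
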
@@ -131,6 +133,7 @@ DEFINE_CORESIGHT_DEVLIST(stm_devs, "stm"); struct stm_drvdata { void __iomem *base; struct clk *atclk; + struct clk *pclk; struct coresight_device *csdev; spinlock_t spinlock; struct channel_space chs; @@ -191,7 +194,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata) } static int stm_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, + __maybe_unused struct coresight_path *path) { struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); @@ -277,12 +281,23 @@ static void stm_disable(struct coresight_device *csdev, } } +static int stm_trace_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode, + __maybe_unused struct coresight_device *sink) +{ + struct stm_drvdata *drvdata; + + drvdata = dev_get_drvdata(csdev->dev.parent); + + return drvdata->traceid; +} + static const struct coresight_ops_source stm_source_ops = { .enable = stm_enable, .disable = stm_disable, }; static const struct coresight_ops stm_cs_ops = { + .trace_id = stm_trace_id, .source_ops = &stm_source_ops, }; @@ -800,14 +815,22 @@ static void stm_init_generic_data(struct stm_drvdata *drvdata, drvdata->stm.set_options = stm_generic_set_options; } -static int stm_probe(struct amba_device *adev, const struct amba_id *id) +static const struct amba_id stm_ids[]; + +static char *stm_csdev_name(struct coresight_device *csdev) +{ + u32 stm_pid = coresight_get_pid(&csdev->access); + void *uci_data = coresight_get_uci_data_from_amba(stm_ids, stm_pid); + + return uci_data ? (char *)uci_data : "STM"; +} + +static int __stm_probe(struct device *dev, struct resource *res) { int ret, trace_id; void __iomem *base; - struct device *dev = &adev->dev; struct coresight_platform_data *pdata = NULL; struct stm_drvdata *drvdata; - struct resource *res = &adev->res; struct resource ch_res; struct coresight_desc desc = { 0 }; @@ -819,12 +842,16 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) if (!drvdata) return -ENOMEM; - drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ + drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */ if (!IS_ERR(drvdata->atclk)) { ret = clk_prepare_enable(drvdata->atclk); if (ret) return ret; } + + drvdata->pclk = coresight_get_enable_apb_pclk(dev); + if (IS_ERR(drvdata->pclk)) + return -ENODEV; dev_set_drvdata(dev, drvdata); base = devm_ioremap_resource(dev, res); @@ -872,7 +899,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) ret = PTR_ERR(pdata); goto stm_unregister; } - adev->dev.platform_data = pdata; + dev->platform_data = pdata; desc.type = CORESIGHT_DEV_TYPE_SOURCE; desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE; @@ -893,10 +920,8 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) } drvdata->traceid = (u8)trace_id; - pm_runtime_put(&adev->dev); - dev_info(&drvdata->csdev->dev, "%s initialized\n", - (char *)coresight_get_uci_data(id)); + stm_csdev_name(drvdata->csdev)); return 0; cs_unregister: @@ -907,9 +932,20 @@ stm_unregister: return ret; } -static void stm_remove(struct amba_device *adev) +static int stm_probe(struct amba_device *adev, const struct amba_id *id) { - struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev); + int ret; + + ret = __stm_probe(&adev->dev, &adev->res); + if (!ret) + pm_runtime_put(&adev->dev); + + return ret; +} + +static void __stm_remove(struct device *dev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(dev); 
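
Along with the bus split, the STM probe above picks up the same optional APB clock handling already added to the funnel and replicator: coresight_get_enable_apb_pclk() hands back NULL when there is no separate APB clock to manage and an error pointer when enabling one fails, the probe bails out with -ENODEV only in the error case, the runtime PM callbacks (in the hunks just below) gate the clock with IS_ERR_OR_NULL(), and the platform remove path drops the reference with clk_put(). A condensed sketch of that lifecycle with invented names, not taken from the patch:

#include <linux/clk.h>
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct example_drvdata {
	struct clk *pclk;	/* APB clock if present, otherwise NULL */
};

/* Probe side: only a real error pointer is fatal. */
static int example_get_apb_clock(struct device *dev, struct example_drvdata *drvdata)
{
	drvdata->pclk = coresight_get_enable_apb_pclk(dev);
	if (IS_ERR(drvdata->pclk))
		return -ENODEV;
	return 0;
}

static int example_runtime_suspend(struct device *dev)
{
	struct example_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
		clk_disable_unprepare(drvdata->pclk);
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	struct example_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
		clk_prepare_enable(drvdata->pclk);
	return 0;
}

/* Platform remove: drop the clock reference, as the funnel/replicator/STM remove hunks do. */
static void example_platform_remove(struct platform_device *pdev)
{
	struct example_drvdata *drvdata = dev_get_drvdata(&pdev->dev);

	pm_runtime_disable(&pdev->dev);
	if (!IS_ERR_OR_NULL(drvdata->pclk))
		clk_put(drvdata->pclk);
}
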
coresight_trace_id_put_system_id(drvdata->traceid); coresight_unregister(drvdata->csdev); @@ -917,6 +953,11 @@ static void stm_remove(struct amba_device *adev) stm_unregister_device(&drvdata->stm); } +static void stm_remove(struct amba_device *adev) +{ + __stm_remove(&adev->dev); +} + #ifdef CONFIG_PM static int stm_runtime_suspend(struct device *dev) { @@ -925,6 +966,8 @@ static int stm_runtime_suspend(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); return 0; } @@ -935,6 +978,8 @@ static int stm_runtime_resume(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_prepare_enable(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_prepare_enable(drvdata->pclk); return 0; } #endif @@ -954,7 +999,6 @@ MODULE_DEVICE_TABLE(amba, stm_ids); static struct amba_driver stm_driver = { .drv = { .name = "coresight-stm", - .owner = THIS_MODULE, .pm = &stm_dev_pm_ops, .suppress_bind_attrs = true, }, @@ -963,7 +1007,66 @@ static struct amba_driver stm_driver = { .id_table = stm_ids, }; -module_amba_driver(stm_driver); +static int stm_platform_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int ret = 0; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = __stm_probe(&pdev->dev, res); + pm_runtime_put(&pdev->dev); + if (ret) + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static void stm_platform_remove(struct platform_device *pdev) +{ + struct stm_drvdata *drvdata = dev_get_drvdata(&pdev->dev); + + if (WARN_ON(!drvdata)) + return; + + __stm_remove(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id stm_acpi_ids[] = { + {"ARMHC502", 0, 0, 0}, /* ARM CoreSight STM */ + {}, +}; +MODULE_DEVICE_TABLE(acpi, stm_acpi_ids); +#endif + +static struct platform_driver stm_platform_driver = { + .probe = stm_platform_probe, + .remove = stm_platform_remove, + .driver = { + .name = "coresight-stm-platform", + .acpi_match_table = ACPI_PTR(stm_acpi_ids), + .suppress_bind_attrs = true, + .pm = &stm_dev_pm_ops, + }, +}; + +static int __init stm_init(void) +{ + return coresight_init_driver("stm", &stm_driver, &stm_platform_driver); +} + +static void __exit stm_exit(void) +{ + coresight_remove_driver(&stm_driver, &stm_platform_driver); +} +module_init(stm_init); +module_exit(stm_exit); MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>"); MODULE_DESCRIPTION("Arm CoreSight System Trace Macrocell driver"); diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c index 433ede94dd63..213b4159b062 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c +++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c @@ -160,7 +160,7 @@ static struct configfs_attribute *cscfg_config_view_attrs[] = { NULL, }; -static struct config_item_type cscfg_config_view_type = { +static const struct config_item_type cscfg_config_view_type = { .ct_owner = THIS_MODULE, .ct_attrs = cscfg_config_view_attrs, }; @@ -170,7 +170,7 @@ static struct configfs_attribute *cscfg_config_preset_attrs[] = { NULL, }; -static struct config_item_type cscfg_config_preset_type = { +static const struct config_item_type cscfg_config_preset_type = { .ct_owner = 
THIS_MODULE, .ct_attrs = cscfg_config_preset_attrs, }; @@ -272,7 +272,7 @@ static struct configfs_attribute *cscfg_feature_view_attrs[] = { NULL, }; -static struct config_item_type cscfg_feature_view_type = { +static const struct config_item_type cscfg_feature_view_type = { .ct_owner = THIS_MODULE, .ct_attrs = cscfg_feature_view_attrs, }; @@ -309,7 +309,7 @@ static struct configfs_attribute *cscfg_param_view_attrs[] = { NULL, }; -static struct config_item_type cscfg_param_view_type = { +static const struct config_item_type cscfg_param_view_type = { .ct_owner = THIS_MODULE, .ct_attrs = cscfg_param_view_attrs, }; @@ -380,7 +380,7 @@ static struct config_group *cscfg_create_feature_group(struct cscfg_feature_desc return &feat_view->group; } -static struct config_item_type cscfg_configs_type = { +static const struct config_item_type cscfg_configs_type = { .ct_owner = THIS_MODULE, }; @@ -414,7 +414,7 @@ void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc) } } -static struct config_item_type cscfg_features_type = { +static const struct config_item_type cscfg_features_type = { .ct_owner = THIS_MODULE, }; diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c index 11138a9762b0..a70c1454b410 100644 --- a/drivers/hwtracing/coresight/coresight-syscfg.c +++ b/drivers/hwtracing/coresight/coresight-syscfg.c @@ -89,9 +89,9 @@ static int cscfg_add_csdev_cfg(struct coresight_device *csdev, } /* if matched features, add config to device.*/ if (config_csdev) { - spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); list_add(&config_csdev->node, &csdev->config_csdev_list); - spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); } return 0; @@ -194,9 +194,9 @@ static int cscfg_load_feat_csdev(struct coresight_device *csdev, /* add to internal csdev feature list & initialise using reset call */ cscfg_reset_feat(feat_csdev); - spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); list_add(&feat_csdev->node, &csdev->feature_csdev_list); - spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); return 0; } @@ -765,7 +765,7 @@ static int cscfg_list_add_csdev(struct coresight_device *csdev, INIT_LIST_HEAD(&csdev->feature_csdev_list); INIT_LIST_HEAD(&csdev->config_csdev_list); - spin_lock_init(&csdev->cscfg_csdev_lock); + raw_spin_lock_init(&csdev->cscfg_csdev_lock); return 0; } @@ -855,7 +855,7 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev) struct cscfg_feature_csdev *feat_csdev; unsigned long flags; - spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); if (list_empty(&csdev->feature_csdev_list)) goto unlock_exit; @@ -863,7 +863,7 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev) cscfg_reset_feat(feat_csdev); unlock_exit: - spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); } EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats); @@ -1059,7 +1059,7 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev, * Look for matching configuration - set the active configuration * context if found. 
*/ - spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); list_for_each_entry(config_csdev_item, &csdev->config_csdev_list, node) { config_desc = config_csdev_item->config_desc; if ((atomic_read(&config_desc->active_cnt)) && @@ -1069,7 +1069,7 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev, break; } } - spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); /* * If found, attempt to enable @@ -1090,12 +1090,12 @@ int cscfg_csdev_enable_active_config(struct coresight_device *csdev, * * Set enabled if OK, err if not. */ - spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); if (csdev->active_cscfg_ctxt) config_csdev_active->enabled = true; else err = -EBUSY; - spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); } } return err; @@ -1124,7 +1124,7 @@ void cscfg_csdev_disable_active_config(struct coresight_device *csdev) * If it was not enabled, we have no work to do, otherwise mark as disabled. * Clear the active config pointer. */ - spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); + raw_spin_lock_irqsave(&csdev->cscfg_csdev_lock, flags); config_csdev = (struct cscfg_config_csdev *)csdev->active_cscfg_ctxt; if (config_csdev) { if (!config_csdev->enabled) @@ -1133,7 +1133,7 @@ void cscfg_csdev_disable_active_config(struct coresight_device *csdev) config_csdev->enabled = false; } csdev->active_cscfg_ctxt = NULL; - spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); + raw_spin_unlock_irqrestore(&csdev->cscfg_csdev_lock, flags); /* true if there was an enabled active config */ if (config_csdev) diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c index f9444e2cb1d9..feadaf065b53 100644 --- a/drivers/hwtracing/coresight/coresight-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-sysfs.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include "coresight-priv.h" +#include "coresight-trace-id.h" /* * Use IDR to map the hash of the source's device name @@ -21,7 +22,7 @@ static DEFINE_IDR(path_idr); * When operating Coresight drivers from the sysFS interface, only a single * path can exist from a tracer (associated to a CPU) to a sink. 
*/ -static DEFINE_PER_CPU(struct list_head *, tracer_path); +static DEFINE_PER_CPU(struct coresight_path *, tracer_path); ssize_t coresight_simple_show_pair(struct device *_dev, struct device_attribute *attr, char *buf) @@ -52,7 +53,8 @@ ssize_t coresight_simple_show32(struct device *_dev, EXPORT_SYMBOL_GPL(coresight_simple_show32); static int coresight_enable_source_sysfs(struct coresight_device *csdev, - enum cs_mode mode, void *data) + enum cs_mode mode, + struct coresight_path *path) { int ret; @@ -63,7 +65,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev, */ lockdep_assert_held(&coresight_mutex); if (coresight_get_mode(csdev) != CS_MODE_SYSFS) { - ret = source_ops(csdev)->enable(csdev, data, mode); + ret = source_ops(csdev)->enable(csdev, NULL, mode, path); if (ret) return ret; } @@ -166,7 +168,7 @@ int coresight_enable_sysfs(struct coresight_device *csdev) { int cpu, ret = 0; struct coresight_device *sink; - struct list_head *path; + struct coresight_path *path; enum coresight_dev_subtype_source subtype; u32 hash; @@ -208,11 +210,15 @@ int coresight_enable_sysfs(struct coresight_device *csdev) goto out; } + coresight_path_assign_trace_id(path, CS_MODE_SYSFS); + if (!IS_VALID_CS_TRACE_ID(path->trace_id)) + goto err_path; + ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL); if (ret) goto err_path; - ret = coresight_enable_source_sysfs(csdev, CS_MODE_SYSFS, NULL); + ret = coresight_enable_source_sysfs(csdev, CS_MODE_SYSFS, path); if (ret) goto err_source; @@ -261,7 +267,7 @@ EXPORT_SYMBOL_GPL(coresight_enable_sysfs); void coresight_disable_sysfs(struct coresight_device *csdev) { int cpu, ret; - struct list_head *path = NULL; + struct coresight_path *path = NULL; u32 hash; mutex_lock(&coresight_mutex); @@ -377,7 +383,7 @@ static struct attribute *coresight_source_attrs[] = { }; ATTRIBUTE_GROUPS(coresight_source); -struct device_type coresight_dev_type[] = { +const struct device_type coresight_dev_type[] = { [CORESIGHT_DEV_TYPE_SINK] = { .name = "sink", .groups = coresight_sink_groups, diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index 72005b0c633e..a7814e8e657b 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -4,6 +4,7 @@ * Description: CoreSight Trace Memory Controller driver */ +#include <linux/acpi.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> @@ -22,8 +23,10 @@ #include <linux/spinlock.h> #include <linux/pm_runtime.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/coresight.h> #include <linux/amba/bus.h> +#include <linux/platform_device.h> #include "coresight-priv.h" #include "coresight-tmc.h" @@ -102,6 +105,128 @@ u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata) return mask; } +static bool is_tmc_crashdata_valid(struct tmc_drvdata *drvdata) +{ + struct tmc_crash_metadata *mdata; + + if (!tmc_has_reserved_buffer(drvdata) || + !tmc_has_crash_mdata_buffer(drvdata)) + return false; + + mdata = drvdata->crash_mdata.vaddr; + + /* Check version match */ + if (mdata->version != CS_CRASHDATA_VERSION) + return false; + + /* Check for valid metadata */ + if (!mdata->valid) { + dev_dbg(&drvdata->csdev->dev, + "Data invalid in tmc crash metadata\n"); + return false; + } + + /* + * Buffer address given by metadata for retrieval of trace data + * from previous boot is expected to be same as the reserved + * trace buffer memory region provided through DTS + */ + if 
(drvdata->resrv_buf.paddr != mdata->trace_paddr) { + dev_dbg(&drvdata->csdev->dev, + "Trace buffer address of previous boot invalid\n"); + return false; + } + + /* Check data integrity of metadata */ + if (mdata->crc32_mdata != find_crash_metadata_crc(mdata)) { + dev_err(&drvdata->csdev->dev, + "CRC mismatch in tmc crash metadata\n"); + return false; + } + /* Check data integrity of tracedata */ + if (mdata->crc32_tdata != find_crash_tracedata_crc(drvdata, mdata)) { + dev_err(&drvdata->csdev->dev, + "CRC mismatch in tmc crash tracedata\n"); + return false; + } + + return true; +} + +static inline ssize_t tmc_get_resvbuf_trace(struct tmc_drvdata *drvdata, + loff_t pos, size_t len, char **bufpp) +{ + s64 offset; + ssize_t actual = len; + struct tmc_resrv_buf *rbuf = &drvdata->resrv_buf; + + if (pos + actual > rbuf->len) + actual = rbuf->len - pos; + if (actual <= 0) + return 0; + + /* Compute the offset from which we read the data */ + offset = rbuf->offset + pos; + if (offset >= rbuf->size) + offset -= rbuf->size; + + /* Adjust the length to limit this transaction to end of buffer */ + actual = (actual < (rbuf->size - offset)) ? + actual : rbuf->size - offset; + + *bufpp = (char *)rbuf->vaddr + offset; + + return actual; +} + +static int tmc_prepare_crashdata(struct tmc_drvdata *drvdata) +{ + char *bufp; + ssize_t len; + u32 status, size; + u64 rrp, rwp, dba; + struct tmc_resrv_buf *rbuf; + struct tmc_crash_metadata *mdata; + + mdata = drvdata->crash_mdata.vaddr; + rbuf = &drvdata->resrv_buf; + + rrp = mdata->tmc_rrp; + rwp = mdata->tmc_rwp; + dba = mdata->tmc_dba; + status = mdata->tmc_sts; + size = mdata->tmc_ram_size << 2; + + /* Sync the buffer pointers */ + rbuf->offset = rrp - dba; + if (status & TMC_STS_FULL) + rbuf->len = size; + else + rbuf->len = rwp - rrp; + + /* Additional sanity checks for validating metadata */ + if ((rbuf->offset > size) || + (rbuf->len > size)) { + dev_dbg(&drvdata->csdev->dev, + "Offset and length invalid in tmc crash metadata\n"); + return -EINVAL; + } + + if (status & TMC_STS_FULL) { + len = tmc_get_resvbuf_trace(drvdata, 0x0, + CORESIGHT_BARRIER_PKT_SIZE, &bufp); + if (len >= CORESIGHT_BARRIER_PKT_SIZE) { + coresight_insert_barrier_packet(bufp); + /* Recalculate crc */ + mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, + mdata); + mdata->crc32_mdata = find_crash_metadata_crc(mdata); + } + } + + return 0; +} + static int tmc_read_prepare(struct tmc_drvdata *drvdata) { int ret = 0; @@ -218,7 +343,84 @@ static const struct file_operations tmc_fops = { .open = tmc_open, .read = tmc_read, .release = tmc_release, - .llseek = no_llseek, +}; + +static int tmc_crashdata_open(struct inode *inode, struct file *file) +{ + int err = 0; + unsigned long flags; + struct tmc_resrv_buf *rbuf; + struct tmc_crash_metadata *mdata; + struct tmc_drvdata *drvdata = container_of(file->private_data, + struct tmc_drvdata, + crashdev); + + mdata = drvdata->crash_mdata.vaddr; + rbuf = &drvdata->resrv_buf; + + raw_spin_lock_irqsave(&drvdata->spinlock, flags); + if (mdata->valid) + rbuf->reading = true; + else + err = -ENOENT; + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); + if (err) + goto exit; + + nonseekable_open(inode, file); + dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__); +exit: + return err; +} + +static ssize_t tmc_crashdata_read(struct file *file, char __user *data, + size_t len, loff_t *ppos) +{ + char *bufp; + ssize_t actual; + struct tmc_drvdata *drvdata = container_of(file->private_data, + struct tmc_drvdata, + crashdev); + + actual 
= tmc_get_resvbuf_trace(drvdata, *ppos, len, &bufp); + if (actual <= 0) + return 0; + + if (copy_to_user(data, bufp, actual)) { + dev_dbg(&drvdata->csdev->dev, + "%s: copy_to_user failed\n", __func__); + return -EFAULT; + } + + *ppos += actual; + dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual); + + return actual; +} + +static int tmc_crashdata_release(struct inode *inode, struct file *file) +{ + int ret = 0; + unsigned long flags; + struct tmc_resrv_buf *rbuf; + struct tmc_drvdata *drvdata = container_of(file->private_data, + struct tmc_drvdata, + crashdev); + + rbuf = &drvdata->resrv_buf; + raw_spin_lock_irqsave(&drvdata->spinlock, flags); + rbuf->reading = false; + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); + + dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__); + return ret; +} + +static const struct file_operations tmc_crashdata_fops = { + .owner = THIS_MODULE, + .open = tmc_crashdata_open, + .read = tmc_crashdata_read, + .release = tmc_crashdata_release, }; static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid) @@ -330,9 +532,40 @@ static ssize_t buffer_size_store(struct device *dev, static DEVICE_ATTR_RW(buffer_size); +static ssize_t stop_on_flush_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sprintf(buf, "%#x\n", drvdata->stop_on_flush); +} + +static ssize_t stop_on_flush_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + int ret; + u8 val; + struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); + + ret = kstrtou8(buf, 0, &val); + if (ret) + return ret; + if (val) + drvdata->stop_on_flush = true; + else + drvdata->stop_on_flush = false; + + return size; +} + +static DEVICE_ATTR_RW(stop_on_flush); + + static struct attribute *coresight_tmc_attrs[] = { &dev_attr_trigger_cntr.attr, &dev_attr_buffer_size.attr, + &dev_attr_stop_on_flush.attr, NULL, }; @@ -360,7 +593,32 @@ static const struct attribute_group *coresight_etr_groups[] = { static inline bool tmc_etr_can_use_sg(struct device *dev) { - return fwnode_property_present(dev->fwnode, "arm,scatter-gather"); + int ret; + u8 val_u8; + + /* + * Presence of the property 'arm,scatter-gather' is checked + * on the platform for the feature support, rather than its + * value. + */ + if (is_of_node(dev->fwnode)) { + return fwnode_property_present(dev->fwnode, "arm,scatter-gather"); + } else if (is_acpi_device_node(dev->fwnode)) { + /* + * TMC_DEVID_NOSCAT test in tmc_etr_setup_caps(), has already ensured + * this property is only checked for Coresight SoC 400 TMC configured + * as ETR. 
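The stop_on_flush knob above, together with the "resrv" buffer mode that buf_mode_preferred gains later in this patch, is driven entirely through sysfs. A minimal userspace sketch, assuming an ETR instance named tmc_etr0 under /sys/bus/coresight/devices (the instance name is an assumption, adjust for the actual device):

#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/bus/coresight/devices/tmc_etr0";
	char path[128];

	/* Stop trace capture once a flush completes (TMC_FFCR_STOP_ON_FLUSH) */
	snprintf(path, sizeof(path), "%s/stop_on_flush", base);
	if (write_sysfs(path, "1"))
		perror("stop_on_flush");

	/* Route the ETR into the reserved region (ETR_MODE_RESRV) */
	snprintf(path, sizeof(path), "%s/buf_mode_preferred", base);
	if (write_sysfs(path, "resrv"))
		perror("buf_mode_preferred");

	return 0;
}

With both set, trace written to the reserved region is meant to survive a crash and be read back through the crash interface added elsewhere in this patch.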
+ */ + ret = fwnode_property_read_u8(dev->fwnode, "arm-armhc97c-sg-enable", &val_u8); + if (!ret) + return !!val_u8; + + if (fwnode_property_present(dev->fwnode, "arm,scatter-gather")) { + pr_warn_once("Deprecated ACPI property - arm,scatter-gather\n"); + return true; + } + } + return false; } static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata) @@ -370,16 +628,84 @@ static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata) return (auth & TMC_AUTH_NSID_MASK) == 0x3; } +static const struct amba_id tmc_ids[]; + +static int of_tmc_get_reserved_resource_by_name(struct device *dev, + const char *name, + struct resource *res) +{ + int index, rc = -ENODEV; + struct device_node *node; + + if (!is_of_node(dev->fwnode)) + return -ENODEV; + + index = of_property_match_string(dev->of_node, "memory-region-names", + name); + if (index < 0) + return rc; + + node = of_parse_phandle(dev->of_node, "memory-region", index); + if (!node) + return rc; + + if (!of_address_to_resource(node, 0, res) && + res->start != 0 && resource_size(res) != 0) + rc = 0; + of_node_put(node); + + return rc; +} + +static void tmc_get_reserved_region(struct device *parent) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(parent); + struct resource res; + + if (of_tmc_get_reserved_resource_by_name(parent, "tracedata", &res)) + return; + + drvdata->resrv_buf.vaddr = memremap(res.start, + resource_size(&res), + MEMREMAP_WC); + if (IS_ERR_OR_NULL(drvdata->resrv_buf.vaddr)) { + dev_err(parent, "Reserved trace buffer mapping failed\n"); + return; + } + + drvdata->resrv_buf.paddr = res.start; + drvdata->resrv_buf.size = resource_size(&res); + + if (of_tmc_get_reserved_resource_by_name(parent, "metadata", &res)) + return; + + drvdata->crash_mdata.vaddr = memremap(res.start, + resource_size(&res), + MEMREMAP_WC); + if (IS_ERR_OR_NULL(drvdata->crash_mdata.vaddr)) { + dev_err(parent, "Metadata memory mapping failed\n"); + return; + } + + drvdata->crash_mdata.paddr = res.start; + drvdata->crash_mdata.size = resource_size(&res); +} + /* Detect and initialise the capabilities of a TMC ETR */ -static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps) +static int tmc_etr_setup_caps(struct device *parent, u32 devid, + struct csdev_access *access) { int rc; - u32 dma_mask = 0; + u32 tmc_pid, dma_mask = 0; struct tmc_drvdata *drvdata = dev_get_drvdata(parent); + void *dev_caps; if (!tmc_etr_has_non_secure_access(drvdata)) return -EACCES; + tmc_pid = coresight_get_pid(access); + dev_caps = coresight_get_uci_data_from_amba(tmc_ids, tmc_pid); + /* Set the unadvertised capabilities */ tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps); @@ -437,24 +763,33 @@ static u32 tmc_etr_get_max_burst_size(struct device *dev) return burst_size; } -static int tmc_probe(struct amba_device *adev, const struct amba_id *id) +static void register_crash_dev_interface(struct tmc_drvdata *drvdata, + const char *name) +{ + drvdata->crashdev.name = + devm_kasprintf(&drvdata->csdev->dev, GFP_KERNEL, "%s_%s", "crash", name); + drvdata->crashdev.minor = MISC_DYNAMIC_MINOR; + drvdata->crashdev.fops = &tmc_crashdata_fops; + if (misc_register(&drvdata->crashdev)) { + dev_dbg(&drvdata->csdev->dev, + "Failed to setup user interface for crashdata\n"); + drvdata->crashdev.fops = NULL; + } else + dev_info(&drvdata->csdev->dev, + "Valid crash tracedata found\n"); +} + +static int __tmc_probe(struct device *dev, struct resource *res) { int ret = 0; u32 devid; void __iomem *base; - struct device *dev = &adev->dev; 
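register_crash_dev_interface() above publishes a valid snapshot as a misc device named "crash_" followed by the coresight device name, for example /dev/crash_tmc_etf0 (the instance number here is an assumption). A hedged reader sketch: open() fails with ENOENT when no valid crash data was recorded, and read() hands the trace back in chunks until it returns 0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* Node name is an assumption: "crash_" + the coresight device name */
	int in = open("/dev/crash_tmc_etf0", O_RDONLY);
	int out = open("trace.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (in < 0 || out < 0) {
		perror("open");	/* ENOENT can also mean no valid crash data */
		return 1;
	}

	/* The driver returns the data chunk by chunk and 0 once drained */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n)
			break;

	close(out);
	close(in);
	return 0;
}

The resulting file holds the raw trace bytes preserved in the reserved region and should be decodable like a normal ETF/ETR dump.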
struct coresight_platform_data *pdata = NULL; - struct tmc_drvdata *drvdata; - struct resource *res = &adev->res; + struct tmc_drvdata *drvdata = dev_get_drvdata(dev); struct coresight_desc desc = { 0 }; struct coresight_dev_list *dev_list = NULL; ret = -ENOMEM; - drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); - if (!drvdata) - goto out; - - dev_set_drvdata(dev, drvdata); /* Validity for the resource is already checked by the AMBA core */ base = devm_ioremap_resource(dev, res); @@ -466,7 +801,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) drvdata->base = base; desc.access = CSDEV_ACCESS_IOMEM(base); - spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock); devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); drvdata->config_type = BMVAL(devid, 6, 7); @@ -482,6 +817,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4; } + tmc_get_reserved_region(dev); + desc.dev = dev; switch (drvdata->config_type) { @@ -497,8 +834,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) desc.type = CORESIGHT_DEV_TYPE_SINK; desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM; desc.ops = &tmc_etr_cs_ops; - ret = tmc_etr_setup_caps(dev, devid, - coresight_get_uci_data(id)); + ret = tmc_etr_setup_caps(dev, devid, &desc.access); if (ret) goto out; idr_init(&drvdata->idr); @@ -530,7 +866,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) ret = PTR_ERR(pdata); goto out; } - adev->dev.platform_data = pdata; + dev->platform_data = pdata; desc.pdata = pdata; drvdata->csdev = coresight_register(&desc); @@ -543,11 +879,32 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) drvdata->miscdev.minor = MISC_DYNAMIC_MINOR; drvdata->miscdev.fops = &tmc_fops; ret = misc_register(&drvdata->miscdev); - if (ret) + if (ret) { coresight_unregister(drvdata->csdev); - else - pm_runtime_put(&adev->dev); + goto out; + } + out: + if (is_tmc_crashdata_valid(drvdata) && + !tmc_prepare_crashdata(drvdata)) + register_crash_dev_interface(drvdata, desc.name); + return ret; +} + +static int tmc_probe(struct amba_device *adev, const struct amba_id *id) +{ + struct tmc_drvdata *drvdata; + int ret; + + drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + amba_set_drvdata(adev, drvdata); + ret = __tmc_probe(&adev->dev, &adev->res); + if (!ret) + pm_runtime_put(&adev->dev); + return ret; } @@ -556,7 +913,7 @@ static void tmc_shutdown(struct amba_device *adev) unsigned long flags; struct tmc_drvdata *drvdata = amba_get_drvdata(adev); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED) goto out; @@ -570,12 +927,12 @@ static void tmc_shutdown(struct amba_device *adev) * the system is going down after this. */ out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); } -static void tmc_remove(struct amba_device *adev) +static void __tmc_remove(struct device *dev) { - struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev); + struct tmc_drvdata *drvdata = dev_get_drvdata(dev); /* * Since misc_open() holds a refcount on the f_ops, which is @@ -583,9 +940,16 @@ static void tmc_remove(struct amba_device *adev) * handler to this device is closed. 
*/ misc_deregister(&drvdata->miscdev); + if (drvdata->crashdev.fops) + misc_deregister(&drvdata->crashdev); coresight_unregister(drvdata->csdev); } +static void tmc_remove(struct amba_device *adev) +{ + __tmc_remove(&adev->dev); +} + static const struct amba_id tmc_ids[] = { CS_AMBA_ID(0x000bb961), /* Coresight SoC 600 TMC-ETR/ETS */ @@ -602,7 +966,6 @@ MODULE_DEVICE_TABLE(amba, tmc_ids); static struct amba_driver tmc_driver = { .drv = { .name = "coresight-tmc", - .owner = THIS_MODULE, .suppress_bind_attrs = true, }, .probe = tmc_probe, @@ -611,7 +974,101 @@ static struct amba_driver tmc_driver = { .id_table = tmc_ids, }; -module_amba_driver(tmc_driver); +static int tmc_platform_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct tmc_drvdata *drvdata; + int ret = 0; + + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + + drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev); + if (IS_ERR(drvdata->pclk)) + return -ENODEV; + + dev_set_drvdata(&pdev->dev, drvdata); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = __tmc_probe(&pdev->dev, res); + pm_runtime_put(&pdev->dev); + if (ret) + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static void tmc_platform_remove(struct platform_device *pdev) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(&pdev->dev); + + if (WARN_ON(!drvdata)) + return; + + __tmc_remove(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); +} + +#ifdef CONFIG_PM +static int tmc_runtime_suspend(struct device *dev) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); + return 0; +} + +static int tmc_runtime_resume(struct device *dev) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(dev); + + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_prepare_enable(drvdata->pclk); + return 0; +} +#endif + +static const struct dev_pm_ops tmc_dev_pm_ops = { + SET_RUNTIME_PM_OPS(tmc_runtime_suspend, tmc_runtime_resume, NULL) +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id tmc_acpi_ids[] = { + {"ARMHC501", 0, 0, 0}, /* ARM CoreSight ETR */ + {"ARMHC97C", 0, 0, 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */ + {}, +}; +MODULE_DEVICE_TABLE(acpi, tmc_acpi_ids); +#endif + +static struct platform_driver tmc_platform_driver = { + .probe = tmc_platform_probe, + .remove = tmc_platform_remove, + .driver = { + .name = "coresight-tmc-platform", + .acpi_match_table = ACPI_PTR(tmc_acpi_ids), + .suppress_bind_attrs = true, + .pm = &tmc_dev_pm_ops, + }, +}; + +static int __init tmc_init(void) +{ + return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver); +} + +static void __exit tmc_exit(void) +{ + coresight_remove_driver(&tmc_driver, &tmc_platform_driver); +} +module_init(tmc_init); +module_exit(tmc_exit); MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>"); MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver"); diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index d4f641cd9de6..d858740001c2 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -19,6 +19,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev, static int __tmc_etb_enable_hw(struct tmc_drvdata 
*drvdata) { int rc = 0; + u32 ffcr; CS_UNLOCK(drvdata->base); @@ -32,10 +33,12 @@ static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata) } writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE); - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | - TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, - drvdata->base + TMC_FFCR); + + ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN | + TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN; + if (drvdata->stop_on_flush) + ffcr |= TMC_FFCR_STOP_ON_FLUSH; + writel_relaxed(ffcr, drvdata->base + TMC_FFCR); writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); tmc_enable_hw(drvdata); @@ -182,9 +185,9 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) * If we don't have a buffer release the lock and allocate memory. * Otherwise keep the lock and move along. */ - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (!drvdata->buf) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); /* Allocating the memory here while outside of the spinlock */ buf = kzalloc(drvdata->size, GFP_KERNEL); @@ -192,7 +195,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) return -ENOMEM; /* Let's try again */ - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); } if (drvdata->reading) { @@ -225,7 +228,6 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) used = true; drvdata->buf = buf; } - ret = tmc_etb_enable_hw(drvdata); if (!ret) { coresight_set_mode(csdev, CS_MODE_SYSFS); @@ -235,7 +237,7 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev) used = false; } out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); /* Free memory outside the spinlock if need be */ if (!used) @@ -253,7 +255,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data) struct perf_output_handle *handle = data; struct cs_buffers *buf = etm_perf_sink_config(handle); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); do { ret = -EINVAL; if (drvdata->reading) @@ -296,7 +298,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data) csdev->refcnt++; } } while (0); - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; } @@ -331,16 +333,16 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev) unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EBUSY; } csdev->refcnt--; if (csdev->refcnt) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EBUSY; } @@ -351,7 +353,7 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev) drvdata->pid = -1; coresight_set_mode(csdev, CS_MODE_DISABLED); - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n"); return 0; @@ -366,9 +368,9 @@ static int tmc_enable_etf_link(struct coresight_device *csdev, 
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); bool first_enable = false; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EBUSY; } @@ -381,7 +383,7 @@ static int tmc_enable_etf_link(struct coresight_device *csdev, } if (!ret) csdev->refcnt++; - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); if (first_enable) dev_dbg(&csdev->dev, "TMC-ETF enabled\n"); @@ -396,9 +398,9 @@ static void tmc_disable_etf_link(struct coresight_device *csdev, struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); bool last_disable = false; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return; } @@ -408,7 +410,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev, coresight_set_mode(csdev, CS_MODE_DISABLED); last_disable = true; } - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); if (last_disable) dev_dbg(&csdev->dev, "TMC-ETF disabled\n"); @@ -488,7 +490,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev, if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF)) return 0; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* Don't do anything if another tracer is using this sink */ if (csdev->refcnt != 1) @@ -585,11 +587,86 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev, */ CS_LOCK(drvdata->base); out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return to_read; } +static int tmc_panic_sync_etf(struct coresight_device *csdev) +{ + u32 val; + struct tmc_crash_metadata *mdata; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr; + + /* Make sure we have valid reserved memory */ + if (!tmc_has_reserved_buffer(drvdata) || + !tmc_has_crash_mdata_buffer(drvdata)) + return 0; + + tmc_crashdata_set_invalid(drvdata); + + CS_UNLOCK(drvdata->base); + + /* Proceed only if ETF is enabled or configured as sink */ + val = readl(drvdata->base + TMC_CTL); + if (!(val & TMC_CTL_CAPT_EN)) + goto out; + val = readl(drvdata->base + TMC_MODE); + if (val != TMC_MODE_CIRCULAR_BUFFER) + goto out; + + val = readl(drvdata->base + TMC_FFSR); + /* Do manual flush and stop only if its not auto-stopped */ + if (!(val & TMC_FFSR_FT_STOPPED)) { + dev_dbg(&csdev->dev, + "%s: Triggering manual flush\n", __func__); + tmc_flush_and_stop(drvdata); + } else + tmc_wait_for_tmcready(drvdata); + + /* Sync registers from hardware to metadata region */ + mdata->tmc_sts = readl(drvdata->base + TMC_STS); + mdata->tmc_mode = readl(drvdata->base + TMC_MODE); + mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR); + mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR); + + /* Sync Internal SRAM to reserved trace buffer region */ + drvdata->buf = drvdata->resrv_buf.vaddr; + tmc_etb_dump_hw(drvdata); + /* Store as per RSZ register convention */ + mdata->tmc_ram_size = drvdata->len >> 2; + + /* Other fields for processing trace buffer reads */ + mdata->tmc_rrp = 0; + 
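	/*
	 * The pointer fields stored here are synthetic rather than raw
	 * register reads: the ETF SRAM has just been dumped linearly into
	 * the reserved buffer, so RRP/DBA are recorded as 0 and RWP as the
	 * number of bytes dumped.  On the next boot tmc_prepare_crashdata()
	 * recovers offset = rrp - dba = 0 and len = rwp - rrp (or the full
	 * size when TMC_STS_FULL is set).  For example, a 4 KiB dump is
	 * stored as tmc_ram_size = 0x400 words with rwp = 0x1000, so the
	 * whole dump is offered to the crashdata reader.
	 */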
mdata->tmc_dba = 0; + mdata->tmc_rwp = drvdata->len; + mdata->trace_paddr = drvdata->resrv_buf.paddr; + + mdata->version = CS_CRASHDATA_VERSION; + + /* + * Make sure all previous writes are ordered, + * before we mark valid + */ + dmb(sy); + mdata->valid = true; + /* + * Below order need to maintained, since crc of metadata + * is dependent on first + */ + mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata); + mdata->crc32_mdata = find_crash_metadata_crc(mdata); + + tmc_disable_hw(drvdata); + + dev_dbg(&csdev->dev, "%s: success\n", __func__); +out: + CS_UNLOCK(drvdata->base); + return 0; +} + static const struct coresight_ops_sink tmc_etf_sink_ops = { .enable = tmc_enable_etf_sink, .disable = tmc_disable_etf_sink, @@ -603,6 +680,10 @@ static const struct coresight_ops_link tmc_etf_link_ops = { .disable = tmc_disable_etf_link, }; +static const struct coresight_ops_panic tmc_etf_sync_ops = { + .sync = tmc_panic_sync_etf, +}; + const struct coresight_ops tmc_etb_cs_ops = { .sink_ops = &tmc_etf_sink_ops, }; @@ -610,6 +691,7 @@ const struct coresight_ops tmc_etb_cs_ops = { const struct coresight_ops tmc_etf_cs_ops = { .sink_ops = &tmc_etf_sink_ops, .link_ops = &tmc_etf_link_ops, + .panic_ops = &tmc_etf_sync_ops, }; int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) @@ -623,7 +705,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) drvdata->config_type != TMC_CONFIG_TYPE_ETF)) return -EINVAL; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { ret = -EBUSY; @@ -655,7 +737,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata) drvdata->reading = true; out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; } @@ -672,14 +754,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) drvdata->config_type != TMC_CONFIG_TYPE_ETF)) return -EINVAL; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* Re-enable the TMC if need be */ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) { /* There is no point in reading a TMC in HW FIFO mode */ mode = readl_relaxed(drvdata->base + TMC_MODE); if (mode != TMC_MODE_CIRCULAR_BUFFER) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EINVAL; } /* @@ -693,7 +775,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) memset(drvdata->buf, 0, drvdata->size); rc = __tmc_etb_enable_hw(drvdata); if (rc) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return rc; } } else { @@ -706,7 +788,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) } drvdata->reading = false; - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); /* * Free allocated memory outside of the spinlock. There is no need diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index e75428fa1592..76a8cb29b68a 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -30,13 +30,15 @@ struct etr_buf_hw { bool has_iommu; bool has_etr_sg; bool has_catu; + bool has_resrv; }; /* * etr_perf_buffer - Perf buffer used for ETR * @drvdata - The ETR drvdaga this buffer has been allocated for. 
* @etr_buf - Actual buffer used by the ETR - * @pid - The PID this etr_perf_buffer belongs to. + * @pid - The PID of the session owner that etr_perf_buffer + * belongs to. * @snaphost - Perf session mode * @nr_pages - Number of pages in the ring buffer. * @pages - Array of Pages in the ring buffer. @@ -261,6 +263,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table) { tmc_free_table_pages(sg_table); tmc_free_data_pages(sg_table); + kfree(sg_table); } EXPORT_SYMBOL_GPL(tmc_free_sg_table); @@ -342,7 +345,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev, rc = tmc_alloc_table_pages(sg_table); if (rc) { tmc_free_sg_table(sg_table); - kfree(sg_table); return ERR_PTR(rc); } @@ -695,6 +697,75 @@ static const struct etr_buf_operations etr_flat_buf_ops = { }; /* + * tmc_etr_alloc_resrv_buf: Allocate a contiguous DMA buffer from reserved region. + */ +static int tmc_etr_alloc_resrv_buf(struct tmc_drvdata *drvdata, + struct etr_buf *etr_buf, int node, + void **pages) +{ + struct etr_flat_buf *resrv_buf; + struct device *real_dev = drvdata->csdev->dev.parent; + + /* We cannot reuse existing pages for resrv buf */ + if (pages) + return -EINVAL; + + resrv_buf = kzalloc(sizeof(*resrv_buf), GFP_KERNEL); + if (!resrv_buf) + return -ENOMEM; + + resrv_buf->daddr = dma_map_resource(real_dev, drvdata->resrv_buf.paddr, + drvdata->resrv_buf.size, + DMA_FROM_DEVICE, 0); + if (dma_mapping_error(real_dev, resrv_buf->daddr)) { + dev_err(real_dev, "failed to map source buffer address\n"); + kfree(resrv_buf); + return -ENOMEM; + } + + resrv_buf->vaddr = drvdata->resrv_buf.vaddr; + resrv_buf->size = etr_buf->size = drvdata->resrv_buf.size; + resrv_buf->dev = &drvdata->csdev->dev; + etr_buf->hwaddr = resrv_buf->daddr; + etr_buf->mode = ETR_MODE_RESRV; + etr_buf->private = resrv_buf; + return 0; +} + +static void tmc_etr_free_resrv_buf(struct etr_buf *etr_buf) +{ + struct etr_flat_buf *resrv_buf = etr_buf->private; + + if (resrv_buf && resrv_buf->daddr) { + struct device *real_dev = resrv_buf->dev->parent; + + dma_unmap_resource(real_dev, resrv_buf->daddr, + resrv_buf->size, DMA_FROM_DEVICE, 0); + } + kfree(resrv_buf); +} + +static void tmc_etr_sync_resrv_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp) +{ + /* + * Adjust the buffer to point to the beginning of the trace data + * and update the available trace data. + */ + etr_buf->offset = rrp - etr_buf->hwaddr; + if (etr_buf->full) + etr_buf->len = etr_buf->size; + else + etr_buf->len = rwp - rrp; +} + +static const struct etr_buf_operations etr_resrv_buf_ops = { + .alloc = tmc_etr_alloc_resrv_buf, + .free = tmc_etr_free_resrv_buf, + .sync = tmc_etr_sync_resrv_buf, + .get_data = tmc_etr_get_data_flat_buf, +}; + +/* * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters * appropriately. 
*/ @@ -800,6 +871,7 @@ static const struct etr_buf_operations *etr_buf_ops[] = { [ETR_MODE_FLAT] = &etr_flat_buf_ops, [ETR_MODE_ETR_SG] = &etr_sg_buf_ops, [ETR_MODE_CATU] = NULL, + [ETR_MODE_RESRV] = &etr_resrv_buf_ops }; void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu) @@ -825,6 +897,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode, case ETR_MODE_FLAT: case ETR_MODE_ETR_SG: case ETR_MODE_CATU: + case ETR_MODE_RESRV: if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc) rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, node, pages); @@ -843,6 +916,7 @@ static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw) buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent); buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata); + buf_hw->has_resrv = tmc_has_reserved_buffer(drvdata); } static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size) @@ -986,7 +1060,7 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata) static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata) { - u32 axictl, sts; + u32 axictl, sts, ffcr; struct etr_buf *etr_buf = drvdata->etr_buf; int rc = 0; @@ -1032,10 +1106,12 @@ static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata) writel_relaxed(sts, drvdata->base + TMC_STS); } - writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | - TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT | - TMC_FFCR_TRIGON_TRIGIN, - drvdata->base + TMC_FFCR); + ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN | + TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN; + if (drvdata->stop_on_flush) + ffcr |= TMC_FFCR_STOP_ON_FLUSH; + writel_relaxed(ffcr, drvdata->base + TMC_FFCR); + writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG); tmc_enable_hw(drvdata); @@ -1175,10 +1251,10 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev) * buffer, provided the size matches. Any allocation has to be done * with the lock released. */ - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); sysfs_buf = READ_ONCE(drvdata->sysfs_buf); if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); /* Allocate memory with the locks released */ free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata); @@ -1186,7 +1262,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev) return new_buf; /* Let's try again */ - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); } if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) { @@ -1205,7 +1281,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev) } out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); /* Free memory outside the spinlock if need be */ if (free_buf) @@ -1223,7 +1299,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) if (IS_ERR(sysfs_buf)) return PTR_ERR(sysfs_buf); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* * In sysFS mode we can have multiple writers per sink. 
Since this @@ -1242,7 +1318,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev) } out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); if (!ret) dev_dbg(&csdev->dev, "TMC-ETR enabled\n"); @@ -1561,17 +1637,17 @@ tmc_update_etr_buffer(struct coresight_device *csdev, struct etr_perf_buffer *etr_perf = config; struct etr_buf *etr_buf = etr_perf->etr_buf; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* Don't do anything if another tracer is using this sink */ if (csdev->refcnt != 1) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); goto out; } if (WARN_ON(drvdata->perf_buf != etr_buf)) { lost = true; - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); goto out; } @@ -1581,7 +1657,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev, tmc_sync_etr_buf(drvdata); CS_LOCK(drvdata->base); - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); lost = etr_buf->full; offset = etr_buf->offset; @@ -1650,7 +1726,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) struct perf_output_handle *handle = data; struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* Don't use this sink if it is already claimed by sysFS */ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) { rc = -EBUSY; @@ -1662,7 +1738,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) goto unlock_out; } - /* Get a handle on the pid of the process to monitor */ + /* Get a handle on the pid of the session owner */ pid = etr_perf->pid; /* Do not proceed if this device is associated with another session */ @@ -1690,7 +1766,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) } unlock_out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return rc; } @@ -1712,16 +1788,16 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev) unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EBUSY; } csdev->refcnt--; if (csdev->refcnt) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return -EBUSY; } @@ -1734,12 +1810,80 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev) /* Reset perf specific data */ drvdata->perf_buf = NULL; - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); dev_dbg(&csdev->dev, "TMC-ETR disabled\n"); return 0; } +static int tmc_panic_sync_etr(struct coresight_device *csdev) +{ + u32 val; + struct tmc_crash_metadata *mdata; + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + + mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr; + + if (!drvdata->etr_buf) + return 0; + + /* Being in RESRV mode implies valid reserved memory as well */ + if (drvdata->etr_buf->mode != ETR_MODE_RESRV) + return 0; + + if 
(!tmc_has_crash_mdata_buffer(drvdata)) + return 0; + + CS_UNLOCK(drvdata->base); + + /* Proceed only if ETR is enabled */ + val = readl(drvdata->base + TMC_CTL); + if (!(val & TMC_CTL_CAPT_EN)) + goto out; + + val = readl(drvdata->base + TMC_FFSR); + /* Do manual flush and stop only if its not auto-stopped */ + if (!(val & TMC_FFSR_FT_STOPPED)) { + dev_dbg(&csdev->dev, + "%s: Triggering manual flush\n", __func__); + tmc_flush_and_stop(drvdata); + } else + tmc_wait_for_tmcready(drvdata); + + /* Sync registers from hardware to metadata region */ + mdata->tmc_ram_size = readl(drvdata->base + TMC_RSZ); + mdata->tmc_sts = readl(drvdata->base + TMC_STS); + mdata->tmc_mode = readl(drvdata->base + TMC_MODE); + mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR); + mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR); + mdata->tmc_rrp = tmc_read_rrp(drvdata); + mdata->tmc_rwp = tmc_read_rwp(drvdata); + mdata->tmc_dba = tmc_read_dba(drvdata); + mdata->trace_paddr = drvdata->resrv_buf.paddr; + mdata->version = CS_CRASHDATA_VERSION; + + /* + * Make sure all previous writes are ordered, + * before we mark valid + */ + dmb(sy); + mdata->valid = true; + /* + * Below order need to maintained, since crc of metadata + * is dependent on first + */ + mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata); + mdata->crc32_mdata = find_crash_metadata_crc(mdata); + + tmc_disable_hw(drvdata); + + dev_dbg(&csdev->dev, "%s: success\n", __func__); +out: + CS_UNLOCK(drvdata->base); + + return 0; +} + static const struct coresight_ops_sink tmc_etr_sink_ops = { .enable = tmc_enable_etr_sink, .disable = tmc_disable_etr_sink, @@ -1748,8 +1892,13 @@ static const struct coresight_ops_sink tmc_etr_sink_ops = { .free_buffer = tmc_free_etr_buffer, }; +static const struct coresight_ops_panic tmc_etr_sync_ops = { + .sync = tmc_panic_sync_etr, +}; + const struct coresight_ops tmc_etr_cs_ops = { .sink_ops = &tmc_etr_sink_ops, + .panic_ops = &tmc_etr_sync_ops, }; int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) @@ -1761,7 +1910,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) return -EINVAL; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { ret = -EBUSY; goto out; @@ -1783,7 +1932,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) drvdata->reading = true; out: - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); return ret; } @@ -1797,7 +1946,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) return -EINVAL; - spin_lock_irqsave(&drvdata->spinlock, flags); + raw_spin_lock_irqsave(&drvdata->spinlock, flags); /* RE-enable the TMC if need be */ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) { @@ -1817,7 +1966,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) } drvdata->reading = false; - spin_unlock_irqrestore(&drvdata->spinlock, flags); + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); /* Free allocated memory out side of the spinlock */ if (sysfs_buf) @@ -1830,6 +1979,7 @@ static const char *const buf_modes_str[] = { [ETR_MODE_FLAT] = "flat", [ETR_MODE_ETR_SG] = "tmc-sg", [ETR_MODE_CATU] = "catu", + [ETR_MODE_RESRV] = "resrv", [ETR_MODE_AUTO] = "auto", }; @@ -1848,6 +1998,9 @@ static ssize_t buf_modes_available_show(struct device *dev, if (buf_hw.has_catu) size += sysfs_emit_at(buf, size, "%s ", 
buf_modes_str[ETR_MODE_CATU]); + if (buf_hw.has_resrv) + size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_RESRV]); + size += sysfs_emit_at(buf, size, "\n"); return size; } @@ -1861,6 +2014,26 @@ static ssize_t buf_mode_preferred_show(struct device *dev, return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]); } +static int buf_mode_set_resrv(struct tmc_drvdata *drvdata) +{ + int err = -EBUSY; + unsigned long flags; + struct tmc_resrv_buf *rbuf; + + rbuf = &drvdata->resrv_buf; + + /* Ensure there are no active crashdata read sessions */ + raw_spin_lock_irqsave(&drvdata->spinlock, flags); + if (!rbuf->reading) { + tmc_crashdata_set_invalid(drvdata); + rbuf->len = 0; + drvdata->etr_mode = ETR_MODE_RESRV; + err = 0; + } + raw_spin_unlock_irqrestore(&drvdata->spinlock, flags); + return err; +} + static ssize_t buf_mode_preferred_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) @@ -1875,6 +2048,8 @@ static ssize_t buf_mode_preferred_store(struct device *dev, drvdata->etr_mode = ETR_MODE_ETR_SG; else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu) drvdata->etr_mode = ETR_MODE_CATU; + else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_RESRV]) && buf_hw.has_resrv) + return buf_mode_set_resrv(drvdata) ? : size; else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO])) drvdata->etr_mode = ETR_MODE_AUTO; else diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index cef979c897e6..6541a27a018e 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -12,6 +12,7 @@ #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/refcount.h> +#include <linux/crc32.h> #define TMC_RSZ 0x004 #define TMC_STS 0x00c @@ -76,6 +77,9 @@ #define TMC_AXICTL_AXCACHE_OS (0xf << 2) #define TMC_AXICTL_ARCACHE_OS (0xf << 16) +/* TMC_FFSR - 0x300 */ +#define TMC_FFSR_FT_STOPPED BIT(1) + /* TMC_FFCR - 0x304 */ #define TMC_FFCR_FLUSHMAN_BIT 6 #define TMC_FFCR_EN_FMT BIT(0) @@ -94,6 +98,9 @@ #define TMC_AUTH_NSID_MASK GENMASK(1, 0) +/* Major version 1 Minor version 0 */ +#define CS_CRASHDATA_VERSION (1 << 16) + enum tmc_config_type { TMC_CONFIG_TYPE_ETB, TMC_CONFIG_TYPE_ETR, @@ -131,10 +138,30 @@ enum tmc_mem_intf_width { #define CORESIGHT_SOC_600_ETR_CAPS \ (TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE) +/* TMC metadata region for ETR and ETF configurations */ +struct tmc_crash_metadata { + uint32_t crc32_mdata; /* crc of metadata */ + uint32_t crc32_tdata; /* crc of tracedata */ + uint32_t version; /* 31:16 Major version, 15:0 Minor version */ + uint32_t valid; /* Indicate if this ETF/ETR was enabled */ + uint32_t tmc_ram_size; /* Ram Size register */ + uint32_t tmc_sts; /* Status register */ + uint32_t tmc_mode; /* Mode register */ + uint32_t tmc_ffcr; /* Formatter and flush control register */ + uint32_t tmc_ffsr; /* Formatter and flush status register */ + uint32_t reserved32; + uint64_t tmc_rrp; /* Ram Read pointer register */ + uint64_t tmc_rwp; /* Ram Write pointer register */ + uint64_t tmc_dba; /* Data buffer address register */ + uint64_t trace_paddr; /* Phys address of trace buffer */ + uint64_t reserved64[3]; +}; + enum etr_mode { ETR_MODE_FLAT, /* Uses contiguous flat buffer */ ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */ ETR_MODE_CATU, /* Use SG mechanism in CATU */ + ETR_MODE_RESRV, /* Use reserved region contiguous buffer */ ETR_MODE_AUTO, /* Use the default mechanism */ }; @@ -165,13 +192,35 @@ struct 
etr_buf { }; /** + * @paddr : Start address of reserved memory region. + * @vaddr : Corresponding CPU virtual address. + * @size : Size of reserved memory region. + * @offset : Offset of the trace data in the buffer for consumption. + * @reading : Flag to indicate if reading is active + * @len : Available trace data @buf (may round up to the beginning). + */ +struct tmc_resrv_buf { + phys_addr_t paddr; + void *vaddr; + size_t size; + unsigned long offset; + bool reading; + s64 len; +}; + +/** * struct tmc_drvdata - specifics associated to an TMC component + * @pclk: APB clock if present, otherwise NULL * @base: memory mapped base address for this component. * @csdev: component vitals needed by the framework. * @miscdev: specifics to handle "/dev/xyz.tmc" entry. + * @crashdev: specifics to handle "/dev/crash_tmc_xyz" entry for reading + * crash tracedata. * @spinlock: only one at a time pls. - * @pid: Process ID of the process being monitored by the session - * that is using this component. + * @pid: Process ID of the process that owns the session that is using + * this component. For example this would be the pid of the Perf + * process. + * @stop_on_flush: Stop on flush trigger user configuration. * @buf: Snapshot of the trace data for ETF/ETB. * @etr_buf: details of buffer used in TMC-ETR * @len: size of the available trace for ETF/ETB. @@ -187,14 +236,23 @@ struct etr_buf { * @idr_mutex: Access serialisation for idr. * @sysfs_buf: SYSFS buffer for ETR. * @perf_buf: PERF buffer for ETR. + * @resrv_buf: Used by ETR as hardware trace buffer and for trace data + * retention (after crash) only when ETR_MODE_RESRV buffer + * mode is enabled. Used by ETF for trace data retention + * (after crash) by default. + * @crash_mdata: Reserved memory for storing tmc crash metadata. + * Used by ETR/ETF. 
*/ struct tmc_drvdata { + struct clk *pclk; void __iomem *base; struct coresight_device *csdev; struct miscdevice miscdev; - spinlock_t spinlock; + struct miscdevice crashdev; + raw_spinlock_t spinlock; pid_t pid; bool reading; + bool stop_on_flush; union { char *buf; /* TMC ETB */ struct etr_buf *etr_buf; /* TMC ETR */ @@ -211,6 +269,8 @@ struct tmc_drvdata { struct mutex idr_mutex; struct etr_buf *sysfs_buf; struct etr_buf *perf_buf; + struct tmc_resrv_buf resrv_buf; + struct tmc_resrv_buf crash_mdata; }; struct etr_buf_operations { @@ -260,6 +320,7 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata); void tmc_enable_hw(struct tmc_drvdata *drvdata); void tmc_disable_hw(struct tmc_drvdata *drvdata); u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata); +int tmc_read_prepare_crashdata(struct tmc_drvdata *drvdata); /* ETB/ETF functions */ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata); @@ -322,12 +383,58 @@ void tmc_sg_table_sync_data_range(struct tmc_sg_table *table, u64 offset, u64 size); ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table, u64 offset, size_t len, char **bufpp); + static inline unsigned long tmc_sg_table_buf_size(struct tmc_sg_table *sg_table) { return (unsigned long)sg_table->data_pages.nr_pages << PAGE_SHIFT; } +static inline bool tmc_has_reserved_buffer(struct tmc_drvdata *drvdata) +{ + if (drvdata->resrv_buf.vaddr && + drvdata->resrv_buf.size) + return true; + return false; +} + +static inline bool tmc_has_crash_mdata_buffer(struct tmc_drvdata *drvdata) +{ + if (drvdata->crash_mdata.vaddr && + drvdata->crash_mdata.size) + return true; + return false; +} + +static inline void tmc_crashdata_set_invalid(struct tmc_drvdata *drvdata) +{ + struct tmc_crash_metadata *mdata; + + mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr; + + if (tmc_has_crash_mdata_buffer(drvdata)) + mdata->valid = false; +} + +static inline uint32_t find_crash_metadata_crc(struct tmc_crash_metadata *md) +{ + unsigned long crc_size; + + crc_size = sizeof(struct tmc_crash_metadata) - + offsetof(struct tmc_crash_metadata, crc32_tdata); + return crc32_le(0, (void *)&md->crc32_tdata, crc_size); +} + +static inline uint32_t find_crash_tracedata_crc(struct tmc_drvdata *drvdata, + struct tmc_crash_metadata *md) +{ + unsigned long crc_size; + + /* Take CRC of configured buffer size to keep it simple */ + crc_size = md->tmc_ram_size << 2; + return crc32_le(0, (void *)drvdata->resrv_buf.vaddr, crc_size); +} + struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata); void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu); diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c index 7739bc7adc44..0633f04beb24 100644 --- a/drivers/hwtracing/coresight/coresight-tpda.c +++ b/drivers/hwtracing/coresight/coresight-tpda.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
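For completeness, a hedged sketch of how an offline tool might re-check the CRCs produced by find_crash_metadata_crc() and find_crash_tracedata_crc() above, given raw copies of the metadata and trace regions. The struct mirror and the bit-by-bit routine (equivalent to the kernel's crc32_le() for a caller-supplied seed, polynomial 0xEDB88320, no final inversion) are assumptions of this sketch, not part of the driver:

#include <stddef.h>
#include <stdint.h>

/* Userspace mirror of struct tmc_crash_metadata defined above */
struct tmc_crash_metadata {
	uint32_t crc32_mdata, crc32_tdata, version, valid, tmc_ram_size;
	uint32_t tmc_sts, tmc_mode, tmc_ffcr, tmc_ffsr, reserved32;
	uint64_t tmc_rrp, tmc_rwp, tmc_dba, trace_paddr, reserved64[3];
};

static uint32_t sw_crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return crc;
}

/* mdata/trace point at raw copies of the "metadata" and "tracedata" regions */
static int crashdata_ok(const struct tmc_crash_metadata *mdata, const void *trace)
{
	size_t md_len = sizeof(*mdata) -
			offsetof(struct tmc_crash_metadata, crc32_tdata);

	if (!mdata->valid)
		return 0;
	if (mdata->crc32_mdata !=
	    sw_crc32_le(0, (const uint8_t *)&mdata->crc32_tdata, md_len))
		return 0;
	/* Trace CRC covers tmc_ram_size words, i.e. tmc_ram_size << 2 bytes */
	return mdata->crc32_tdata ==
	       sw_crc32_le(0, (const uint8_t *)trace,
			   (size_t)mdata->tmc_ram_size << 2);
}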
*/ #include <linux/amba/bus.h> @@ -24,7 +24,7 @@ DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda"); static bool coresight_device_is_tpdm(struct coresight_device *csdev) { - return (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) && + return (coresight_is_device_source(csdev)) && (csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM); } @@ -68,11 +68,12 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata, int rc = -EINVAL; struct tpdm_drvdata *tpdm_data = dev_get_drvdata(csdev->dev.parent); - if (tpdm_has_dsb_dataset(tpdm_data)) { + if (tpdm_data->dsb) { rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent), "qcom,dsb-element-bits", &drvdata->dsb_esize); } - if (tpdm_has_cmb_dataset(tpdm_data)) { + + if (tpdm_data->cmb) { rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent), "qcom,cmb-element-bits", &drvdata->cmb_esize); } @@ -110,6 +111,16 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata, csdev->pdata->in_conns[i]->dest_port != inport) continue; + /* + * If this port has a hardcoded filter, use the source + * device directly. + */ + if (csdev->pdata->in_conns[i]->filter_src_fwnode) { + in = csdev->pdata->in_conns[i]->filter_src_dev; + if (!in) + continue; + } + if (coresight_device_is_tpdm(in)) { if (drvdata->dsb_esize || drvdata->cmb_esize) return -EEXIST; @@ -124,7 +135,6 @@ static int tpda_get_element_size(struct tpda_drvdata *drvdata, } } - return rc; } @@ -190,10 +200,10 @@ static int tpda_enable(struct coresight_device *csdev, int ret = 0; spin_lock(&drvdata->spinlock); - if (atomic_read(&in->dest_refcnt) == 0) { + if (in->dest_refcnt == 0) { ret = __tpda_enable(drvdata, in->dest_port); if (!ret) { - atomic_inc(&in->dest_refcnt); + in->dest_refcnt++; csdev->refcnt++; dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port); } @@ -223,7 +233,7 @@ static void tpda_disable(struct coresight_device *csdev, struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); spin_lock(&drvdata->spinlock); - if (atomic_dec_return(&in->dest_refcnt) == 0) { + if (--in->dest_refcnt == 0) { __tpda_disable(drvdata, in->dest_port); csdev->refcnt--; } @@ -232,12 +242,23 @@ static void tpda_disable(struct coresight_device *csdev, dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port); } +static int tpda_trace_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode, + __maybe_unused struct coresight_device *sink) +{ + struct tpda_drvdata *drvdata; + + drvdata = dev_get_drvdata(csdev->dev.parent); + + return drvdata->atid; +} + static const struct coresight_ops_link tpda_link_ops = { .enable = tpda_enable, .disable = tpda_disable, }; static const struct coresight_ops tpda_cs_ops = { + .trace_id = tpda_trace_id, .link_ops = &tpda_link_ops, }; @@ -322,7 +343,7 @@ static void tpda_remove(struct amba_device *adev) * Different TPDA has different periph id. * The difference is 0-7 bits' value. So ignore 0-7 bits. 
*/ -static struct amba_id tpda_ids[] = { +static const struct amba_id tpda_ids[] = { { .id = 0x000f0f00, .mask = 0x000fff00, @@ -333,7 +354,6 @@ static struct amba_id tpda_ids[] = { static struct amba_driver tpda_driver = { .drv = { .name = "coresight-tpda", - .owner = THIS_MODULE, .suppress_bind_attrs = true, }, .probe = tpda_probe, diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c index a9708ab0d488..7214e65097ec 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.c +++ b/drivers/hwtracing/coresight/coresight-tpdm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/amba/bus.h> @@ -21,6 +21,21 @@ DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm"); +static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata) +{ + return (drvdata->datasets & TPDM_PIDR0_DS_DSB); +} + +static bool tpdm_has_cmb_dataset(struct tpdm_drvdata *drvdata) +{ + return (drvdata->datasets & TPDM_PIDR0_DS_CMB); +} + +static bool tpdm_has_mcmb_dataset(struct tpdm_drvdata *drvdata) +{ + return (drvdata->datasets & TPDM_PIDR0_DS_MCMB); +} + /* Read dataset array member with the index number */ static ssize_t tpdm_simple_dataset_show(struct device *dev, struct device_attribute *attr, @@ -198,7 +213,7 @@ static umode_t tpdm_cmb_is_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); - if (drvdata && tpdm_has_cmb_dataset(drvdata)) + if (drvdata && drvdata->cmb) return attr->mode; return 0; @@ -237,6 +252,18 @@ static umode_t tpdm_cmb_msr_is_visible(struct kobject *kobj, return 0; } +static umode_t tpdm_mcmb_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + if (drvdata && tpdm_has_mcmb_dataset(drvdata)) + return attr->mode; + + return 0; +} + static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata) { if (tpdm_has_dsb_dataset(drvdata)) { @@ -388,7 +415,7 @@ static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata) { u32 val, i; - if (!tpdm_has_cmb_dataset(drvdata)) + if (!drvdata->cmb) return; /* Configure pattern registers */ @@ -415,6 +442,19 @@ static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata) val |= TPDM_CMB_CR_MODE; else val &= ~TPDM_CMB_CR_MODE; + + if (tpdm_has_mcmb_dataset(drvdata)) { + val &= ~TPDM_CMB_CR_XTRIG_LNSEL; + /* Set the lane participates in the output pattern */ + val |= FIELD_PREP(TPDM_CMB_CR_XTRIG_LNSEL, + drvdata->cmb->mcmb.trig_lane); + + /* Set the enablement of the lane */ + val &= ~TPDM_CMB_CR_E_LN; + val |= FIELD_PREP(TPDM_CMB_CR_E_LN, + drvdata->cmb->mcmb.lane_select); + } + /* Set the enable bit of CMB control register to 1 */ val |= TPDM_CMB_CR_ENA; writel_relaxed(val, drvdata->base + TPDM_CMB_CR); @@ -439,7 +479,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata) } static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, - enum cs_mode mode) + enum cs_mode mode, + __maybe_unused struct coresight_path *path) { struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); @@ -449,6 +490,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, return -EBUSY; } + if (!coresight_take_mode(csdev, mode)) { + spin_unlock(&drvdata->spinlock); + return -EBUSY; + } + __tpdm_enable(drvdata); 
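	/*
	 * In tpdm_enable_cmb() above, the MCMB lane settings are folded into
	 * TPDM_CMB_CR with FIELD_PREP: for example mcmb.trig_lane = 2 and
	 * mcmb.lane_select = 0x0f place 0x2 in bits 20:18 (XTRIG_LNSEL) and
	 * 0x0f in bits 17:10 (E_LN), i.e. the register gains
	 * 0x80000 | 0x3c00 on top of the enable/mode bits, enabling lanes
	 * 0-3 with lane 2 as the cross-trigger lane.  Both values come from
	 * the new mcmb_trig_lane (0-7) and mcmb_lanes_select (8-bit mask)
	 * sysfs attributes added below.
	 */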
drvdata->enable = true; spin_unlock(&drvdata->spinlock); @@ -474,7 +520,7 @@ static void tpdm_disable_cmb(struct tpdm_drvdata *drvdata) { u32 val; - if (!tpdm_has_cmb_dataset(drvdata)) + if (!drvdata->cmb) return; val = readl_relaxed(drvdata->base + TPDM_CMB_CR); @@ -506,6 +552,7 @@ static void tpdm_disable(struct coresight_device *csdev, } __tpdm_disable(drvdata); + coresight_set_mode(csdev, CS_MODE_DISABLED); drvdata->enable = false; spin_unlock(&drvdata->spinlock); @@ -535,12 +582,14 @@ static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata) if (!drvdata->dsb) return -ENOMEM; } - if (tpdm_has_cmb_dataset(drvdata) && (!drvdata->cmb)) { + if ((tpdm_has_cmb_dataset(drvdata) || tpdm_has_mcmb_dataset(drvdata)) + && (!drvdata->cmb)) { drvdata->cmb = devm_kzalloc(drvdata->dev, sizeof(*drvdata->cmb), GFP_KERNEL); if (!drvdata->cmb) return -ENOMEM; } + tpdm_reset_datasets(drvdata); return 0; @@ -633,8 +682,7 @@ static ssize_t dsb_mode_store(struct device *dev, struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val; - if ((kstrtoul(buf, 0, &val)) || (val < 0) || - (val & ~TPDM_DSB_MODE_MASK)) + if ((kstrtoul(buf, 0, &val)) || (val & ~TPDM_DSB_MODE_MASK)) return -EINVAL; spin_lock(&drvdata->spinlock); @@ -984,6 +1032,62 @@ static ssize_t cmb_trig_ts_store(struct device *dev, } static DEVICE_ATTR_RW(cmb_trig_ts); +static ssize_t mcmb_trig_lane_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%u\n", + (unsigned int)drvdata->cmb->mcmb.trig_lane); +} + +static ssize_t mcmb_trig_lane_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if ((kstrtoul(buf, 0, &val)) || (val >= TPDM_MCMB_MAX_LANES)) + return -EINVAL; + + guard(spinlock)(&drvdata->spinlock); + drvdata->cmb->mcmb.trig_lane = val; + + return size; +} +static DEVICE_ATTR_RW(mcmb_trig_lane); + +static ssize_t mcmb_lanes_select_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%u\n", + (unsigned int)drvdata->cmb->mcmb.lane_select); +} + +static ssize_t mcmb_lanes_select_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if (kstrtoul(buf, 0, &val) || (val & ~TPDM_MCMB_E_LN_MASK)) + return -EINVAL; + + guard(spinlock)(&drvdata->spinlock); + drvdata->cmb->mcmb.lane_select = val & TPDM_MCMB_E_LN_MASK; + + return size; +} +static DEVICE_ATTR_RW(mcmb_lanes_select); + static struct attribute *tpdm_dsb_edge_attrs[] = { &dev_attr_ctrl_idx.attr, &dev_attr_ctrl_val.attr, @@ -1146,6 +1250,12 @@ static struct attribute *tpdm_cmb_msr_attrs[] = { NULL, }; +static struct attribute *tpdm_mcmb_attrs[] = { + &dev_attr_mcmb_trig_lane.attr, + &dev_attr_mcmb_lanes_select.attr, + NULL, +}; + static struct attribute *tpdm_dsb_attrs[] = { &dev_attr_dsb_mode.attr, &dev_attr_dsb_trig_ts.attr, @@ -1212,6 +1322,11 @@ static struct attribute_group tpdm_cmb_msr_grp = { .name = "cmb_msr", }; +static struct attribute_group tpdm_mcmb_attr_grp = { + .attrs = tpdm_mcmb_attrs, + .is_visible = tpdm_mcmb_is_visible, +}; + static const struct attribute_group *tpdm_attr_grps[] = { &tpdm_attr_grp, &tpdm_dsb_attr_grp, @@ -1223,6 +1338,7 @@ static const struct 
attribute_group *tpdm_attr_grps[] = { &tpdm_cmb_trig_patt_grp, &tpdm_cmb_patt_grp, &tpdm_cmb_msr_grp, + &tpdm_mcmb_attr_grp, NULL, }; @@ -1299,10 +1415,10 @@ static void tpdm_remove(struct amba_device *adev) * Different TPDM has different periph id. * The difference is 0-7 bits' value. So ignore 0-7 bits. */ -static struct amba_id tpdm_ids[] = { +static const struct amba_id tpdm_ids[] = { { - .id = 0x000f0e00, - .mask = 0x000fff00, + .id = 0x001f0e00, + .mask = 0x00ffff00, }, { 0, 0, NULL }, }; @@ -1310,7 +1426,6 @@ static struct amba_id tpdm_ids[] = { static struct amba_driver tpdm_driver = { .drv = { .name = "coresight-tpdm", - .owner = THIS_MODULE, .suppress_bind_attrs = true, }, .probe = tpdm_probe, diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h index e08d212642e3..b11754389734 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.h +++ b/drivers/hwtracing/coresight/coresight-tpdm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _CORESIGHT_CORESIGHT_TPDM_H @@ -9,7 +9,7 @@ /* The max number of the datasets that TPDM supports */ #define TPDM_DATASETS 7 -/* CMB Subunit Registers */ +/* CMB/MCMB Subunit Registers */ #define TPDM_CMB_CR (0xA00) /* CMB subunit timestamp insertion enable register */ #define TPDM_CMB_TIER (0xA04) @@ -28,6 +28,10 @@ #define TPDM_CMB_CR_ENA BIT(0) /* Trace collection mode for CMB subunit */ #define TPDM_CMB_CR_MODE BIT(1) +/* MCMB trigger lane select */ +#define TPDM_CMB_CR_XTRIG_LNSEL GENMASK(20, 18) +/* MCMB lane enablement */ +#define TPDM_CMB_CR_E_LN GENMASK(17, 10) /* Timestamp control for pattern match */ #define TPDM_CMB_TIER_PATT_TSENAB BIT(0) /* CMB CTI timestamp request */ @@ -41,6 +45,12 @@ /* MAX number of DSB MSR */ #define TPDM_CMB_MAX_MSR 32 +/* MAX lanes in the output pattern for MCMB configurations*/ +#define TPDM_MCMB_MAX_LANES 8 + +/* Filter bit 0~7 from the value for CR_E_LN */ +#define TPDM_MCMB_E_LN_MASK GENMASK(7, 0) + /* DSB Subunit Registers */ #define TPDM_DSB_CR (0x780) #define TPDM_DSB_TIER (0x784) @@ -112,11 +122,13 @@ * PERIPHIDR0[0] : Fix to 1 if ImplDef subunit present, else 0 * PERIPHIDR0[1] : Fix to 1 if DSB subunit present, else 0 * PERIPHIDR0[2] : Fix to 1 if CMB subunit present, else 0 + * PERIPHIDR0[6] : Fix to 1 if MCMB subunit present, else 0 */ #define TPDM_PIDR0_DS_IMPDEF BIT(0) #define TPDM_PIDR0_DS_DSB BIT(1) #define TPDM_PIDR0_DS_CMB BIT(2) +#define TPDM_PIDR0_DS_MCMB BIT(6) #define TPDM_DSB_MAX_LINES 256 /* MAX number of EDCR registers */ @@ -256,6 +268,9 @@ struct dsb_dataset { * @patt_ts: Indicates if pattern match for timestamp is enabled. * @trig_ts: Indicates if CTI trigger for timestamp is enabled. * @ts_all: Indicates if timestamp is enabled for all packets. 
+ * struct mcmb_dataset + * @mcmb_trig_lane: Save data for trigger lane + * @mcmb_lane_select: Save data for lane enablement */ struct cmb_dataset { u32 trace_mode; @@ -267,6 +282,10 @@ struct cmb_dataset { bool patt_ts; bool trig_ts; bool ts_all; + struct { + u8 trig_lane; + u8 lane_select; + } mcmb; }; /** @@ -324,14 +343,4 @@ struct tpdm_dataset_attribute { enum dataset_mem mem; u32 idx; }; - -static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata) -{ - return (drvdata->datasets & TPDM_PIDR0_DS_DSB); -} - -static bool tpdm_has_cmb_dataset(struct tpdm_drvdata *drvdata) -{ - return (drvdata->datasets & TPDM_PIDR0_DS_CMB); -} #endif /* _CORESIGHT_CORESIGHT_TPDM_H */ diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 29024f880fda..97ef36f03ec2 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -5,17 +5,19 @@ * Description: CoreSight Trace Port Interface Unit driver */ +#include <linux/acpi.h> +#include <linux/amba/bus.h> #include <linux/atomic.h> -#include <linux/kernel.h> -#include <linux/init.h> +#include <linux/clk.h> +#include <linux/coresight.h> #include <linux/device.h> -#include <linux/io.h> #include <linux/err.h> -#include <linux/slab.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <linux/coresight.h> -#include <linux/amba/bus.h> -#include <linux/clk.h> +#include <linux/slab.h> #include "coresight-priv.h" @@ -52,11 +54,13 @@ DEFINE_CORESIGHT_DEVLIST(tpiu_devs, "tpiu"); /* * @base: memory mapped base address for this component. * @atclk: optional clock for the core parts of the TPIU. + * @pclk: APB clock if present, otherwise NULL * @csdev: component vitals needed by the framework. 
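tpdm_datasets_setup() now also calls tpdm_has_mcmb_dataset(), whose body is not visible in this hunk; presumably it mirrors the helpers removed from this header, keyed off the new PERIPHIDR0 bit:

/* Assumed shape of the new helper, modelled on the removed tpdm_has_cmb_dataset() */
static bool tpdm_has_mcmb_dataset(struct tpdm_drvdata *drvdata)
{
	return !!(drvdata->datasets & TPDM_PIDR0_DS_MCMB);
}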
*/ struct tpiu_drvdata { void __iomem *base; struct clk *atclk; + struct clk *pclk; struct coresight_device *csdev; spinlock_t spinlock; }; @@ -122,14 +126,12 @@ static const struct coresight_ops tpiu_cs_ops = { .sink_ops = &tpiu_sink_ops, }; -static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) +static int __tpiu_probe(struct device *dev, struct resource *res) { int ret; void __iomem *base; - struct device *dev = &adev->dev; struct coresight_platform_data *pdata = NULL; struct tpiu_drvdata *drvdata; - struct resource *res = &adev->res; struct coresight_desc desc = { 0 }; desc.name = coresight_alloc_device_name(&tpiu_devs, dev); @@ -142,12 +144,16 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) spin_lock_init(&drvdata->spinlock); - drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ + drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */ if (!IS_ERR(drvdata->atclk)) { ret = clk_prepare_enable(drvdata->atclk); if (ret) return ret; } + + drvdata->pclk = coresight_get_enable_apb_pclk(dev); + if (IS_ERR(drvdata->pclk)) + return -ENODEV; dev_set_drvdata(dev, drvdata); /* Validity for the resource is already checked by the AMBA core */ @@ -173,21 +179,34 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) desc.dev = dev; drvdata->csdev = coresight_register(&desc); - if (!IS_ERR(drvdata->csdev)) { - pm_runtime_put(&adev->dev); + if (!IS_ERR(drvdata->csdev)) return 0; - } return PTR_ERR(drvdata->csdev); } -static void tpiu_remove(struct amba_device *adev) +static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) { - struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev); + int ret; + + ret = __tpiu_probe(&adev->dev, &adev->res); + if (!ret) + pm_runtime_put(&adev->dev); + return ret; +} + +static void __tpiu_remove(struct device *dev) +{ + struct tpiu_drvdata *drvdata = dev_get_drvdata(dev); coresight_unregister(drvdata->csdev); } +static void tpiu_remove(struct amba_device *adev) +{ + __tpiu_remove(&adev->dev); +} + #ifdef CONFIG_PM static int tpiu_runtime_suspend(struct device *dev) { @@ -196,6 +215,8 @@ static int tpiu_runtime_suspend(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_disable_unprepare(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_disable_unprepare(drvdata->pclk); return 0; } @@ -206,6 +227,8 @@ static int tpiu_runtime_resume(struct device *dev) if (drvdata && !IS_ERR(drvdata->atclk)) clk_prepare_enable(drvdata->atclk); + if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) + clk_prepare_enable(drvdata->pclk); return 0; } #endif @@ -236,7 +259,6 @@ MODULE_DEVICE_TABLE(amba, tpiu_ids); static struct amba_driver tpiu_driver = { .drv = { .name = "coresight-tpiu", - .owner = THIS_MODULE, .pm = &tpiu_dev_pm_ops, .suppress_bind_attrs = true, }, @@ -245,7 +267,66 @@ static struct amba_driver tpiu_driver = { .id_table = tpiu_ids, }; -module_amba_driver(tpiu_driver); +static int tpiu_platform_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int ret; + + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = __tpiu_probe(&pdev->dev, res); + pm_runtime_put(&pdev->dev); + if (ret) + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static void tpiu_platform_remove(struct platform_device *pdev) +{ + struct tpiu_drvdata *drvdata = dev_get_drvdata(&pdev->dev); + + if (WARN_ON(!drvdata)) + return; + + 
__tpiu_remove(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!IS_ERR_OR_NULL(drvdata->pclk)) + clk_put(drvdata->pclk); +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id tpiu_acpi_ids[] = { + {"ARMHC979", 0, 0, 0}, /* ARM CoreSight TPIU */ + {} +}; +MODULE_DEVICE_TABLE(acpi, tpiu_acpi_ids); +#endif + +static struct platform_driver tpiu_platform_driver = { + .probe = tpiu_platform_probe, + .remove = tpiu_platform_remove, + .driver = { + .name = "coresight-tpiu-platform", + .acpi_match_table = ACPI_PTR(tpiu_acpi_ids), + .suppress_bind_attrs = true, + .pm = &tpiu_dev_pm_ops, + }, +}; + +static int __init tpiu_init(void) +{ + return coresight_init_driver("tpiu", &tpiu_driver, &tpiu_platform_driver); +} + +static void __exit tpiu_exit(void) +{ + coresight_remove_driver(&tpiu_driver, &tpiu_platform_driver); +} +module_init(tpiu_init); +module_exit(tpiu_exit); MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>"); MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>"); diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c index af5b4ef59cea..7ed337d54d3e 100644 --- a/drivers/hwtracing/coresight/coresight-trace-id.c +++ b/drivers/hwtracing/coresight/coresight-trace-id.c @@ -3,6 +3,7 @@ * Copyright (c) 2022, Linaro Limited, All rights reserved. * Author: Mike Leach <mike.leach@linaro.org> */ +#include <linux/coresight.h> #include <linux/coresight-pmu.h> #include <linux/cpumask.h> #include <linux/kernel.h> @@ -11,18 +12,18 @@ #include "coresight-trace-id.h" -/* Default trace ID map. Used on systems that don't require per sink mappings */ -static struct coresight_trace_id_map id_map_default; +enum trace_id_flags { + TRACE_ID_ANY = 0x0, + TRACE_ID_PREFER_ODD = 0x1, + TRACE_ID_REQ_STATIC = 0x2, +}; -/* maintain a record of the mapping of IDs and pending releases per cpu */ -static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0); -static cpumask_t cpu_id_release_pending; - -/* perf session active counter */ -static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0); - -/* lock to protect id_map and cpu data */ -static DEFINE_SPINLOCK(id_map_lock); +/* Default trace ID map. 
Used in sysfs mode and for system sources */ +static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0); +static struct coresight_trace_id_map id_map_default = { + .cpu_map = &id_map_default_cpu_ids, + .lock = __RAW_SPIN_LOCK_UNLOCKED(id_map_default.lock) +}; /* #define TRACE_ID_DEBUG 1 */ #if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST) @@ -32,7 +33,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map, { pr_debug("%s id_map::\n", func_name); pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids); - pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids); } #define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__) #define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id) @@ -46,9 +46,9 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map, #endif /* unlocked read of current trace ID value for given CPU */ -static int _coresight_trace_id_read_cpu_id(int cpu) +static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map) { - return atomic_read(&per_cpu(cpu_id, cpu)); + return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu)); } /* look for next available odd ID, return 0 if none found */ @@ -80,21 +80,25 @@ static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map) * Otherwise allocate next available ID. */ static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map, - int preferred_id, bool prefer_odd_id) + int preferred_id, unsigned int flags) { int id = 0; /* for backwards compatibility, cpu IDs may use preferred value */ - if (IS_VALID_CS_TRACE_ID(preferred_id) && - !test_bit(preferred_id, id_map->used_ids)) { - id = preferred_id; - goto trace_id_allocated; - } else if (prefer_odd_id) { + if (IS_VALID_CS_TRACE_ID(preferred_id)) { + if (!test_bit(preferred_id, id_map->used_ids)) { + id = preferred_id; + goto trace_id_allocated; + } else if (flags & TRACE_ID_REQ_STATIC) + return -EBUSY; + } else if (flags & TRACE_ID_PREFER_ODD) { /* may use odd ids to avoid preferred legacy cpu IDs */ id = coresight_trace_id_find_odd_id(id_map); if (id) goto trace_id_allocated; - } + } else if (!IS_VALID_CS_TRACE_ID(preferred_id) && + (flags & TRACE_ID_REQ_STATIC)) + return -EINVAL; /* * skip reserved bit 0, look at bitmap length of @@ -119,49 +123,33 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma clear_bit(id, id_map->used_ids); } -static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map) -{ - if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id)) - return; - set_bit(id, id_map->pend_rel_ids); -} - /* - * release all pending IDs for all current maps & clear CPU associations - * - * This currently operates on the default id map, but may be extended to - * operate on all registered id maps if per sink id maps are used. + * Release all IDs and clear CPU associations. 
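The default map above shows the new shape of struct coresight_trace_id_map: a per-CPU atomic holding the CPU/ID associations plus a raw spinlock guarding used_ids. A sink that wants its own ID space could, in principle, be declared the same way; the names below are invented for illustration, and the struct definition is assumed to be picked up via <linux/coresight.h>, as the new include in this file suggests:

#include <linux/atomic.h>
#include <linux/coresight.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

/* Hypothetical per-sink ID map, mirroring id_map_default */
static DEFINE_PER_CPU(atomic_t, example_sink_cpu_ids) = ATOMIC_INIT(0);

static struct coresight_trace_id_map example_sink_id_map = {
	.cpu_map = &example_sink_cpu_ids,
	.lock = __RAW_SPIN_LOCK_UNLOCKED(example_sink_id_map.lock),
};

IDs would then be taken from it with coresight_trace_id_get_cpu_id_map(cpu, &example_sink_id_map) rather than from the default map.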
*/ -static void coresight_trace_id_release_all_pending(void) +static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map) { - struct coresight_trace_id_map *id_map = &id_map_default; unsigned long flags; - int cpu, bit; + int cpu; - spin_lock_irqsave(&id_map_lock, flags); - for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) { - clear_bit(bit, id_map->used_ids); - clear_bit(bit, id_map->pend_rel_ids); - } - for_each_cpu(cpu, &cpu_id_release_pending) { - atomic_set(&per_cpu(cpu_id, cpu), 0); - cpumask_clear_cpu(cpu, &cpu_id_release_pending); - } - spin_unlock_irqrestore(&id_map_lock, flags); + raw_spin_lock_irqsave(&id_map->lock, flags); + bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX); + for_each_possible_cpu(cpu) + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0); + raw_spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID_MAP(id_map); } -static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map) +static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map) { unsigned long flags; int id; - spin_lock_irqsave(&id_map_lock, flags); + raw_spin_lock_irqsave(&id_map->lock, flags); /* check for existing allocation for this CPU */ - id = _coresight_trace_id_read_cpu_id(cpu); + id = _coresight_trace_id_read_cpu_id(cpu, id_map); if (id) - goto get_cpu_id_clr_pend; + goto get_cpu_id_out_unlock; /* * Find a new ID. @@ -175,62 +163,50 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_ */ id = coresight_trace_id_alloc_new_id(id_map, CORESIGHT_LEGACY_CPU_TRACE_ID(cpu), - false); + TRACE_ID_ANY); if (!IS_VALID_CS_TRACE_ID(id)) goto get_cpu_id_out_unlock; /* allocate the new id to the cpu */ - atomic_set(&per_cpu(cpu_id, cpu), id); - -get_cpu_id_clr_pend: - /* we are (re)using this ID - so ensure it is not marked for release */ - cpumask_clear_cpu(cpu, &cpu_id_release_pending); - clear_bit(id, id_map->pend_rel_ids); + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id); get_cpu_id_out_unlock: - spin_unlock_irqrestore(&id_map_lock, flags); + raw_spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID_CPU(cpu, id); DUMP_ID_MAP(id_map); return id; } -static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map) +static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map) { unsigned long flags; int id; /* check for existing allocation for this CPU */ - id = _coresight_trace_id_read_cpu_id(cpu); + id = _coresight_trace_id_read_cpu_id(cpu, id_map); if (!id) return; - spin_lock_irqsave(&id_map_lock, flags); + raw_spin_lock_irqsave(&id_map->lock, flags); - if (atomic_read(&perf_cs_etm_session_active)) { - /* set release at pending if perf still active */ - coresight_trace_id_set_pend_rel(id, id_map); - cpumask_set_cpu(cpu, &cpu_id_release_pending); - } else { - /* otherwise clear id */ - coresight_trace_id_free(id, id_map); - atomic_set(&per_cpu(cpu_id, cpu), 0); - } + coresight_trace_id_free(id, id_map); + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0); - spin_unlock_irqrestore(&id_map_lock, flags); + raw_spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID_CPU(cpu, id); DUMP_ID_MAP(id_map); } -static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map) +static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map, + int preferred_id, unsigned int traceid_flags) { unsigned long flags; int id; - spin_lock_irqsave(&id_map_lock, flags); - /* prefer odd IDs for 
system components to avoid legacy CPU IDS */ - id = coresight_trace_id_alloc_new_id(id_map, 0, true); - spin_unlock_irqrestore(&id_map_lock, flags); + raw_spin_lock_irqsave(&id_map->lock, flags); + id = coresight_trace_id_alloc_new_id(id_map, preferred_id, traceid_flags); + raw_spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID(id); DUMP_ID_MAP(id_map); @@ -241,9 +217,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map * { unsigned long flags; - spin_lock_irqsave(&id_map_lock, flags); + raw_spin_lock_irqsave(&id_map->lock, flags); coresight_trace_id_free(id, id_map); - spin_unlock_irqrestore(&id_map_lock, flags); + raw_spin_unlock_irqrestore(&id_map->lock, flags); DUMP_ID(id); DUMP_ID_MAP(id_map); @@ -253,45 +229,72 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map * int coresight_trace_id_get_cpu_id(int cpu) { - return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default); + return _coresight_trace_id_get_cpu_id(cpu, &id_map_default); } EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id); +int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) +{ + return _coresight_trace_id_get_cpu_id(cpu, id_map); +} +EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map); + void coresight_trace_id_put_cpu_id(int cpu) { - coresight_trace_id_map_put_cpu_id(cpu, &id_map_default); + _coresight_trace_id_put_cpu_id(cpu, &id_map_default); } EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id); +void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) +{ + _coresight_trace_id_put_cpu_id(cpu, id_map); +} +EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map); + int coresight_trace_id_read_cpu_id(int cpu) { - return _coresight_trace_id_read_cpu_id(cpu); + return _coresight_trace_id_read_cpu_id(cpu, &id_map_default); } EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id); +int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) +{ + return _coresight_trace_id_read_cpu_id(cpu, id_map); +} +EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map); + int coresight_trace_id_get_system_id(void) { - return coresight_trace_id_map_get_system_id(&id_map_default); + /* prefer odd IDs for system components to avoid legacy CPU IDS */ + return coresight_trace_id_map_get_system_id(&id_map_default, 0, + TRACE_ID_PREFER_ODD); } EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id); +int coresight_trace_id_get_static_system_id(int trace_id) +{ + return coresight_trace_id_map_get_system_id(&id_map_default, + trace_id, TRACE_ID_REQ_STATIC); +} +EXPORT_SYMBOL_GPL(coresight_trace_id_get_static_system_id); + void coresight_trace_id_put_system_id(int id) { coresight_trace_id_map_put_system_id(&id_map_default, id); } EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id); -void coresight_trace_id_perf_start(void) +void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map) { - atomic_inc(&perf_cs_etm_session_active); - PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); + atomic_inc(&id_map->perf_cs_etm_session_active); + PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active)); } EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start); -void coresight_trace_id_perf_stop(void) +void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map) { - if (!atomic_dec_return(&perf_cs_etm_session_active)) - coresight_trace_id_release_all_pending(); - PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); + if (!atomic_dec_return(&id_map->perf_cs_etm_session_active)) + 
coresight_trace_id_release_all(id_map); + PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active)); } EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop); diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h index 3797777d367e..db68e1ec56b6 100644 --- a/drivers/hwtracing/coresight/coresight-trace-id.h +++ b/drivers/hwtracing/coresight/coresight-trace-id.h @@ -17,9 +17,10 @@ * released when done. * * In order to ensure that a consistent cpu / ID matching is maintained - * throughout a perf cs_etm event session - a session in progress flag will - * be maintained, and released IDs not cleared until the perf session is - * complete. This allows the same CPU to be re-allocated its prior ID. + * throughout a perf cs_etm event session - a session in progress flag will be + * maintained for each sink, and IDs are cleared when all the perf sessions + * complete. This allows the same CPU to be re-allocated its prior ID when + * events are scheduled in and out. * * * Trace ID maps will be created and initialised to prevent architecturally @@ -32,10 +33,6 @@ #include <linux/bitops.h> #include <linux/types.h> - -/* architecturally we have 128 IDs some of which are reserved */ -#define CORESIGHT_TRACE_IDS_MAX 128 - /* ID 0 is reserved */ #define CORESIGHT_TRACE_ID_RES_0 0 @@ -47,23 +44,6 @@ ((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP)) /** - * Trace ID map. - * - * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs. - * Initialised so that the reserved IDs are permanently marked as - * in use. - * @pend_rel_ids: CPU IDs that have been released by the trace source but not - * yet marked as available, to allow re-allocation to the same - * CPU during a perf session. - */ -struct coresight_trace_id_map { - DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX); - DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX); -}; - -/* Allocate and release IDs for a single default trace ID map */ - -/** * Read and optionally allocate a CoreSight trace ID and associate with a CPU. * * Function will read the current trace ID for the associated CPU, @@ -79,19 +59,27 @@ struct coresight_trace_id_map { int coresight_trace_id_get_cpu_id(int cpu); /** + * Version of coresight_trace_id_get_cpu_id() that allows the ID map to operate + * on to be provided. + */ +int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); + +/** * Release an allocated trace ID associated with the CPU. * - * This will release the CoreSight trace ID associated with the CPU, - * unless a perf session is in operation. - * - * If a perf session is in operation then the ID will be marked as pending - * release. + * This will release the CoreSight trace ID associated with the CPU. * * @cpu: The CPU index to release the associated trace ID. */ void coresight_trace_id_put_cpu_id(int cpu); /** + * Version of coresight_trace_id_put_cpu_id() that allows the ID map to operate + * on to be provided. + */ +void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); + +/** * Read the current allocated CoreSight Trace ID value for the CPU. * * Fast read of the current value that does not allocate if no ID allocated @@ -112,6 +100,12 @@ void coresight_trace_id_put_cpu_id(int cpu); int coresight_trace_id_read_cpu_id(int cpu); /** + * Version of coresight_trace_id_read_cpu_id() that allows the ID map to operate + * on to be provided. 
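Putting the *_map variants together with the per-map session counter, a perf path driving one particular sink's map might look roughly like this sketch (error handling trimmed; id_map stands for whichever map the sink owns):

#include <linux/coresight.h>
#include "coresight-trace-id.h"

/* Sketch: one perf session's use of a sink-private trace ID map */
static int example_perf_session(int cpu, struct coresight_trace_id_map *id_map)
{
	int trace_id;

	coresight_trace_id_perf_start(id_map);
	trace_id = coresight_trace_id_get_cpu_id_map(cpu, id_map);
	if (trace_id < 0) {
		coresight_trace_id_perf_stop(id_map);
		return trace_id;
	}

	/* ... program the source with trace_id and run the session ... */

	coresight_trace_id_put_cpu_id_map(cpu, id_map);
	coresight_trace_id_perf_stop(id_map);	/* last stop frees all IDs in the map */
	return 0;
}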
+ */ +int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); + +/** * Allocate a CoreSight trace ID for a system component. * * Unconditionally allocates a Trace ID, without associating the ID with a CPU. @@ -123,6 +117,15 @@ int coresight_trace_id_read_cpu_id(int cpu); int coresight_trace_id_get_system_id(void); /** + * Allocate a CoreSight static trace ID for a system component. + * + * Used to allocate static IDs for system trace sources such as dummy source. + * + * return: Trace ID or -EINVAL if allocation is impossible. + */ +int coresight_trace_id_get_static_system_id(int id); + +/** * Release an allocated system trace ID. * * Unconditionally release a trace ID allocated to a system component. @@ -136,21 +139,21 @@ void coresight_trace_id_put_system_id(int id); /** * Notify the Trace ID allocator that a perf session is starting. * - * Increase the perf session reference count - called by perf when setting up - * a trace event. + * Increase the perf session reference count - called by perf when setting up a + * trace event. * - * This reference count is used by the ID allocator to ensure that trace IDs - * associated with a CPU cannot change or be released during a perf session. + * Perf sessions never free trace IDs to ensure that the ID associated with a + * CPU cannot change during their and other's concurrent sessions. Instead, + * this refcount is used so that the last event to finish always frees all IDs. */ -void coresight_trace_id_perf_start(void); +void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map); /** * Notify the ID allocator that a perf session is stopping. * - * Decrease the perf session reference count. - * if this causes the count to go to zero, then all Trace IDs marked as pending - * release, will be released. + * Decrease the perf session reference count. If this causes the count to go to + * zero, then all Trace IDs will be released. 
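coresight_trace_id_get_static_system_id() is meant for sources that must keep a fixed ID, such as the dummy source mentioned above; a minimal caller might look like the following sketch (the ID value is an arbitrary example, not a real reservation):

#include "coresight-trace-id.h"

/* Sketch: claim and later release a fixed system trace ID */
static int example_claim_static_id(void)
{
	int trace_id = coresight_trace_id_get_static_system_id(0x10);

	if (trace_id < 0)
		return trace_id;	/* -EBUSY if already in use, -EINVAL if out of range */

	/* ... program the system source with trace_id ... */

	coresight_trace_id_put_system_id(trace_id);
	return 0;
}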
*/ -void coresight_trace_id_perf_stop(void); +void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map); #endif /* _CORESIGHT_TRACE_ID_H */ diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 6136776482e6..fff67aac8418 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -17,6 +17,8 @@ #include <asm/barrier.h> #include <asm/cpufeature.h> +#include <linux/kvm_host.h> +#include <linux/vmalloc.h> #include "coresight-self-hosted-trace.h" #include "coresight-trbe.h" @@ -220,6 +222,7 @@ static inline void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr) */ trblimitr |= TRBLIMITR_EL1_E; write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + kvm_enable_trbe(); /* Synchronize the TRBE enable event */ isb(); @@ -238,6 +241,7 @@ static inline void set_trbe_disabled(struct trbe_cpudata *cpudata) */ trblimitr &= ~TRBLIMITR_EL1_E; write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1); + kvm_disable_trbe(); if (trbe_needs_drain_after_disable(cpudata)) trbe_drain_buffer(); @@ -252,8 +256,8 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata) static void trbe_reset_local(struct trbe_cpudata *cpudata) { - trbe_drain_and_disable_local(cpudata); write_sysreg_s(0, SYS_TRBLIMITR_EL1); + trbe_drain_buffer(); write_sysreg_s(0, SYS_TRBPTR_EL1); write_sysreg_s(0, SYS_TRBBASER_EL1); write_sysreg_s(0, SYS_TRBSR_EL1); @@ -1109,6 +1113,16 @@ static bool is_perf_trbe(struct perf_output_handle *handle) return true; } +static u64 cpu_prohibit_trace(void) +{ + u64 trfcr = read_trfcr(); + + /* Prohibit tracing at EL0 & the kernel EL */ + write_trfcr(trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE)); + /* Return the original value of the TRFCR */ + return trfcr; +} + static irqreturn_t arm_trbe_irq_handler(int irq, void *dev) { struct perf_output_handle **handle_ptr = dev; @@ -1561,7 +1575,7 @@ static struct platform_driver arm_trbe_driver = { .suppress_bind_attrs = true, }, .probe = arm_trbe_device_probe, - .remove_new = arm_trbe_device_remove, + .remove = arm_trbe_device_remove, }; static int __init arm_trbe_init(void) diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c index f9ebf20c91e6..26cfc939e5bd 100644 --- a/drivers/hwtracing/coresight/ultrasoc-smb.c +++ b/drivers/hwtracing/coresight/ultrasoc-smb.c @@ -98,7 +98,7 @@ static int smb_open(struct inode *inode, struct file *file) struct smb_drv_data *drvdata = container_of(file->private_data, struct smb_drv_data, miscdev); - guard(spinlock)(&drvdata->spinlock); + guard(raw_spinlock)(&drvdata->spinlock); if (drvdata->reading) return -EBUSY; @@ -152,7 +152,7 @@ static int smb_release(struct inode *inode, struct file *file) struct smb_drv_data *drvdata = container_of(file->private_data, struct smb_drv_data, miscdev); - guard(spinlock)(&drvdata->spinlock); + guard(raw_spinlock)(&drvdata->spinlock); drvdata->reading = false; return 0; @@ -163,7 +163,6 @@ static const struct file_operations smb_fops = { .open = smb_open, .read = smb_read, .release = smb_release, - .llseek = no_llseek, }; static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr, @@ -246,7 +245,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode, struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent); int ret = 0; - guard(spinlock)(&drvdata->spinlock); + guard(raw_spinlock)(&drvdata->spinlock); /* Do nothing, the trace data is reading by other interface now */ if 
(drvdata->reading) @@ -281,7 +280,7 @@ static int smb_disable(struct coresight_device *csdev) { struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent); - guard(spinlock)(&drvdata->spinlock); + guard(raw_spinlock)(&drvdata->spinlock); if (drvdata->reading) return -EBUSY; @@ -379,7 +378,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev, if (!buf) return 0; - guard(spinlock)(&drvdata->spinlock); + guard(raw_spinlock)(&drvdata->spinlock); /* Don't do anything if another tracer is using this sink. */ if (csdev->refcnt != 1) @@ -564,7 +563,7 @@ static int smb_probe(struct platform_device *pdev) smb_reset_buffer(drvdata); platform_set_drvdata(pdev, drvdata); - spin_lock_init(&drvdata->spinlock); + raw_spin_lock_init(&drvdata->spinlock); drvdata->pid = -1; ret = smb_register_sink(pdev, drvdata); @@ -601,7 +600,7 @@ static struct platform_driver smb_driver = { .suppress_bind_attrs = true, }, .probe = smb_probe, - .remove_new = smb_remove, + .remove = smb_remove, }; module_platform_driver(smb_driver); diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h index a91d39cfccb8..c4c111275627 100644 --- a/drivers/hwtracing/coresight/ultrasoc-smb.h +++ b/drivers/hwtracing/coresight/ultrasoc-smb.h @@ -115,7 +115,7 @@ struct smb_drv_data { struct coresight_device *csdev; struct smb_data_buffer sdb; struct miscdevice miscdev; - spinlock_t spinlock; + raw_spinlock_t spinlock; bool reading; pid_t pid; }; diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index 4b6359326ede..4f7d2b6d79e2 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig @@ -60,6 +60,7 @@ config INTEL_TH_STH config INTEL_TH_MSU tristate "Intel(R) Trace Hub Memory Storage Unit" + depends on MMU help Memory Storage Unit (MSU) trace output device enables storing STP traces to system memory. 
It supports single diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c index 87f9024e4bbb..d229324978bd 100644 --- a/drivers/hwtracing/intel_th/acpi.c +++ b/drivers/hwtracing/intel_th/acpi.c @@ -60,13 +60,11 @@ static int intel_th_acpi_probe(struct platform_device *pdev) return 0; } -static int intel_th_acpi_remove(struct platform_device *pdev) +static void intel_th_acpi_remove(struct platform_device *pdev) { struct intel_th *th = platform_get_drvdata(pdev); intel_th_free(th); - - return 0; } static struct platform_driver intel_th_acpi_driver = { diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index cc7f879bb175..47d9e6c3bac0 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -26,9 +26,9 @@ module_param(host_mode, bool, 0444); static DEFINE_IDA(intel_th_ida); -static int intel_th_match(struct device *dev, struct device_driver *driver) +static int intel_th_match(struct device *dev, const struct device_driver *driver) { - struct intel_th_driver *thdrv = to_intel_th_driver(driver); + const struct intel_th_driver *thdrv = to_intel_th_driver(driver); struct intel_th_device *thdev = to_intel_th_device(dev); if (thdev->type == INTEL_TH_SWITCH && @@ -180,7 +180,7 @@ static void intel_th_device_release(struct device *dev) intel_th_device_free(to_intel_th_device(dev)); } -static struct device_type intel_th_source_device_type = { +static const struct device_type intel_th_source_device_type = { .name = "intel_th_source_device", .release = intel_th_device_release, }; @@ -333,19 +333,19 @@ static struct attribute *intel_th_output_attrs[] = { ATTRIBUTE_GROUPS(intel_th_output); -static struct device_type intel_th_output_device_type = { +static const struct device_type intel_th_output_device_type = { .name = "intel_th_output_device", .groups = intel_th_output_groups, .release = intel_th_device_release, .devnode = intel_th_output_devnode, }; -static struct device_type intel_th_switch_device_type = { +static const struct device_type intel_th_switch_device_type = { .name = "intel_th_switch_device", .release = intel_th_device_release, }; -static struct device_type *intel_th_device_type[] = { +static const struct device_type *intel_th_device_type[] = { [INTEL_TH_SOURCE] = &intel_th_source_device_type, [INTEL_TH_OUTPUT] = &intel_th_output_device_type, [INTEL_TH_SWITCH] = &intel_th_switch_device_type, @@ -857,8 +857,9 @@ static irqreturn_t intel_th_irq(int irq, void *data) /** * intel_th_alloc() - allocate a new Intel TH device and its subdevices * @dev: parent device + * @drvdata: data private to the driver * @devres: resources indexed by th_mmio_idx - * @irq: irq number + * @ndevres: number of entries in the @devres resources */ struct intel_th * intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata, @@ -871,7 +872,7 @@ intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata, if (!th) return ERR_PTR(-ENOMEM); - th->id = ida_simple_get(&intel_th_ida, 0, 0, GFP_KERNEL); + th->id = ida_alloc(&intel_th_ida, GFP_KERNEL); if (th->id < 0) { err = th->id; goto err_alloc; @@ -931,7 +932,7 @@ err_chrdev: "intel_th/output"); err_ida: - ida_simple_remove(&intel_th_ida, th->id); + ida_free(&intel_th_ida, th->id); err_alloc: kfree(th); @@ -964,7 +965,7 @@ void intel_th_free(struct intel_th *th) __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS, "intel_th/output"); - ida_simple_remove(&intel_th_ida, th->id); + ida_free(&intel_th_ida, th->id); kfree(th); } diff --git 
a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index b3308934a687..3883f99fd5d5 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -154,9 +154,9 @@ static ssize_t master_attr_show(struct device *dev, spin_unlock(&gth->gth_lock); if (port >= 0) - count = snprintf(buf, PAGE_SIZE, "%x\n", port); + count = sysfs_emit(buf, "%x\n", port); else - count = snprintf(buf, PAGE_SIZE, "disabled\n"); + count = sysfs_emit(buf, "disabled\n"); return count; } @@ -332,8 +332,8 @@ static ssize_t output_attr_show(struct device *dev, pm_runtime_get_sync(dev); spin_lock(&gth->gth_lock); - count = snprintf(buf, PAGE_SIZE, "%x\n", - gth_output_parm_get(gth, oa->port, oa->parm)); + count = sysfs_emit(buf, "%x\n", + gth_output_parm_get(gth, oa->port, oa->parm)); spin_unlock(&gth->gth_lock); pm_runtime_put(dev); diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 6cbba733f259..3b87cd542c1b 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -189,7 +189,7 @@ struct intel_th_driver { }; #define to_intel_th_driver(_d) \ - container_of((_d), struct intel_th_driver, driver) + container_of_const((_d), struct intel_th_driver, driver) #define to_intel_th_driver_or_null(_d) \ ((_d) ? to_intel_th_driver(_d) : NULL) diff --git a/drivers/hwtracing/intel_th/msu-sink.c b/drivers/hwtracing/intel_th/msu-sink.c index 891b28ea25fe..256ce3260ad9 100644 --- a/drivers/hwtracing/intel_th/msu-sink.c +++ b/drivers/hwtracing/intel_th/msu-sink.c @@ -116,4 +116,5 @@ static const struct msu_buffer sink_mbuf = { module_intel_th_msu_buffer(sink_mbuf); +MODULE_DESCRIPTION("example software sink buffer for Intel TH MSU"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 9621efe0e95c..7163950eb371 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -19,6 +19,7 @@ #include <linux/io.h> #include <linux/workqueue.h> #include <linux/dma-mapping.h> +#include <linux/pfn_t.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> @@ -61,6 +62,7 @@ enum lockout_state { * @lo_lock: lockout state serialization * @nr_blocks: number of blocks (pages) in this window * @nr_segs: number of segments in this window (<= @nr_blocks) + * @msc: pointer to the MSC device * @_sgt: array of block descriptors * @sgt: array of block descriptors */ @@ -104,24 +106,32 @@ struct msc_iter { /** * struct msc - MSC device representation - * @reg_base: register window base address + * @reg_base: register window base address for the entire MSU + * @msu_base: register window base address for this MSC * @thdev: intel_th_device pointer * @mbuf: MSU buffer, if assigned - * @mbuf_priv MSU buffer's private data, if @mbuf + * @mbuf_priv: MSU buffer's private data, if @mbuf + * @work: a work to stop the trace when the buffer is full * @win_list: list of windows in multiblock mode * @single_sgt: single mode buffer * @cur_win: current window + * @switch_on_unlock: window to switch to when it becomes available * @nr_pages: total number of pages allocated for this buffer * @single_sz: amount of data in single mode * @single_wrap: single mode wrap occurred * @base: buffer's base pointer * @base_addr: buffer's base address + * @orig_addr: MSC0 buffer's base address + * @orig_sz: MSC0 buffer's size * @user_count: number of users of the buffer * @mmap_count: number of mappings * @buf_mutex: mutex to serialize access to buffer-related bits - + * @iter_list: list of 
open file descriptor iterators + * @stop_on_full: stop the trace if the current window is full * @enabled: MSC is enabled * @wrap: wrapping is enabled + * @do_irq: IRQ resource is available, handle interrupts + * @multi_is_broken: multiblock mode enabled (not disabled by PCI drvdata) * @mode: MSC operating mode * @burst_len: write burst length * @index: number of this MSC in the MSU @@ -755,6 +765,8 @@ unlock: * Program storage mode, wrapping, burst length and trace buffer address * into a given MSC. Then, enable tracing and set msc::enabled. * The latter is serialized on msc::buf_mutex, so make sure to hold it. + * + * Return: %0 for success or a negative error code otherwise. */ static int msc_configure(struct msc *msc) { @@ -965,7 +977,6 @@ static void msc_buffer_contig_free(struct msc *msc) for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { struct page *page = virt_to_page(msc->base + off); - page->mapping = NULL; __free_page(page); } @@ -1147,9 +1158,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win) int i; for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) { - struct page *page = msc_sg_page(sg); - - page->mapping = NULL; dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, sg_virt(sg), sg_dma_address(sg)); } @@ -1291,7 +1299,8 @@ static void msc_buffer_free(struct msc *msc) /** * msc_buffer_alloc() - allocate a buffer for MSC * @msc: MSC device - * @size: allocation size in bytes + * @nr_pages: number of pages for each window + * @nr_wins: number of windows * * Allocate a storage buffer for MSC, depending on the msc::mode, it will be * either done via msc_buffer_contig_alloc() for SINGLE operation mode or @@ -1370,6 +1379,9 @@ static int msc_buffer_unlocked_free_unless_used(struct msc *msc) * @msc: MSC device * * This is a locked version of msc_buffer_unlocked_free_unless_used(). + * + * Return: 0 on successful deallocation or if there was no buffer to + * deallocate, -EBUSY if there are active users. */ static int msc_buffer_free_unless_used(struct msc *msc) { @@ -1438,6 +1450,8 @@ struct msc_win_to_user_struct { * @data: callback's private data * @src: source buffer * @len: amount of data to copy from the source buffer + * + * Return: >= %0 for success or -errno for error. 
*/ static unsigned long msc_win_to_user(void *data, void *src, size_t len) { @@ -1584,22 +1598,10 @@ static void msc_mmap_close(struct vm_area_struct *vma) { struct msc_iter *iter = vma->vm_file->private_data; struct msc *msc = iter->msc; - unsigned long pg; if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex)) return; - /* drop page _refcounts */ - for (pg = 0; pg < msc->nr_pages; pg++) { - struct page *page = msc_buffer_get_page(msc, pg); - - if (WARN_ON_ONCE(!page)) - continue; - - if (page->mapping) - page->mapping = NULL; - } - /* last mapping -- drop user_count */ atomic_dec(&msc->user_count); mutex_unlock(&msc->buf_mutex); @@ -1609,16 +1611,14 @@ static vm_fault_t msc_mmap_fault(struct vm_fault *vmf) { struct msc_iter *iter = vmf->vma->vm_file->private_data; struct msc *msc = iter->msc; + struct page *page; - vmf->page = msc_buffer_get_page(msc, vmf->pgoff); - if (!vmf->page) + page = msc_buffer_get_page(msc, vmf->pgoff); + if (!page) return VM_FAULT_SIGBUS; - get_page(vmf->page); - vmf->page->mapping = vmf->vma->vm_file->f_mapping; - vmf->page->index = vmf->pgoff; - - return 0; + get_page(page); + return vmf_insert_mixed(vmf->vma, vmf->address, page_to_pfn_t(page)); } static const struct vm_operations_struct msc_mmap_ops = { @@ -1659,7 +1659,7 @@ out: atomic_dec(&msc->user_count); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY); + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY | VM_MIXEDMAP); vma->vm_ops = &msc_mmap_ops; return ret; } @@ -1669,7 +1669,6 @@ static const struct file_operations intel_th_msc_fops = { .release = intel_th_msc_release, .read = intel_th_msc_read, .mmap = intel_th_msc_mmap, - .llseek = no_llseek, .owner = THIS_MODULE, }; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 147d338c191e..e3def163d5cf 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -23,7 +23,6 @@ enum { TH_PCI_RTIT_BAR = 4, }; -#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR)) #define PCI_REG_NPKDSC 0x80 #define NPKDSC_TSACT BIT(5) @@ -83,10 +82,16 @@ static int intel_th_pci_probe(struct pci_dev *pdev, if (err) return err; - err = pcim_iomap_regions_request_all(pdev, BAR_MASK, DRIVER_NAME); + err = pcim_request_all_regions(pdev, DRIVER_NAME); if (err) return err; + if (!pcim_iomap(pdev, TH_PCI_CONFIG_BAR, 0)) + return -ENOMEM; + + if (!pcim_iomap(pdev, TH_PCI_STH_SW_BAR, 0)) + return -ENOMEM; + if (pdev->resource[TH_PCI_RTIT_BAR].start) { resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR]; r++; @@ -290,6 +295,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Meteor Lake-S */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7f26), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Meteor Lake-S CPU */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Raptor Lake-S */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26), .driver_data = (kernel_ulong_t)&intel_th_2x, @@ -300,6 +315,41 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Granite Rapids */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0963), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Granite Rapids SOC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3256), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Sapphire Rapids SOC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3456), + .driver_data 
= (kernel_ulong_t)&intel_th_2x, + }, + { + /* Lunar Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Arrow Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-P/U */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Alder Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f), .driver_data = (kernel_ulong_t)&intel_th_2x, diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c index 9ca8c4e045f8..428f595a28a0 100644 --- a/drivers/hwtracing/intel_th/sth.c +++ b/drivers/hwtracing/intel_th/sth.c @@ -70,8 +70,8 @@ static ssize_t notrace sth_stm_packet(struct stm_data *stm_data, struct sth_device *sth = container_of(stm_data, struct sth_device, stm); struct intel_th_channel __iomem *out = sth_channel(sth, master, channel); - u64 __iomem *outp = &out->Dn; unsigned long reg = REG_STH_TRIG; + u64 __iomem *outp; #ifndef CONFIG_64BIT if (size > 4) diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c index 4bf04a977840..3090479a2979 100644 --- a/drivers/hwtracing/ptt/hisi_ptt.c +++ b/drivers/hwtracing/ptt/hisi_ptt.c @@ -1221,6 +1221,7 @@ static int hisi_ptt_register_pmu(struct hisi_ptt *hisi_ptt) hisi_ptt->hisi_ptt_pmu = (struct pmu) { .module = THIS_MODULE, + .parent = &hisi_ptt->pdev->dev, .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_NO_EXCLUDE, .task_ctx_nr = perf_sw_context, .attr_groups = hisi_ptt_pmu_groups, diff --git a/drivers/hwtracing/stm/console.c b/drivers/hwtracing/stm/console.c index a00f65e21747..097a00ac43a7 100644 --- a/drivers/hwtracing/stm/console.c +++ b/drivers/hwtracing/stm/console.c @@ -22,6 +22,7 @@ static struct stm_console { .data = { .name = "console", .nr_chans = 1, + .type = STM_USER, .link = stm_console_link, .unlink = stm_console_unlink, }, diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c index 534fbefc7f6a..cdba4e875b28 100644 --- a/drivers/hwtracing/stm/core.c +++ b/drivers/hwtracing/stm/core.c @@ -600,7 +600,7 @@ EXPORT_SYMBOL_GPL(stm_data_write); static ssize_t notrace stm_write(struct stm_device *stm, struct stm_output *output, - unsigned int chan, const char *buf, size_t count) + unsigned int chan, const char *buf, size_t count, struct stm_source_data *source) { int err; @@ -608,7 +608,7 @@ stm_write(struct stm_device *stm, struct stm_output *output, if (!stm->pdrv) return -ENODEV; - err = stm->pdrv->write(stm->data, output, chan, buf, count); + err = stm->pdrv->write(stm->data, output, chan, buf, count, source); if (err < 0) return err; @@ -657,7 +657,7 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf, pm_runtime_get_sync(&stm->dev); - count = stm_write(stm, &stmf->output, 0, kbuf, count); + count = stm_write(stm, &stmf->output, 0, kbuf, count, NULL); pm_runtime_mark_last_busy(&stm->dev); pm_runtime_put_autosuspend(&stm->dev); @@ -839,7 +839,6 @@ static const struct file_operations stm_fops = { .mmap = stm_char_mmap, .unlocked_ioctl = stm_char_ioctl, .compat_ioctl = compat_ptr_ioctl, - .llseek = no_llseek, }; static void stm_device_release(struct device *dev) @@ -868,8 +867,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, return -ENOMEM; stm->major = register_chrdev(0, stm_data->name, &stm_fops); - if (stm->major < 0) - 
goto err_free; + if (stm->major < 0) { + err = stm->major; + vfree(stm); + return err; + } device_initialize(&stm->dev); stm->dev.devt = MKDEV(stm->major, 0); @@ -913,10 +915,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data, err_device: unregister_chrdev(stm->major, stm_data->name); - /* matches device_initialize() above */ + /* calls stm_device_release() */ put_device(&stm->dev); -err_free: - vfree(stm); return err; } @@ -1298,7 +1298,7 @@ int notrace stm_source_write(struct stm_source_data *data, stm = srcu_dereference(src->link, &stm_source_srcu); if (stm) - count = stm_write(stm, &src->output, chan, buf, count); + count = stm_write(stm, &src->output, chan, buf, count, data); else count = -ENODEV; diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c index 3bb606dfa634..a7cea7ea0163 100644 --- a/drivers/hwtracing/stm/ftrace.c +++ b/drivers/hwtracing/stm/ftrace.c @@ -23,6 +23,7 @@ static struct stm_ftrace { .data = { .name = "ftrace", .nr_chans = STM_FTRACE_NR_CHANNELS, + .type = STM_FTRACE, .link = stm_ftrace_link, .unlink = stm_ftrace_unlink, }, diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c index 81d7b21d31ec..495eb1dc8ac5 100644 --- a/drivers/hwtracing/stm/heartbeat.c +++ b/drivers/hwtracing/stm/heartbeat.c @@ -78,12 +78,11 @@ static int stm_heartbeat_init(void) } stm_heartbeat[i].data.nr_chans = 1; + stm_heartbeat[i].data.type = STM_USER; stm_heartbeat[i].data.link = stm_heartbeat_link; stm_heartbeat[i].data.unlink = stm_heartbeat_unlink; - hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); - stm_heartbeat[i].hrtimer.function = - stm_heartbeat_hrtimer_handler; + hrtimer_setup(&stm_heartbeat[i].hrtimer, stm_heartbeat_hrtimer_handler, + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ret = stm_source_register_device(NULL, &stm_heartbeat[i].data); if (ret) diff --git a/drivers/hwtracing/stm/p_basic.c b/drivers/hwtracing/stm/p_basic.c index 8980a6a5fd6c..5525c975cc6f 100644 --- a/drivers/hwtracing/stm/p_basic.c +++ b/drivers/hwtracing/stm/p_basic.c @@ -10,7 +10,8 @@ #include "stm.h" static ssize_t basic_write(struct stm_data *data, struct stm_output *output, - unsigned int chan, const char *buf, size_t count) + unsigned int chan, const char *buf, size_t count, + struct stm_source_data *source) { unsigned int c = output->channel + chan; unsigned int m = output->master; diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c index 8254971c02e7..1e75aa0025a3 100644 --- a/drivers/hwtracing/stm/p_sys-t.c +++ b/drivers/hwtracing/stm/p_sys-t.c @@ -20,6 +20,7 @@ enum sys_t_message_type { MIPI_SYST_TYPE_RAW = 6, MIPI_SYST_TYPE_SHORT64, MIPI_SYST_TYPE_CLOCK, + MIPI_SYST_TYPE_SBD, }; enum sys_t_message_severity { @@ -53,6 +54,19 @@ enum sys_t_message_string_subtype { MIPI_SYST_STRING_PRINTF_64 = 12, }; +/** + * enum sys_t_message_sbd_subtype - SyS-T SBD message subtypes + * @MIPI_SYST_SBD_ID32: SBD message with 32-bit message ID + * @MIPI_SYST_SBD_ID64: SBD message with 64-bit message ID + * + * Structured Binary Data messages can send information of arbitrary length, + * together with ID's that describe BLOB's content and layout. 
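The SBD framing added further down reuses the raw ftrace record as the message payload; only the low 16 bits of the record's first word (the event ID) survive as the SyS-T message ID, while the flags/preempt/pid bits must be zeroed before transmission. In isolation, the trimming amounts to the following sketch:

#include <linux/types.h>

/* Sketch of the masking done by sys_t_write_data() for ftrace sources */
static u64 example_sbd_message_id(const void *ftrace_rec)
{
	/* bits [16:63] (flags, preempt count, pid) must not leak into the ID */
	return *(const u64 *)ftrace_rec & 0xffff;
}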
+ */ +enum sys_t_message_sbd_subtype { + MIPI_SYST_SBD_ID32 = 0, + MIPI_SYST_SBD_ID64 = 1, +}; + #define MIPI_SYST_TYPE(t) ((u32)(MIPI_SYST_TYPE_ ## t)) #define MIPI_SYST_SEVERITY(s) ((u32)(MIPI_SYST_SEVERITY_ ## s) << 4) #define MIPI_SYST_OPT_LOC BIT(8) @@ -75,6 +89,20 @@ enum sys_t_message_string_subtype { #define CLOCK_SYNC_HEADER (MIPI_SYST_TYPES(CLOCK, TRANSPORT_SYNC) | \ MIPI_SYST_SEVERITY(MAX)) +/* + * SyS-T and ftrace headers are compatible to an extent that ftrace event ID + * and args can be treated as SyS-T SBD message with 64-bit ID and arguments + * BLOB right behind the header without modification. Bits [16:63] coming + * together with message ID from ftrace aren't used by SBD and must be zeroed. + * + * 0 15 16 23 24 31 32 39 40 63 + * ftrace: <event_id> <flags> <preempt> <-pid-> <----> <args> + * SBD: <------- msg_id ------------------------------> <BLOB> + */ +#define SBD_HEADER (MIPI_SYST_TYPES(SBD, ID64) | \ + MIPI_SYST_SEVERITY(INFO) | \ + MIPI_SYST_OPT_GUID) + struct sys_t_policy_node { uuid_t uuid; bool do_len; @@ -284,14 +312,67 @@ sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c) return sizeof(header) + sizeof(payload); } +static inline u32 sys_t_header(struct stm_source_data *source) +{ + if (source && source->type == STM_FTRACE) + return SBD_HEADER; + return DATA_HEADER; +} + +static ssize_t sys_t_write_data(struct stm_data *data, + struct stm_source_data *source, + unsigned int master, unsigned int channel, + bool ts_first, const void *buf, size_t count) +{ + ssize_t sz; + const unsigned char nil = 0; + + /* + * Ftrace is zero-copy compatible with SyS-T SBD, but requires + * special handling of first 64 bits. Trim and send them separately + * to avoid damage on original ftrace buffer. + */ + if (source && source->type == STM_FTRACE) { + u64 compat_ftrace_header; + ssize_t header_sz; + ssize_t buf_sz; + + if (count < sizeof(compat_ftrace_header)) + return -EINVAL; + + /* SBD only makes use of low 16 bits (event ID) from ftrace event */ + compat_ftrace_header = *(u64 *)buf & 0xffff; + header_sz = stm_data_write(data, master, channel, false, + &compat_ftrace_header, + sizeof(compat_ftrace_header)); + if (header_sz != sizeof(compat_ftrace_header)) + return header_sz; + + buf_sz = stm_data_write(data, master, channel, false, + buf + header_sz, count - header_sz); + if (buf_sz != count - header_sz) + return buf_sz; + sz = header_sz + buf_sz; + } else { + sz = stm_data_write(data, master, channel, false, buf, count); + } + + if (sz <= 0) + return sz; + + data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil); + + return sz; +} + static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output, - unsigned int chan, const char *buf, size_t count) + unsigned int chan, const char *buf, size_t count, + struct stm_source_data *source) { struct sys_t_output *op = output->pdrv_private; unsigned int c = output->channel + chan; unsigned int m = output->master; - const unsigned char nil = 0; - u32 header = DATA_HEADER; + u32 header = sys_t_header(source); u8 uuid[UUID_SIZE]; ssize_t sz; @@ -348,11 +429,7 @@ static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output, } /* DATA */ - sz = stm_data_write(data, m, c, false, buf, count); - if (sz > 0) - data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil); - - return sz; + return sys_t_write_data(data, source, m, c, false, buf, count); } static const struct stm_protocol_driver sys_t_pdrv = { diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h index 
a9be49fc7a6b..85dda6e0d10c 100644 --- a/drivers/hwtracing/stm/stm.h +++ b/drivers/hwtracing/stm/stm.h @@ -96,7 +96,7 @@ struct stm_protocol_driver { const char *name; ssize_t (*write)(struct stm_data *data, struct stm_output *output, unsigned int chan, - const char *buf, size_t count); + const char *buf, size_t count, struct stm_source_data *source); void (*policy_node_init)(void *arg); int (*output_open)(void *priv, struct stm_output *output); void (*output_close)(struct stm_output *output);
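With the widened ->write() prototype above, every protocol driver now sees which stm_source (if any) produced the data; a NULL source means a character-device write. A minimal implementation against the new signature, loosely modelled on p_basic and offered only as a sketch, might read:

#include <linux/module.h>
#include <linux/stm.h>
#include "stm.h"

/* Sketch: ->write() callback using the new @source argument */
static ssize_t example_pdrv_write(struct stm_data *data,
				  struct stm_output *output, unsigned int chan,
				  const char *buf, size_t count,
				  struct stm_source_data *source)
{
	unsigned int c = output->channel + chan;
	unsigned int m = output->master;

	if (source && source->type == STM_FTRACE) {
		/* an SBD-style header could be emitted here, as p_sys-t does */
	}

	return stm_data_write(data, m, c, true, buf, count);
}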