path: root/drivers/platform/x86/intel/pmt
Diffstat (limited to 'drivers/platform/x86/intel/pmt')
-rw-r--r--  drivers/platform/x86/intel/pmt/Kconfig            68
-rw-r--r--  drivers/platform/x86/intel/pmt/Makefile           16
-rw-r--r--  drivers/platform/x86/intel/pmt/class.c           427
-rw-r--r--  drivers/platform/x86/intel/pmt/class.h            82
-rw-r--r--  drivers/platform/x86/intel/pmt/crashlog.c        614
-rw-r--r--  drivers/platform/x86/intel/pmt/discovery-kunit.c 116
-rw-r--r--  drivers/platform/x86/intel/pmt/discovery.c       635
-rw-r--r--  drivers/platform/x86/intel/pmt/features.c        205
-rw-r--r--  drivers/platform/x86/intel/pmt/telemetry.c       442
-rw-r--r--  drivers/platform/x86/intel/pmt/telemetry.h       126
10 files changed, 2731 insertions, 0 deletions
diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig
new file mode 100644
index 000000000000..7363446b7773
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Kconfig
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Intel Platform Monitoring Technology drivers
+#
+
+config INTEL_PMT_CLASS
+ tristate
+ help
+ The Intel Platform Monitoring Technology (PMT) class driver provides
+ the basic sysfs interface and file hierarchy used by PMT devices.
+
+ For more information, see:
+ <file:Documentation/ABI/testing/sysfs-class-intel_pmt>
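+
+ For example (illustrative of the layout only), a telemetry region
+ appears as /sys/class/intel_pmt/telem<N>/, with guid, size, and
+ offset attributes plus a read-only binary file named telem.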
+
+ To compile this driver as a module, choose M here: the module
+ will be called pmt_class.
+
+config INTEL_PMT_TELEMETRY
+ tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
+ depends on INTEL_VSEC
+ select INTEL_PMT_DISCOVERY
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
+ access to hardware telemetry metrics on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pmt_telemetry.
+
+config INTEL_PMT_CRASHLOG
+ tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
+ depends on INTEL_VSEC
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) crashlog driver provides
+ access to hardware crashlog capabilities on devices that support the
+ feature.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pmt_crashlog.
+
+config INTEL_PMT_DISCOVERY
+ tristate "Intel Platform Monitoring Technology (PMT) Discovery driver"
+ depends on INTEL_VSEC
+ select INTEL_PMT_CLASS
+ help
+ The Intel Platform Monitoring Technology (PMT) discovery driver provides
+ access to details about the various PMT features and feature specific
+ attributes.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pmt_discovery.
+
+config INTEL_PMT_KUNIT_TEST
+ tristate "KUnit tests for Intel PMT driver"
+ depends on INTEL_PMT_DISCOVERY
+ depends on INTEL_PMT_TELEMETRY || !INTEL_PMT_TELEMETRY
+ depends on KUNIT
+ help
+ Enable this option to compile and run a suite of KUnit tests for the Intel
+ Platform Monitoring Technology (PMT) driver. These tests are designed to
+ validate the driver's functionality, error handling, and overall stability,
+ helping developers catch regressions and ensure code quality during changes.
+
+ This option is intended for development and testing environments. It is
+ recommended to disable it in production builds. To compile this driver as a
+ module, choose M here: the module will be called pmt-discovery-kunit.
diff --git a/drivers/platform/x86/intel/pmt/Makefile b/drivers/platform/x86/intel/pmt/Makefile
new file mode 100644
index 000000000000..47f692c091c9
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/intel/pmt
+# Intel Platform Monitoring Technology Drivers
+#
+
+obj-$(CONFIG_INTEL_PMT_CLASS) += pmt_class.o
+pmt_class-y := class.o
+obj-$(CONFIG_INTEL_PMT_TELEMETRY) += pmt_telemetry.o
+pmt_telemetry-y := telemetry.o
+obj-$(CONFIG_INTEL_PMT_CRASHLOG) += pmt_crashlog.o
+pmt_crashlog-y := crashlog.o
+obj-$(CONFIG_INTEL_PMT_DISCOVERY) += pmt_discovery.o
+pmt_discovery-y := discovery.o features.o
+obj-$(CONFIG_INTEL_PMT_KUNIT_TEST) += pmt-discovery-kunit.o
+pmt-discovery-kunit-y := discovery-kunit.o
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
new file mode 100644
index 000000000000..7c3023d5d91d
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology (PMT) Class driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/intel_vsec.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "class.h"
+
+#define PMT_XA_START 1
+#define PMT_XA_MAX INT_MAX
+#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
+#define GUID_SPR_PUNIT 0x9956f43f
+
+bool intel_pmt_is_early_client_hw(struct device *dev)
+{
+ struct intel_vsec_device *ivdev = dev_to_ivdev(dev);
+
+ /*
+ * Early implementations of PMT on client platforms have some
+ * differences from the server platforms (which use the Out Of Band
+ * Management Services Module, OOBMSM).
+ */
+ return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW);
+}
+EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, "INTEL_PMT");
+
+static inline int
+pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
+{
+ int i, remain;
+ u64 *buf = to;
+
+ if (!IS_ALIGNED((unsigned long)from, 8))
+ return -EFAULT;
+
+ for (i = 0; i < count / 8; i++)
+ buf[i] = readq(&from[i]);
+
+ /* Copy any remaining bytes */
+ remain = count % 8;
+ if (remain) {
+ u64 tmp = readq(&from[i]);
+
+ memcpy(&buf[i], &tmp, remain);
+ }
+
+ return count;
+}
+
+int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+ void __iomem *addr, loff_t off, u32 count)
+{
+ if (cb && cb->read_telem)
+ return cb->read_telem(pdev, guid, buf, off, count);
+
+ addr += off;
+
+ if (guid == GUID_SPR_PUNIT)
+ /* PUNIT on SPR only supports aligned 64-bit read */
+ return pmt_memcpy64_fromio(buf, addr, count);
+
+ memcpy_fromio(buf, addr, count);
+
+ return count;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read_mmio, "INTEL_PMT");
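+
+/*
+ * Sketch of a caller (illustrative only; intel_pmt_read() below is the
+ * in-tree user): read one 64-bit sample from the start of a region while
+ * honoring any per-device callback and the SPR PUNIT quirk.
+ *
+ * u64 sample;
+ * int ret;
+ *
+ * ret = pmt_telem_read_mmio(entry->pcidev, entry->cb, entry->header.guid,
+ * &sample, entry->base, 0, sizeof(sample));
+ * if (ret < 0)
+ * return ret;
+ */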
+
+/*
+ * sysfs
+ */
+static ssize_t
+intel_pmt_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct intel_pmt_entry *entry = container_of(attr,
+ struct intel_pmt_entry,
+ pmt_bin_attr);
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= entry->size)
+ return 0;
+
+ if (count > entry->size - off)
+ count = entry->size - off;
+
+ count = pmt_telem_read_mmio(entry->pcidev, entry->cb, entry->header.guid, buf,
+ entry->base, off, count);
+
+ return count;
+}
+
+static int
+intel_pmt_mmap(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *attr, struct vm_area_struct *vma)
+{
+ struct intel_pmt_entry *entry = container_of(attr,
+ struct intel_pmt_entry,
+ pmt_bin_attr);
+ unsigned long vsize = vma->vm_end - vma->vm_start;
+ struct device *dev = kobj_to_dev(kobj);
+ unsigned long phys = entry->base_addr;
+ unsigned long pfn = PFN_DOWN(phys);
+ unsigned long psize;
+
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ return -EROFS;
+
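+ /*
+ * Map whole pages: from the page containing base_addr through the
+ * page containing the last byte of the region.
+ */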
+ psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
+ if (vsize > psize) {
+ dev_err(dev, "Requested mmap size is too large\n");
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn,
+ vsize, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static ssize_t
+guid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%x\n", entry->guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%zu\n", entry->size);
+}
+static DEVICE_ATTR_RO(size);
+
+static ssize_t
+offset_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct intel_pmt_entry *entry = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr));
+}
+static DEVICE_ATTR_RO(offset);
+
+static struct attribute *intel_pmt_attrs[] = {
+ &dev_attr_guid.attr,
+ &dev_attr_size.attr,
+ &dev_attr_offset.attr,
+ NULL
+};
+
+static umode_t intel_pmt_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev->parent);
+ struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev);
+
+ /*
+ * Place the discovery features folder in /sys/class/intel_pmt, but
+ * exclude the common attributes as they are not applicable.
+ */
+ if (ivdev->cap_id == ilog2(VSEC_CAP_DISCOVERY))
+ return 0;
+
+ return attr->mode;
+}
+
+static bool intel_pmt_group_visible(struct kobject *kobj)
+{
+ return true;
+}
+DEFINE_SYSFS_GROUP_VISIBLE(intel_pmt);
+
+static const struct attribute_group intel_pmt_group = {
+ .attrs = intel_pmt_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(intel_pmt),
+};
+__ATTRIBUTE_GROUPS(intel_pmt);
+
+struct class intel_pmt_class = {
+ .name = "intel_pmt",
+ .dev_groups = intel_pmt_groups,
+};
+EXPORT_SYMBOL_GPL(intel_pmt_class);
+
+static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
+ struct intel_vsec_device *ivdev,
+ struct resource *disc_res)
+{
+ struct pci_dev *pci_dev = ivdev->pcidev;
+ struct device *dev = &ivdev->auxdev.dev;
+ struct intel_pmt_header *header = &entry->header;
+ u8 bir;
+
+ /*
+ * The base offset should always be 8 byte aligned.
+ *
+ * For non-local access types the lower 3 bits of base offset
+ * contains the index of the base address register where the
+ * telemetry can be found.
+ */
+ bir = GET_BIR(header->base_offset);
+
+ /* Local access and BARID only for now */
+ switch (header->access_type) {
+ case ACCESS_LOCAL:
+ if (bir) {
+ dev_err(dev,
+ "Unsupported BAR index %d for access type %d\n",
+ bir, header->access_type);
+ return -EINVAL;
+ }
+ /*
+ * For access_type LOCAL, the base address is as follows:
+ * base address = end of discovery region + 1 + base offset
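+ * e.g. (illustrative) a discovery region ending at 0x10000fff with a
+ * base offset of 0x100 yields a base address of 0x10001100.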
+ */
+ entry->base_addr = disc_res->end + 1 + header->base_offset;
+
+ /*
+ * Some hardware uses a different calculation for the base address
+ * when access_type == ACCESS_LOCAL. On these systems
+ * ACCESS_LOCAL refers to an address in the same BAR as the
+ * header but at a fixed offset. Since the header address was
+ * supplied to the driver, we don't know which BAR it was in.
+ * So search for the BAR whose range includes the header address.
+ */
+ if (intel_pmt_is_early_client_hw(dev)) {
+ int i;
+
+ entry->base_addr = 0;
+ for (i = 0; i < PCI_STD_NUM_BARS; i++)
+ if (disc_res->start >= pci_resource_start(pci_dev, i) &&
+ (disc_res->start <= pci_resource_end(pci_dev, i))) {
+ entry->base_addr = pci_resource_start(pci_dev, i) +
+ header->base_offset;
+ break;
+ }
+ if (!entry->base_addr)
+ return -EINVAL;
+ }
+
+ break;
+ case ACCESS_BARID:
+ /* Use the provided base address if it exists */
+ if (ivdev->base_addr) {
+ entry->base_addr = ivdev->base_addr +
+ GET_ADDRESS(header->base_offset);
+ break;
+ }
+
+ /*
+ * If another BAR was specified then the base offset
+ * represents the offset within that BAR. So retrieve the
+ * address from the parent PCI device and add offset.
+ */
+ entry->base_addr = pci_resource_start(pci_dev, bir) +
+ GET_ADDRESS(header->base_offset);
+ break;
+ default:
+ dev_err(dev, "Unsupported access type %d\n",
+ header->access_type);
+ return -EINVAL;
+ }
+
+ entry->pcidev = pci_dev;
+ entry->guid = header->guid;
+ entry->size = header->size;
+ entry->cb = ivdev->priv_data;
+
+ return 0;
+}
+
+static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns,
+ struct device *parent)
+{
+ struct intel_vsec_device *ivdev = dev_to_ivdev(parent);
+ struct resource res = {0};
+ struct device *dev;
+ int ret;
+
+ ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry,
+ "%s%d", ns->name, entry->devid);
+
+ if (IS_ERR(dev)) {
+ dev_err(parent, "Could not create %s%d device node\n",
+ ns->name, entry->devid);
+ ret = PTR_ERR(dev);
+ goto fail_dev_create;
+ }
+
+ entry->kobj = &dev->kobj;
+
+ if (entry->attr_grp) {
+ ret = sysfs_create_group(entry->kobj, entry->attr_grp);
+ if (ret)
+ goto fail_sysfs_create_group;
+ }
+
+ /* if size is 0 assume no data buffer, so no file needed */
+ if (!entry->size)
+ return 0;
+
+ res.start = entry->base_addr;
+ res.end = res.start + entry->size - 1;
+ res.flags = IORESOURCE_MEM;
+
+ entry->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(entry->base)) {
+ ret = PTR_ERR(entry->base);
+ goto fail_ioremap;
+ }
+
+ sysfs_bin_attr_init(&entry->pmt_bin_attr);
+ entry->pmt_bin_attr.attr.name = ns->name;
+ entry->pmt_bin_attr.attr.mode = 0440;
+ entry->pmt_bin_attr.mmap = intel_pmt_mmap;
+ entry->pmt_bin_attr.read = intel_pmt_read;
+ entry->pmt_bin_attr.size = entry->size;
+
+ ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
+ if (ret)
+ goto fail_ioremap;
+
+ if (ns->pmt_add_endpoint) {
+ ret = ns->pmt_add_endpoint(ivdev, entry);
+ if (ret)
+ goto fail_add_endpoint;
+ }
+
+ return 0;
+
+fail_add_endpoint:
+ sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);
+fail_ioremap:
+ if (entry->attr_grp)
+ sysfs_remove_group(entry->kobj, entry->attr_grp);
+fail_sysfs_create_group:
+ device_unregister(dev);
+fail_dev_create:
+ xa_erase(ns->xa, entry->devid);
+
+ return ret;
+}
+
+int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *intel_vsec_dev, int idx)
+{
+ struct device *dev = &intel_vsec_dev->auxdev.dev;
+ struct resource *disc_res;
+ int ret;
+
+ disc_res = &intel_vsec_dev->resource[idx];
+
+ entry->disc_table = devm_ioremap_resource(dev, disc_res);
+ if (IS_ERR(entry->disc_table))
+ return PTR_ERR(entry->disc_table);
+
+ ret = ns->pmt_header_decode(entry, dev);
+ if (ret)
+ return ret;
+
+ ret = intel_pmt_populate_entry(entry, intel_vsec_dev, disc_res);
+ if (ret)
+ return ret;
+
+ return intel_pmt_dev_register(entry, ns, dev);
+}
+EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, "INTEL_PMT");
+
+void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns)
+{
+ struct device *dev = kobj_to_dev(entry->kobj);
+
+ if (entry->size)
+ sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);
+
+ if (entry->attr_grp)
+ sysfs_remove_group(entry->kobj, entry->attr_grp);
+
+ device_unregister(dev);
+ xa_erase(ns->xa, entry->devid);
+}
+EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_destroy, "INTEL_PMT");
+
+static int __init pmt_class_init(void)
+{
+ return class_register(&intel_pmt_class);
+}
+
+static void __exit pmt_class_exit(void)
+{
+ class_unregister(&intel_pmt_class);
+}
+
+module_init(pmt_class_init);
+module_exit(pmt_class_exit);
+
+MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Class driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
new file mode 100644
index 000000000000..3c5ad5f52bca
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_PMT_CLASS_H
+#define _INTEL_PMT_CLASS_H
+
+#include <linux/intel_vsec.h>
+#include <linux/xarray.h>
+#include <linux/types.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include "telemetry.h"
+
+/* PMT access types */
+#define ACCESS_BARID 2
+#define ACCESS_LOCAL 3
+
+/* PMT discovery base address/offset register layout */
+#define GET_BIR(v) ((v) & GENMASK(2, 0))
+#define GET_ADDRESS(v) ((v) & GENMASK(31, 3))
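+
+/*
+ * Illustrative decode: a base_offset of 0x200d has BIR 5 (low three
+ * bits) and address 0x2008 (low three bits masked off).
+ */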
+
+struct pci_dev;
+extern struct class intel_pmt_class;
+
+struct telem_endpoint {
+ struct pci_dev *pcidev;
+ struct telem_header header;
+ struct pmt_callbacks *cb;
+ void __iomem *base;
+ bool present;
+ struct kref kref;
+};
+
+struct intel_pmt_header {
+ u32 base_offset;
+ u32 size;
+ u32 guid;
+ u8 access_type;
+};
+
+struct intel_pmt_entry {
+ struct telem_endpoint *ep;
+ struct pci_dev *pcidev;
+ struct intel_pmt_header header;
+ struct bin_attribute pmt_bin_attr;
+ const struct attribute_group *attr_grp;
+ struct kobject *kobj;
+ void __iomem *disc_table;
+ void __iomem *base;
+ struct pmt_callbacks *cb;
+ unsigned long base_addr;
+ size_t size;
+ u64 feature_flags;
+ u32 guid;
+ u32 num_rmids; /* Number of Resource Monitoring IDs */
+ int devid;
+};
+
+struct intel_pmt_namespace {
+ const char *name;
+ struct xarray *xa;
+ int (*pmt_header_decode)(struct intel_pmt_entry *entry,
+ struct device *dev);
+ int (*pmt_add_endpoint)(struct intel_vsec_device *ivdev,
+ struct intel_pmt_entry *entry);
+};
+
+int pmt_telem_read_mmio(struct pci_dev *pdev, struct pmt_callbacks *cb, u32 guid, void *buf,
+ void __iomem *addr, loff_t off, u32 count);
+bool intel_pmt_is_early_client_hw(struct device *dev);
+int intel_pmt_dev_create(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *dev, int idx);
+void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
+ struct intel_pmt_namespace *ns);
+#if IS_ENABLED(CONFIG_INTEL_PMT_DISCOVERY)
+void intel_pmt_get_features(struct intel_pmt_entry *entry);
+#else
+static inline void intel_pmt_get_features(struct intel_pmt_entry *entry) {}
+#endif
+
+#endif
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
new file mode 100644
index 000000000000..b0393c9c5b4b
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Crashlog driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/intel_vsec.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/overflow.h>
+
+#include "class.h"
+
+/* Crashlog discovery header types */
+#define CRASH_TYPE_OOBMSM 1
+
+/* Crashlog Discovery Header */
+#define CONTROL_OFFSET 0x0
+#define GUID_OFFSET 0x4
+#define BASE_OFFSET 0x8
+#define SIZE_OFFSET 0xC
+#define GET_ACCESS(v) ((v) & GENMASK(3, 0))
+#define GET_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
+#define GET_VERSION(v) (((v) & GENMASK(19, 16)) >> 16)
+/* size is in bytes */
+#define GET_SIZE(v) ((v) * sizeof(u32))
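+
+/*
+ * Illustrative decode: a control value of 0x00020013 has access type 3,
+ * crashlog type 1 (OOBMSM) and version 2; a size value of 0x40 DWORDS
+ * maps to 0x100 bytes.
+ */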
+
+/*
+ * Type 1 Version 0
+ * status and control registers are combined.
+ *
+ * Bits 29 and 30 control the state of bit 31.
+ * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured.
+ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31.
+ * Bit 31 is the read-only status with a 1 indicating log is complete.
+ */
+#define TYPE1_VER0_STATUS_OFFSET 0x00
+#define TYPE1_VER0_CONTROL_OFFSET 0x00
+
+#define TYPE1_VER0_DISABLE BIT(28)
+#define TYPE1_VER0_CLEAR BIT(29)
+#define TYPE1_VER0_EXECUTE BIT(30)
+#define TYPE1_VER0_COMPLETE BIT(31)
+#define TYPE1_VER0_TRIGGER_MASK GENMASK(31, 28)
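+
+/*
+ * Illustrative flow for this layout: write TYPE1_VER0_EXECUTE to request
+ * a log; hardware sets TYPE1_VER0_COMPLETE when the log is ready; write
+ * TYPE1_VER0_CLEAR to clear COMPLETE so a new log can be captured.
+ */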
+
+/*
+ * Type 1 Version 2
+ * status and control are different registers
+ */
+#define TYPE1_VER2_STATUS_OFFSET 0x00
+#define TYPE1_VER2_CONTROL_OFFSET 0x14
+
+/* status register */
+#define TYPE1_VER2_CLEAR_SUPPORT BIT(20)
+#define TYPE1_VER2_REARMED BIT(25)
+#define TYPE1_VER2_ERROR BIT(26)
+#define TYPE1_VER2_CONSUMED BIT(27)
+#define TYPE1_VER2_DISABLED BIT(28)
+#define TYPE1_VER2_CLEARED BIT(29)
+#define TYPE1_VER2_IN_PROGRESS BIT(30)
+#define TYPE1_VER2_COMPLETE BIT(31)
+
+/* control register */
+#define TYPE1_VER2_CONSUME BIT(25)
+#define TYPE1_VER2_REARM BIT(28)
+#define TYPE1_VER2_EXECUTE BIT(29)
+#define TYPE1_VER2_CLEAR BIT(30)
+#define TYPE1_VER2_DISABLE BIT(31)
+#define TYPE1_VER2_TRIGGER_MASK \
+ (TYPE1_VER2_EXECUTE | TYPE1_VER2_CLEAR | TYPE1_VER2_DISABLE)
+
+/* After offset, fields are ordered alphabetically, not by bit position */
+struct crashlog_status {
+ u32 offset;
+ u32 clear_supported;
+ u32 cleared;
+ u32 complete;
+ u32 consumed;
+ u32 disabled;
+ u32 error;
+ u32 in_progress;
+ u32 rearmed;
+};
+
+struct crashlog_control {
+ u32 offset;
+ u32 trigger_mask;
+ u32 clear;
+ u32 consume;
+ u32 disable;
+ u32 manual;
+ u32 rearm;
+};
+
+struct crashlog_info {
+ const struct crashlog_status status;
+ const struct crashlog_control control;
+ const struct attribute_group *attr_grp;
+};
+
+struct crashlog_entry {
+ /* entry must be first member of struct */
+ struct intel_pmt_entry entry;
+ struct mutex control_mutex;
+ const struct crashlog_info *info;
+};
+
+struct pmt_crashlog_priv {
+ int num_entries;
+ struct crashlog_entry entry[];
+};
+
+/*
+ * I/O
+ */
+
+/* Read, modify, write the control register, setting or clearing @bit based on @set */
+static void pmt_crashlog_rmw(struct crashlog_entry *crashlog, u32 bit, bool set)
+{
+ const struct crashlog_control *control = &crashlog->info->control;
+ struct intel_pmt_entry *entry = &crashlog->entry;
+ u32 reg = readl(entry->disc_table + control->offset);
+
+ reg &= ~control->trigger_mask;
+
+ if (set)
+ reg |= bit;
+ else
+ reg &= ~bit;
+
+ writel(reg, entry->disc_table + control->offset);
+}
+
+/* Read the status register and see if the specified @bit is set */
+static bool pmt_crashlog_rc(struct crashlog_entry *crashlog, u32 bit)
+{
+ const struct crashlog_status *status = &crashlog->info->status;
+ u32 reg = readl(crashlog->entry.disc_table + status->offset);
+
+ return !!(reg & bit);
+}
+
+static bool pmt_crashlog_complete(struct crashlog_entry *crashlog)
+{
+ /* return current value of the crashlog complete flag */
+ return pmt_crashlog_rc(crashlog, crashlog->info->status.complete);
+}
+
+static bool pmt_crashlog_disabled(struct crashlog_entry *crashlog)
+{
+ /* return current value of the crashlog disabled flag */
+ return pmt_crashlog_rc(crashlog, crashlog->info->status.disabled);
+}
+
+static bool pmt_crashlog_supported(struct intel_pmt_entry *entry, u32 *crash_type, u32 *version)
+{
+ u32 discovery_header = readl(entry->disc_table + CONTROL_OFFSET);
+
+ *crash_type = GET_TYPE(discovery_header);
+ *version = GET_VERSION(discovery_header);
+
+ /*
+ * Currently we only recognize OOBMSM (type 1) and version 0 or 2
+ * devices.
+ *
+ * Ignore all other crashlog devices in the system.
+ */
+ if (*crash_type == CRASH_TYPE_OOBMSM && (*version == 0 || *version == 2))
+ return true;
+
+ return false;
+}
+
+static void pmt_crashlog_set_disable(struct crashlog_entry *crashlog,
+ bool disable)
+{
+ pmt_crashlog_rmw(crashlog, crashlog->info->control.disable, disable);
+}
+
+static void pmt_crashlog_set_clear(struct crashlog_entry *crashlog)
+{
+ pmt_crashlog_rmw(crashlog, crashlog->info->control.clear, true);
+}
+
+static void pmt_crashlog_set_execute(struct crashlog_entry *crashlog)
+{
+ pmt_crashlog_rmw(crashlog, crashlog->info->control.manual, true);
+}
+
+static bool pmt_crashlog_cleared(struct crashlog_entry *crashlog)
+{
+ return pmt_crashlog_rc(crashlog, crashlog->info->status.cleared);
+}
+
+static bool pmt_crashlog_consumed(struct crashlog_entry *crashlog)
+{
+ return pmt_crashlog_rc(crashlog, crashlog->info->status.consumed);
+}
+
+static void pmt_crashlog_set_consumed(struct crashlog_entry *crashlog)
+{
+ pmt_crashlog_rmw(crashlog, crashlog->info->control.consume, true);
+}
+
+static bool pmt_crashlog_error(struct crashlog_entry *crashlog)
+{
+ return pmt_crashlog_rc(crashlog, crashlog->info->status.error);
+}
+
+static bool pmt_crashlog_rearm(struct crashlog_entry *crashlog)
+{
+ return pmt_crashlog_rc(crashlog, crashlog->info->status.rearmed);
+}
+
+static void pmt_crashlog_set_rearm(struct crashlog_entry *crashlog)
+{
+ pmt_crashlog_rmw(crashlog, crashlog->info->control.rearm, true);
+}
+
+/*
+ * sysfs
+ */
+static ssize_t
+clear_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct crashlog_entry *crashlog = dev_get_drvdata(dev);
+ bool cleared = pmt_crashlog_cleared(crashlog);
+
+ return sysfs_emit(buf, "%d\n", cleared);
+}
+
+static ssize_t
+clear_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct crashlog_entry *crashlog;
+ bool clear;
+ int result;
+
+ crashlog = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &clear);
+ if (result)
+ return result;
+
+ /* set bit only */
+ if (!clear)
+ return -EINVAL;
+
+ guard(mutex)(&crashlog->control_mutex);
+
+ pmt_crashlog_set_clear(crashlog);
+
+ return count;
+}
+static DEVICE_ATTR_RW(clear);
+
+static ssize_t
+consumed_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct crashlog_entry *crashlog = dev_get_drvdata(dev);
+ bool consumed = pmt_crashlog_consumed(crashlog);
+
+ return sysfs_emit(buf, "%d\n", consumed);
+}
+
+static ssize_t
+consumed_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct crashlog_entry *crashlog;
+ bool consumed;
+ int result;
+
+ crashlog = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &consumed);
+ if (result)
+ return result;
+
+ /* set bit only */
+ if (!consumed)
+ return -EINVAL;
+
+ guard(mutex)(&crashlog->control_mutex);
+
+ if (pmt_crashlog_disabled(crashlog))
+ return -EBUSY;
+
+ if (!pmt_crashlog_complete(crashlog))
+ return -EEXIST;
+
+ pmt_crashlog_set_consumed(crashlog);
+
+ return count;
+}
+static DEVICE_ATTR_RW(consumed);
+
+static ssize_t
+enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct crashlog_entry *crashlog = dev_get_drvdata(dev);
+ bool enabled = !pmt_crashlog_disabled(crashlog);
+
+ return sprintf(buf, "%d\n", enabled);
+}
+
+static ssize_t
+enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct crashlog_entry *crashlog;
+ bool enabled;
+ int result;
+
+ crashlog = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &enabled);
+ if (result)
+ return result;
+
+ guard(mutex)(&crashlog->control_mutex);
+
+ pmt_crashlog_set_disable(crashlog, !enabled);
+
+ return count;
+}
+static DEVICE_ATTR_RW(enable);
+
+static ssize_t
+error_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct crashlog_entry *crashlog = dev_get_drvdata(dev);
+ bool error = pmt_crashlog_error(crashlog);
+
+ return sysfs_emit(buf, "%d\n", error);
+}
+static DEVICE_ATTR_RO(error);
+
+static ssize_t
+rearm_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct crashlog_entry *crashlog = dev_get_drvdata(dev);
+ bool rearmed = pmt_crashlog_rearm(crashlog);
+
+ return sysfs_emit(buf, "%d\n", rearmed);
+}
+
+static ssize_t
+rearm_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct crashlog_entry *crashlog;
+ bool rearm;
+ int result;
+
+ crashlog = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &rearm);
+ if (result)
+ return result;
+
+ /* set only */
+ if (!rearm)
+ return -EINVAL;
+
+ guard(mutex)(&crashlog->control_mutex);
+
+ pmt_crashlog_set_rearm(crashlog);
+
+ return count;
+}
+static DEVICE_ATTR_RW(rearm);
+
+static ssize_t
+trigger_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct crashlog_entry *crashlog;
+ bool trigger;
+
+ crashlog = dev_get_drvdata(dev);
+ trigger = pmt_crashlog_complete(crashlog);
+
+ return sprintf(buf, "%d\n", trigger);
+}
+
+static ssize_t
+trigger_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct crashlog_entry *crashlog;
+ bool trigger;
+ int result;
+
+ crashlog = dev_get_drvdata(dev);
+
+ result = kstrtobool(buf, &trigger);
+ if (result)
+ return result;
+
+ guard(mutex)(&crashlog->control_mutex);
+
+ /* if device is currently disabled, return busy */
+ if (pmt_crashlog_disabled(crashlog))
+ return -EBUSY;
+
+ if (!trigger) {
+ pmt_crashlog_set_clear(crashlog);
+ return count;
+ }
+
+ /* we cannot trigger a new crash if one is still pending */
+ if (pmt_crashlog_complete(crashlog))
+ return -EEXIST;
+
+ pmt_crashlog_set_execute(crashlog);
+
+ return count;
+}
+static DEVICE_ATTR_RW(trigger);
+
+static struct attribute *pmt_crashlog_type1_ver0_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_trigger.attr,
+ NULL
+};
+
+static struct attribute *pmt_crashlog_type1_ver2_attrs[] = {
+ &dev_attr_clear.attr,
+ &dev_attr_consumed.attr,
+ &dev_attr_enable.attr,
+ &dev_attr_error.attr,
+ &dev_attr_rearm.attr,
+ &dev_attr_trigger.attr,
+ NULL
+};
+
+static const struct attribute_group pmt_crashlog_type1_ver0_group = {
+ .attrs = pmt_crashlog_type1_ver0_attrs,
+};
+
+static const struct attribute_group pmt_crashlog_type1_ver2_group = {
+ .attrs = pmt_crashlog_type1_ver2_attrs,
+};
+
+static const struct crashlog_info crashlog_type1_ver0 = {
+ .status.offset = TYPE1_VER0_STATUS_OFFSET,
+ .status.cleared = TYPE1_VER0_CLEAR,
+ .status.complete = TYPE1_VER0_COMPLETE,
+ .status.disabled = TYPE1_VER0_DISABLE,
+
+ .control.offset = TYPE1_VER0_CONTROL_OFFSET,
+ .control.trigger_mask = TYPE1_VER0_TRIGGER_MASK,
+ .control.clear = TYPE1_VER0_CLEAR,
+ .control.disable = TYPE1_VER0_DISABLE,
+ .control.manual = TYPE1_VER0_EXECUTE,
+ .attr_grp = &pmt_crashlog_type1_ver0_group,
+};
+
+static const struct crashlog_info crashlog_type1_ver2 = {
+ .status.offset = TYPE1_VER2_STATUS_OFFSET,
+ .status.clear_supported = TYPE1_VER2_CLEAR_SUPPORT,
+ .status.cleared = TYPE1_VER2_CLEARED,
+ .status.complete = TYPE1_VER2_COMPLETE,
+ .status.consumed = TYPE1_VER2_CONSUMED,
+ .status.disabled = TYPE1_VER2_DISABLED,
+ .status.error = TYPE1_VER2_ERROR,
+ .status.in_progress = TYPE1_VER2_IN_PROGRESS,
+ .status.rearmed = TYPE1_VER2_REARMED,
+
+ .control.offset = TYPE1_VER2_CONTROL_OFFSET,
+ .control.trigger_mask = TYPE1_VER2_TRIGGER_MASK,
+ .control.clear = TYPE1_VER2_CLEAR,
+ .control.consume = TYPE1_VER2_CONSUME,
+ .control.disable = TYPE1_VER2_DISABLE,
+ .control.manual = TYPE1_VER2_EXECUTE,
+ .control.rearm = TYPE1_VER2_REARM,
+ .attr_grp = &pmt_crashlog_type1_ver2_group,
+};
+
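+/* Only type 1 (OOBMSM) devices get this far, so select by version alone */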
+static const struct crashlog_info *select_crashlog_info(u32 type, u32 version)
+{
+ if (version == 0)
+ return &crashlog_type1_ver0;
+
+ return &crashlog_type1_ver2;
+}
+
+static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry,
+ struct device *dev)
+{
+ void __iomem *disc_table = entry->disc_table;
+ struct intel_pmt_header *header = &entry->header;
+ struct crashlog_entry *crashlog;
+ u32 version;
+ u32 type;
+
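+ /* Unsupported tables are skipped, not failed: 1 propagates as "skip" to probe */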
+ if (!pmt_crashlog_supported(entry, &type, &version))
+ return 1;
+
+ /* initialize the crashlog struct */
+ crashlog = container_of(entry, struct crashlog_entry, entry);
+ mutex_init(&crashlog->control_mutex);
+
+ crashlog->info = select_crashlog_info(type, version);
+
+ header->access_type = GET_ACCESS(readl(disc_table));
+ header->guid = readl(disc_table + GUID_OFFSET);
+ header->base_offset = readl(disc_table + BASE_OFFSET);
+
+ /* Size is measured in DWORDS, but accessor returns bytes */
+ header->size = GET_SIZE(readl(disc_table + SIZE_OFFSET));
+
+ entry->attr_grp = crashlog->info->attr_grp;
+
+ return 0;
+}
+
+static DEFINE_XARRAY_ALLOC(crashlog_array);
+static struct intel_pmt_namespace pmt_crashlog_ns = {
+ .name = "crashlog",
+ .xa = &crashlog_array,
+ .pmt_header_decode = pmt_crashlog_header_decode,
+};
+
+/*
+ * initialization
+ */
+static void pmt_crashlog_remove(struct auxiliary_device *auxdev)
+{
+ struct pmt_crashlog_priv *priv = auxiliary_get_drvdata(auxdev);
+ int i;
+
+ for (i = 0; i < priv->num_entries; i++) {
+ struct crashlog_entry *crashlog = &priv->entry[i];
+
+ intel_pmt_dev_destroy(&crashlog->entry, &pmt_crashlog_ns);
+ mutex_destroy(&crashlog->control_mutex);
+ }
+}
+
+static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pmt_crashlog_priv *priv;
+ size_t size;
+ int i, ret;
+
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry;
+
+ ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
+ if (ret < 0)
+ goto abort_probe;
+ if (ret)
+ continue;
+
+ priv->num_entries++;
+ }
+
+ return 0;
+abort_probe:
+ pmt_crashlog_remove(auxdev);
+ return ret;
+}
+
+static const struct auxiliary_device_id pmt_crashlog_id_table[] = {
+ { .name = "intel_vsec.crashlog" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_crashlog_id_table);
+
+static struct auxiliary_driver pmt_crashlog_aux_driver = {
+ .id_table = pmt_crashlog_id_table,
+ .remove = pmt_crashlog_remove,
+ .probe = pmt_crashlog_probe,
+};
+
+static int __init pmt_crashlog_init(void)
+{
+ return auxiliary_driver_register(&pmt_crashlog_aux_driver);
+}
+
+static void __exit pmt_crashlog_exit(void)
+{
+ auxiliary_driver_unregister(&pmt_crashlog_aux_driver);
+ xa_destroy(&crashlog_array);
+}
+
+module_init(pmt_crashlog_init);
+module_exit(pmt_crashlog_exit);
+
+MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Crashlog driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("INTEL_PMT");
diff --git a/drivers/platform/x86/intel/pmt/discovery-kunit.c b/drivers/platform/x86/intel/pmt/discovery-kunit.c
new file mode 100644
index 000000000000..f44eb41d58f6
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/discovery-kunit.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Discovery KUnit tests
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+#include <kunit/test.h>
+#include <linux/err.h>
+#include <linux/intel_pmt_features.h>
+#include <linux/intel_vsec.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define PMT_FEATURE_COUNT (FEATURE_MAX + 1)
+
+static void
+validate_pmt_regions(struct kunit *test, struct pmt_feature_group *feature_group, int feature_id)
+{
+ int i;
+
+ kunit_info(test, "Feature ID %d [%s] has %d regions.\n", feature_id,
+ pmt_feature_names[feature_id], feature_group->count);
+
+ for (i = 0; i < feature_group->count; i++) {
+ struct telemetry_region *region = &feature_group->regions[i];
+
+ kunit_info(test, " - Region %d: cdie_mask=%u, package_id=%u, partition=%u, segment=%u,",
+ i, region->plat_info.cdie_mask, region->plat_info.package_id,
+ region->plat_info.partition, region->plat_info.segment);
+ kunit_info(test, "\t\tbus=%u, device=%u, function=%u, guid=0x%x,",
+ region->plat_info.bus_number, region->plat_info.device_number,
+ region->plat_info.function_number, region->guid);
+ kunit_info(test, "\t\taddr=%p, size=%zu, num_rmids=%u", region->addr, region->size,
+ region->num_rmids);
+
+ KUNIT_ASSERT_GE(test, region->plat_info.cdie_mask, 0);
+ KUNIT_ASSERT_GE(test, region->plat_info.package_id, 0);
+ KUNIT_ASSERT_GE(test, region->plat_info.partition, 0);
+ KUNIT_ASSERT_GE(test, region->plat_info.segment, 0);
+ KUNIT_ASSERT_GE(test, region->plat_info.bus_number, 0);
+ KUNIT_ASSERT_GE(test, region->plat_info.device_number, 0);
+ KUNIT_ASSERT_GE(test, region->plat_info.function_number, 0);
+
+ KUNIT_ASSERT_NE(test, region->guid, 0);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, (__force const void *)region->addr);
+ }
+}
+
+static void linebreak(struct kunit *test)
+{
+ kunit_info(test, "*****************************************************************************\n");
+}
+
+static void test_intel_pmt_get_regions_by_feature(struct kunit *test)
+{
+ struct pmt_feature_group *feature_group;
+ int num_available = 0;
+ int feature_id;
+
+ /* Iterate through all possible feature IDs */
+ for (feature_id = 1; feature_id < PMT_FEATURE_COUNT; feature_id++, linebreak(test)) {
+ const char *name;
+
+ if (!pmt_feature_id_is_valid(feature_id))
+ continue;
+
+ name = pmt_feature_names[feature_id];
+
+ feature_group = intel_pmt_get_regions_by_feature(feature_id);
+ if (IS_ERR(feature_group)) {
+ if (PTR_ERR(feature_group) == -ENOENT)
+ kunit_warn(test, "intel_pmt_get_regions_by_feature() reporting feature %d [%s] is not present.\n",
+ feature_id, name);
+ else
+ kunit_warn(test, "intel_pmt_get_regions_by_feature() returned error %ld while attempt to lookup %d [%s].\n",
+ PTR_ERR(feature_group), feature_id, name);
+
+ continue;
+ }
+
+ if (!feature_group) {
+ kunit_warn(test, "Feature ID %d: %s is not available.\n", feature_id, name);
+ continue;
+ }
+
+ num_available++;
+
+ validate_pmt_regions(test, feature_group, feature_id);
+
+ intel_pmt_put_feature_group(feature_group);
+ }
+
+ if (num_available == 0)
+ kunit_warn(test, "No PMT region groups were available for any feature ID (0-10).\n");
+}
+
+static struct kunit_case intel_pmt_discovery_test_cases[] = {
+ KUNIT_CASE(test_intel_pmt_get_regions_by_feature),
+ {}
+};
+
+static struct kunit_suite intel_pmt_discovery_test_suite = {
+ .name = "pmt_discovery_test",
+ .test_cases = intel_pmt_discovery_test_cases,
+};
+
+kunit_test_suite(intel_pmt_discovery_test_suite);
+
+MODULE_IMPORT_NS("INTEL_PMT_DISCOVERY");
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Discovery KUNIT test driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/pmt/discovery.c b/drivers/platform/x86/intel/pmt/discovery.c
new file mode 100644
index 000000000000..32713a194a55
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/discovery.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Discovery driver
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ * All Rights Reserved.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kdev_t.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string_choices.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <linux/intel_pmt_features.h>
+#include <linux/intel_vsec.h>
+
+#include "class.h"
+
+#define MAX_FEATURE_VERSION 0
+#define DT_TBIR GENMASK(2, 0)
+#define FEAT_ATTR_SIZE(x) ((x) * sizeof(u32))
+#define PMT_GUID_SIZE(x) ((x) * sizeof(u32))
+#define PMT_ACCESS_TYPE_RSVD 0xF
+#define SKIP_FEATURE 1
+
+struct feature_discovery_table {
+ u32 access_type:4;
+ u32 version:8;
+ u32 size:16;
+ u32 reserved:4;
+ u32 id;
+ u32 offset;
+ u32 reserved2;
+};
+
+/* Common feature table header */
+struct feature_header {
+ u32 attr_size:8;
+ u32 num_guids:8;
+ u32 reserved:16;
+};
+
+/* Feature attribute fields */
+struct caps {
+ u32 caps;
+};
+
+struct command {
+ u32 max_stream_size:16;
+ u32 max_command_size:16;
+};
+
+struct watcher {
+ u32 reserved:21;
+ u32 period:11;
+ struct command command;
+};
+
+struct rmid {
+ u32 num_rmids:16; /* Number of Resource Monitoring IDs */
+ u32 reserved:16;
+ struct watcher watcher;
+};
+
+struct feature_table {
+ struct feature_header header;
+ struct caps caps;
+ union {
+ struct command command;
+ struct watcher watcher;
+ struct rmid rmid;
+ };
+ u32 *guids;
+};
+
+/* For backreference in struct feature */
+struct pmt_features_priv;
+
+struct feature {
+ struct feature_table table;
+ struct kobject kobj;
+ struct pmt_features_priv *priv;
+ struct list_head list;
+ const struct attribute_group *attr_group;
+ enum pmt_feature_id id;
+};
+
+struct pmt_features_priv {
+ struct device *parent;
+ struct device *dev;
+ int count;
+ u32 mask;
+ struct feature feature[];
+};
+
+static LIST_HEAD(pmt_feature_list);
+static DEFINE_MUTEX(feature_list_lock);
+
+#define to_pmt_feature(x) container_of(x, struct feature, kobj)
+static void pmt_feature_release(struct kobject *kobj)
+{
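+ /* No-op: features are embedded in the devm-allocated priv array */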
+}
+
+static ssize_t caps_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct feature *feature = to_pmt_feature(kobj);
+ struct pmt_cap **pmt_caps;
+ u32 caps = feature->table.caps.caps;
+ ssize_t ret = 0;
+
+ switch (feature->id) {
+ case FEATURE_PER_CORE_PERF_TELEM:
+ pmt_caps = pmt_caps_pcpt;
+ break;
+ case FEATURE_PER_CORE_ENV_TELEM:
+ pmt_caps = pmt_caps_pcet;
+ break;
+ case FEATURE_PER_RMID_PERF_TELEM:
+ pmt_caps = pmt_caps_rmid_perf;
+ break;
+ case FEATURE_ACCEL_TELEM:
+ pmt_caps = pmt_caps_accel;
+ break;
+ case FEATURE_UNCORE_TELEM:
+ pmt_caps = pmt_caps_uncore;
+ break;
+ case FEATURE_CRASH_LOG:
+ pmt_caps = pmt_caps_crashlog;
+ break;
+ case FEATURE_PETE_LOG:
+ pmt_caps = pmt_caps_pete;
+ break;
+ case FEATURE_TPMI_CTRL:
+ pmt_caps = pmt_caps_tpmi;
+ break;
+ case FEATURE_TRACING:
+ pmt_caps = pmt_caps_tracing;
+ break;
+ case FEATURE_PER_RMID_ENERGY_TELEM:
+ pmt_caps = pmt_caps_rmid_energy;
+ break;
+ default:
+ return -EINVAL;
+ }
+
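+ /* pmt_caps is NULL-terminated; each table ends with an empty sentinel */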
+ while (*pmt_caps) {
+ struct pmt_cap *pmt_cap = *pmt_caps;
+
+ while (pmt_cap->name) {
+ ret += sysfs_emit_at(buf, ret, "%-40s Available: %s\n", pmt_cap->name,
+ str_yes_no(pmt_cap->mask & caps));
+ pmt_cap++;
+ }
+ pmt_caps++;
+ }
+
+ return ret;
+}
+static struct kobj_attribute caps_attribute = __ATTR_RO(caps);
+
+static struct watcher *get_watcher(struct feature *feature)
+{
+ switch (feature_layout[feature->id]) {
+ case LAYOUT_RMID:
+ return &feature->table.rmid.watcher;
+ case LAYOUT_WATCHER:
+ return &feature->table.watcher;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static struct command *get_command(struct feature *feature)
+{
+ switch (feature_layout[feature->id]) {
+ case LAYOUT_RMID:
+ return &feature->table.rmid.watcher.command;
+ case LAYOUT_WATCHER:
+ return &feature->table.watcher.command;
+ case LAYOUT_COMMAND:
+ return &feature->table.command;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static ssize_t num_rmids_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct feature *feature = to_pmt_feature(kobj);
+
+ return sysfs_emit(buf, "%u\n", feature->table.rmid.num_rmids);
+}
+static struct kobj_attribute num_rmids_attribute = __ATTR_RO(num_rmids);
+
+static ssize_t min_watcher_period_ms_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct feature *feature = to_pmt_feature(kobj);
+ struct watcher *watcher = get_watcher(feature);
+
+ if (IS_ERR(watcher))
+ return PTR_ERR(watcher);
+
+ return sysfs_emit(buf, "%u\n", watcher->period);
+}
+static struct kobj_attribute min_watcher_period_ms_attribute =
+ __ATTR_RO(min_watcher_period_ms);
+
+static ssize_t max_stream_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct feature *feature = to_pmt_feature(kobj);
+ struct command *command = get_command(feature);
+
+ if (IS_ERR(command))
+ return PTR_ERR(command);
+
+ return sysfs_emit(buf, "%u\n", command->max_stream_size);
+}
+static struct kobj_attribute max_stream_size_attribute =
+ __ATTR_RO(max_stream_size);
+
+static ssize_t max_command_size_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct feature *feature = to_pmt_feature(kobj);
+ struct command *command = get_command(feature);
+
+ if (IS_ERR(command))
+ return PTR_ERR(command);
+
+ return sysfs_emit(buf, "%u\n", command->max_command_size);
+}
+static struct kobj_attribute max_command_size_attribute =
+ __ATTR_RO(max_command_size);
+
+static ssize_t guids_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct feature *feature = to_pmt_feature(kobj);
+ int i, count = 0;
+
+ for (i = 0; i < feature->table.header.num_guids; i++)
+ count += sysfs_emit_at(buf, count, "0x%x\n",
+ feature->table.guids[i]);
+
+ return count;
+}
+static struct kobj_attribute guids_attribute = __ATTR_RO(guids);
+
+static struct attribute *pmt_feature_rmid_attrs[] = {
+ &caps_attribute.attr,
+ &num_rmids_attribute.attr,
+ &min_watcher_period_ms_attribute.attr,
+ &max_stream_size_attribute.attr,
+ &max_command_size_attribute.attr,
+ &guids_attribute.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(pmt_feature_rmid);
+
+static const struct kobj_type pmt_feature_rmid_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = pmt_feature_release,
+ .default_groups = pmt_feature_rmid_groups,
+};
+
+static struct attribute *pmt_feature_watcher_attrs[] = {
+ &caps_attribute.attr,
+ &min_watcher_period_ms_attribute.attr,
+ &max_stream_size_attribute.attr,
+ &max_command_size_attribute.attr,
+ &guids_attribute.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(pmt_feature_watcher);
+
+static const struct kobj_type pmt_feature_watcher_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = pmt_feature_release,
+ .default_groups = pmt_feature_watcher_groups,
+};
+
+static struct attribute *pmt_feature_command_attrs[] = {
+ &caps_attribute.attr,
+ &max_stream_size_attribute.attr,
+ &max_command_size_attribute.attr,
+ &guids_attribute.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(pmt_feature_command);
+
+static const struct kobj_type pmt_feature_command_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = pmt_feature_release,
+ .default_groups = pmt_feature_command_groups,
+};
+
+static struct attribute *pmt_feature_guids_attrs[] = {
+ &caps_attribute.attr,
+ &guids_attribute.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(pmt_feature_guids);
+
+static const struct kobj_type pmt_feature_guids_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = pmt_feature_release,
+ .default_groups = pmt_feature_guids_groups,
+};
+
+static int
+pmt_feature_get_disc_table(struct pmt_features_priv *priv,
+ struct resource *disc_res,
+ struct feature_discovery_table *disc_tbl)
+{
+ void __iomem *disc_base;
+
+ disc_base = devm_ioremap_resource(priv->dev, disc_res);
+ if (IS_ERR(disc_base))
+ return PTR_ERR(disc_base);
+
+ memcpy_fromio(disc_tbl, disc_base, sizeof(*disc_tbl));
+
+ devm_iounmap(priv->dev, disc_base);
+
+ /*
+ * Some devices may expose non-functioning entries that are
+ * reserved for future use. They have zero size. Do not fail
+ * probe for these. Just ignore them.
+ */
+ if (disc_tbl->size == 0 || disc_tbl->access_type == PMT_ACCESS_TYPE_RSVD)
+ return SKIP_FEATURE;
+
+ if (disc_tbl->version > MAX_FEATURE_VERSION)
+ return SKIP_FEATURE;
+
+ if (!pmt_feature_id_is_valid(disc_tbl->id))
+ return SKIP_FEATURE;
+
+ /* Check for duplicates only after the ID is known to be in range */
+ if (priv->mask & BIT(disc_tbl->id))
+ return dev_err_probe(priv->dev, -EINVAL, "Duplicate feature: %s\n",
+ pmt_feature_names[disc_tbl->id]);
+
+ priv->mask |= BIT(disc_tbl->id);
+
+ return 0;
+}
+
+static int
+pmt_feature_get_feature_table(struct pmt_features_priv *priv,
+ struct feature *feature,
+ struct feature_discovery_table *disc_tbl,
+ struct resource *disc_res)
+{
+ struct feature_table *feat_tbl = &feature->table;
+ struct feature_header *header;
+ struct resource res = {};
+ resource_size_t res_size;
+ void __iomem *feat_base, *feat_offset;
+ void *tbl_offset;
+ size_t size;
+ u32 *guids;
+ u8 tbir;
+
+ tbir = FIELD_GET(DT_TBIR, disc_tbl->offset);
+
+ switch (disc_tbl->access_type) {
+ case ACCESS_LOCAL:
+ if (tbir)
+ return dev_err_probe(priv->dev, -EINVAL,
+ "Unsupported BAR index %u for access type %u\n",
+ tbir, disc_tbl->access_type);
+
+ /*
+ * For access_type LOCAL, the base address is as follows:
+ * base address = end of discovery region + base offset + 1
+ */
+ res = DEFINE_RES_MEM(disc_res->end + disc_tbl->offset + 1,
+ disc_tbl->size * sizeof(u32));
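+ /*
+ * e.g. (illustrative) disc_res->end 0x10000fff with offset 0x100 and
+ * size 0x40 DWORDS maps 0x10001100-0x100011ff.
+ */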
+ break;
+
+ default:
+ return dev_err_probe(priv->dev, -EINVAL, "Unrecognized access_type %u\n",
+ disc_tbl->access_type);
+ }
+
+ feature->id = disc_tbl->id;
+
+ /* Get the feature table */
+ feat_base = devm_ioremap_resource(priv->dev, &res);
+ if (IS_ERR(feat_base))
+ return PTR_ERR(feat_base);
+
+ feat_offset = feat_base;
+ tbl_offset = feat_tbl;
+
+ /* Get the header */
+ header = &feat_tbl->header;
+ memcpy_fromio(header, feat_offset, sizeof(*header));
+
+ /* Validate fields fit within mapped resource */
+ size = sizeof(*header) + FEAT_ATTR_SIZE(header->attr_size) +
+ PMT_GUID_SIZE(header->num_guids);
+ res_size = resource_size(&res);
+ if (WARN(size > res_size, "Bad table size %zu > %pa", size, &res_size))
+ return -EINVAL;
+
+ /* Get the feature attributes, including capability fields */
+ tbl_offset += sizeof(*header);
+ feat_offset += sizeof(*header);
+
+ memcpy_fromio(tbl_offset, feat_offset, FEAT_ATTR_SIZE(header->attr_size));
+
+ /* Finally, get the guids */
+ guids = devm_kmalloc(priv->dev, PMT_GUID_SIZE(header->num_guids), GFP_KERNEL);
+ if (!guids)
+ return -ENOMEM;
+
+ feat_offset += FEAT_ATTR_SIZE(header->attr_size);
+
+ memcpy_fromio(guids, feat_offset, PMT_GUID_SIZE(header->num_guids));
+
+ feat_tbl->guids = guids;
+
+ devm_iounmap(priv->dev, feat_base);
+
+ return 0;
+}
+
+static void pmt_features_add_feat(struct feature *feature)
+{
+ guard(mutex)(&feature_list_lock);
+ list_add(&feature->list, &pmt_feature_list);
+}
+
+static void pmt_features_remove_feat(struct feature *feature)
+{
+ guard(mutex)(&feature_list_lock);
+ list_del(&feature->list);
+}
+
+/* Get the discovery table and use it to get the feature table */
+static int pmt_features_discovery(struct pmt_features_priv *priv,
+ struct feature *feature,
+ struct intel_vsec_device *ivdev,
+ int idx)
+{
+ struct feature_discovery_table disc_tbl = {}; /* Avoid a false uninitialized-use warning */
+ struct resource *disc_res = &ivdev->resource[idx];
+ const struct kobj_type *ktype;
+ int ret;
+
+ ret = pmt_feature_get_disc_table(priv, disc_res, &disc_tbl);
+ if (ret)
+ return ret;
+
+ ret = pmt_feature_get_feature_table(priv, feature, &disc_tbl, disc_res);
+ if (ret)
+ return ret;
+
+ switch (feature_layout[feature->id]) {
+ case LAYOUT_RMID:
+ ktype = &pmt_feature_rmid_ktype;
+ feature->attr_group = &pmt_feature_rmid_group;
+ break;
+ case LAYOUT_WATCHER:
+ ktype = &pmt_feature_watcher_ktype;
+ feature->attr_group = &pmt_feature_watcher_group;
+ break;
+ case LAYOUT_COMMAND:
+ ktype = &pmt_feature_command_ktype;
+ feature->attr_group = &pmt_feature_command_group;
+ break;
+ case LAYOUT_CAPS_ONLY:
+ ktype = &pmt_feature_guids_ktype;
+ feature->attr_group = &pmt_feature_guids_group;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = kobject_init_and_add(&feature->kobj, ktype, &priv->dev->kobj,
+ "%s", pmt_feature_names[feature->id]);
+ if (ret)
+ return ret;
+
+ kobject_uevent(&feature->kobj, KOBJ_ADD);
+ pmt_features_add_feat(feature);
+
+ return 0;
+}
+
+static void pmt_features_remove(struct auxiliary_device *auxdev)
+{
+ struct pmt_features_priv *priv = auxiliary_get_drvdata(auxdev);
+ int i;
+
+ for (i = 0; i < priv->count; i++) {
+ struct feature *feature = &priv->feature[i];
+
+ pmt_features_remove_feat(feature);
+ sysfs_remove_group(&feature->kobj, feature->attr_group);
+ kobject_put(&feature->kobj);
+ }
+
+ device_unregister(priv->dev);
+}
+
+static int pmt_features_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *ivdev = auxdev_to_ivdev(auxdev);
+ struct pmt_features_priv *priv;
+ size_t size;
+ int ret, i;
+
+ size = struct_size(priv, feature, ivdev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->parent = &ivdev->pcidev->dev;
+ auxiliary_set_drvdata(auxdev, priv);
+
+ priv->dev = device_create(&intel_pmt_class, &auxdev->dev, MKDEV(0, 0), priv,
+ "%s-%s", "features", dev_name(priv->parent));
+ if (IS_ERR(priv->dev))
+ return dev_err_probe(&auxdev->dev, PTR_ERR(priv->dev),
+ "Could not create %s-%s device node\n",
+ "features", dev_name(priv->parent));
+
+ /* Initialize each feature */
+ for (i = 0; i < ivdev->num_resources; i++) {
+ struct feature *feature = &priv->feature[priv->count];
+
+ ret = pmt_features_discovery(priv, feature, ivdev, i);
+ if (ret == SKIP_FEATURE)
+ continue;
+ if (ret != 0)
+ goto abort_probe;
+
+ feature->priv = priv;
+ priv->count++;
+ }
+
+ return 0;
+
+abort_probe:
+ /*
+ * Only fully initialized features are tracked in priv->count, which is
+ * incremented only after a feature is completely set up (i.e., after
+ * discovery and sysfs registration). If feature initialization fails,
+ * the failing feature's state is local and does not require rollback.
+ *
+ * Therefore, on error, we can safely call the driver's remove() routine
+ * pmt_features_remove() to clean up only those features that were
+ * fully initialized and counted. All other resources are device-managed
+ * and will be cleaned up automatically during device_unregister().
+ */
+ pmt_features_remove(auxdev);
+
+ return ret;
+}
+
+static void pmt_get_features(struct intel_pmt_entry *entry, struct feature *f)
+{
+ int num_guids = f->table.header.num_guids;
+ int i;
+
+ for (i = 0; i < num_guids; i++) {
+ if (f->table.guids[i] != entry->guid)
+ continue;
+
+ entry->feature_flags |= BIT(f->id);
+
+ if (feature_layout[f->id] == LAYOUT_RMID)
+ entry->num_rmids = f->table.rmid.num_rmids;
+ else
+ entry->num_rmids = 0; /* entry is kzalloc'd, but set explicitly anyway */
+ }
+}
+
+void intel_pmt_get_features(struct intel_pmt_entry *entry)
+{
+ struct feature *feature;
+
+ mutex_lock(&feature_list_lock);
+ list_for_each_entry(feature, &pmt_feature_list, list) {
+ if (feature->priv->parent != &entry->ep->pcidev->dev)
+ continue;
+
+ pmt_get_features(entry, feature);
+ }
+ mutex_unlock(&feature_list_lock);
+}
+EXPORT_SYMBOL_NS_GPL(intel_pmt_get_features, "INTEL_PMT");
+
+static const struct auxiliary_device_id pmt_features_id_table[] = {
+ { .name = "intel_vsec.discovery" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_features_id_table);
+
+static struct auxiliary_driver pmt_features_aux_driver = {
+ .id_table = pmt_features_id_table,
+ .remove = pmt_features_remove,
+ .probe = pmt_features_probe,
+};
+module_auxiliary_driver(pmt_features_aux_driver);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Discovery driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("INTEL_PMT");
diff --git a/drivers/platform/x86/intel/pmt/features.c b/drivers/platform/x86/intel/pmt/features.c
new file mode 100644
index 000000000000..8a39cddc75c8
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/features.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "David E. Box" <david.e.box@linux.intel.com>
+ */
+
+#include <linux/export.h>
+#include <linux/types.h>
+
+#include <linux/intel_pmt_features.h>
+
+const char * const pmt_feature_names[] = {
+ [FEATURE_PER_CORE_PERF_TELEM] = "per_core_performance_telemetry",
+ [FEATURE_PER_CORE_ENV_TELEM] = "per_core_environment_telemetry",
+ [FEATURE_PER_RMID_PERF_TELEM] = "per_rmid_perf_telemetry",
+ [FEATURE_ACCEL_TELEM] = "accelerator_telemetry",
+ [FEATURE_UNCORE_TELEM] = "uncore_telemetry",
+ [FEATURE_CRASH_LOG] = "crash_log",
+ [FEATURE_PETE_LOG] = "pete_log",
+ [FEATURE_TPMI_CTRL] = "tpmi_control",
+ [FEATURE_TRACING] = "tracing",
+ [FEATURE_PER_RMID_ENERGY_TELEM] = "per_rmid_energy_telemetry",
+};
+EXPORT_SYMBOL_NS_GPL(pmt_feature_names, "INTEL_PMT_DISCOVERY");
+
+enum feature_layout feature_layout[] = {
+ [FEATURE_PER_CORE_PERF_TELEM] = LAYOUT_WATCHER,
+ [FEATURE_PER_CORE_ENV_TELEM] = LAYOUT_WATCHER,
+ [FEATURE_PER_RMID_PERF_TELEM] = LAYOUT_RMID,
+ [FEATURE_ACCEL_TELEM] = LAYOUT_WATCHER,
+ [FEATURE_UNCORE_TELEM] = LAYOUT_WATCHER,
+ [FEATURE_CRASH_LOG] = LAYOUT_COMMAND,
+ [FEATURE_PETE_LOG] = LAYOUT_COMMAND,
+ [FEATURE_TPMI_CTRL] = LAYOUT_CAPS_ONLY,
+ [FEATURE_TRACING] = LAYOUT_CAPS_ONLY,
+ [FEATURE_PER_RMID_ENERGY_TELEM] = LAYOUT_RMID,
+};
+
+struct pmt_cap pmt_cap_common[] = {
+ {PMT_CAP_TELEM, "telemetry"},
+ {PMT_CAP_WATCHER, "watcher"},
+ {PMT_CAP_CRASHLOG, "crashlog"},
+ {PMT_CAP_STREAMING, "streaming"},
+ {PMT_CAP_THRESHOLD, "threshold"},
+ {PMT_CAP_WINDOW, "window"},
+ {PMT_CAP_CONFIG, "config"},
+ {PMT_CAP_TRACING, "tracing"},
+ {PMT_CAP_INBAND, "inband"},
+ {PMT_CAP_OOB, "oob"},
+ {PMT_CAP_SECURED_CHAN, "secure_chan"},
+ {PMT_CAP_PMT_SP, "pmt_sp"},
+ {PMT_CAP_PMT_SP_POLICY, "pmt_sp_policy"},
+ {}
+};
+
+struct pmt_cap pmt_cap_pcpt[] = {
+ {PMT_CAP_PCPT_CORE_PERF, "core_performance"},
+ {PMT_CAP_PCPT_CORE_C0_RES, "core_c0_residency"},
+ {PMT_CAP_PCPT_CORE_ACTIVITY, "core_activity"},
+ {PMT_CAP_PCPT_CACHE_PERF, "cache_performance"},
+ {PMT_CAP_PCPT_QUALITY_TELEM, "quality_telemetry"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_pcpt[] = {
+ pmt_cap_common,
+ pmt_cap_pcpt,
+ NULL
+};
+
+struct pmt_cap pmt_cap_pcet[] = {
+ {PMT_CAP_PCET_WORKPOINT_HIST, "workpoint_histogram"},
+ {PMT_CAP_PCET_CORE_CURR_TEMP, "core_current_temp"},
+ {PMT_CAP_PCET_CORE_INST_RES, "core_inst_residency"},
+ {PMT_CAP_PCET_QUALITY_TELEM, "quality_telemetry"},
+ {PMT_CAP_PCET_CORE_CDYN_LVL, "core_cdyn_level"},
+ {PMT_CAP_PCET_CORE_STRESS_LVL, "core_stress_level"},
+ {PMT_CAP_PCET_CORE_DAS, "core_digital_aging_sensor"},
+ {PMT_CAP_PCET_FIVR_HEALTH, "fivr_health"},
+ {PMT_CAP_PCET_ENERGY, "energy"},
+ {PMT_CAP_PCET_PEM_STATUS, "pem_status"},
+ {PMT_CAP_PCET_CORE_C_STATE, "core_c_state"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_pcet[] = {
+ pmt_cap_common,
+ pmt_cap_pcet,
+ NULL
+};
+
+struct pmt_cap pmt_cap_rmid_perf[] = {
+ {PMT_CAP_RMID_CORES_PERF, "core_performance"},
+ {PMT_CAP_RMID_CACHE_PERF, "cache_performance"},
+ {PMT_CAP_RMID_PERF_QUAL, "performance_quality"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_rmid_perf[] = {
+ pmt_cap_common,
+ pmt_cap_rmid_perf,
+ NULL
+};
+
+struct pmt_cap pmt_cap_accel[] = {
+ {PMT_CAP_ACCEL_CPM_TELEM, "content_processing_module"},
+ {PMT_CAP_ACCEL_TIP_TELEM, "content_turbo_ip"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_accel[] = {
+ pmt_cap_common,
+ pmt_cap_accel,
+ NULL
+};
+
+struct pmt_cap pmt_cap_uncore[] = {
+ {PMT_CAP_UNCORE_IO_CA_TELEM, "io_ca"},
+ {PMT_CAP_UNCORE_RMID_TELEM, "rmid"},
+ {PMT_CAP_UNCORE_D2D_ULA_TELEM, "d2d_ula"},
+ {PMT_CAP_UNCORE_PKGC_TELEM, "package_c"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_uncore[] = {
+ pmt_cap_common,
+ pmt_cap_uncore,
+ NULL
+};
+
+struct pmt_cap pmt_cap_crashlog[] = {
+ {PMT_CAP_CRASHLOG_MAN_TRIG, "manual_trigger"},
+ {PMT_CAP_CRASHLOG_CORE, "core"},
+ {PMT_CAP_CRASHLOG_UNCORE, "uncore"},
+ {PMT_CAP_CRASHLOG_TOR, "tor"},
+ {PMT_CAP_CRASHLOG_S3M, "s3m"},
+ {PMT_CAP_CRASHLOG_PERSISTENCY, "persistency"},
+ {PMT_CAP_CRASHLOG_CLIP_GPIO, "crashlog_in_progress"},
+ {PMT_CAP_CRASHLOG_PRE_RESET, "pre_reset_extraction"},
+ {PMT_CAP_CRASHLOG_POST_RESET, "post_reset_extraction"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_crashlog[] = {
+ pmt_cap_common,
+ pmt_cap_crashlog,
+ NULL
+};
+
+struct pmt_cap pmt_cap_pete[] = {
+ {PMT_CAP_PETE_MAN_TRIG, "manual_trigger"},
+ {PMT_CAP_PETE_ENCRYPTION, "encryption"},
+ {PMT_CAP_PETE_PERSISTENCY, "persistency"},
+ {PMT_CAP_PETE_REQ_TOKENS, "required_tokens"},
+ {PMT_CAP_PETE_PROD_ENABLED, "production_enabled"},
+ {PMT_CAP_PETE_DEBUG_ENABLED, "debug_enabled"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_pete[] = {
+ pmt_cap_common,
+ pmt_cap_pete,
+ NULL
+};
+
+struct pmt_cap pmt_cap_tpmi[] = {
+ {PMT_CAP_TPMI_MAILBOX, "mailbox"},
+ {PMT_CAP_TPMI_LOCK, "bios_lock"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_tpmi[] = {
+ pmt_cap_common,
+ pmt_cap_tpmi,
+ NULL
+};
+
+struct pmt_cap pmt_cap_tracing[] = {
+ {PMT_CAP_TRACE_SRAR, "srar_errors"},
+ {PMT_CAP_TRACE_CORRECTABLE, "correctable_errors"},
+ {PMT_CAP_TRACE_MCTP, "mctp"},
+ {PMT_CAP_TRACE_MRT, "memory_resiliency"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_tracing[] = {
+ pmt_cap_common,
+ pmt_cap_tracing,
+ NULL
+};
+
+struct pmt_cap pmt_cap_rmid_energy[] = {
+ {PMT_CAP_RMID_ENERGY, "energy"},
+ {PMT_CAP_RMID_ACTIVITY, "activity"},
+ {PMT_CAP_RMID_ENERGY_QUAL, "energy_quality"},
+ {}
+};
+
+struct pmt_cap *pmt_caps_rmid_energy[] = {
+ pmt_cap_common,
+ pmt_cap_rmid_energy,
+ NULL
+};
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
new file mode 100644
index 000000000000..a4dfca6cac19
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Platform Monitoring Technology Telemetry driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: "David E. Box" <david.e.box@linux.intel.com>
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/intel_pmt_features.h>
+#include <linux/intel_vsec.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/xarray.h>
+
+#include "class.h"
+
+#define TELEM_SIZE_OFFSET 0x0
+#define TELEM_GUID_OFFSET 0x4
+#define TELEM_BASE_OFFSET 0x8
+#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0))
+#define TELEM_TYPE(v) (((v) & GENMASK(7, 4)) >> 4)
+/* Size field is in dwords; shifting by 10 rather than 12 yields bytes */
+#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10)
+
+/* Used by client hardware to identify a fixed telemetry entry */
+#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000
+
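+/* Convert qword/dword sample counts and ids into byte counts and offsets */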
+#define NUM_BYTES_QWORD(v) ((v) << 3)
+#define SAMPLE_ID_OFFSET(v) ((v) << 3)
+
+#define NUM_BYTES_DWORD(v) ((v) << 2)
+#define SAMPLE_ID_OFFSET32(v) ((v) << 2)
+
+/* Protects access to the xarray of telemetry endpoint handles */
+static DEFINE_MUTEX(ep_lock);
+
+enum telem_type {
+ TELEM_TYPE_PUNIT = 0,
+ TELEM_TYPE_CRASHLOG,
+ TELEM_TYPE_PUNIT_FIXED,
+};
+
+struct pmt_telem_priv {
+ int num_entries;
+ struct intel_pmt_entry entry[];
+};
+
+static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry,
+ struct device *dev)
+{
+ u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET);
+
+ if (intel_pmt_is_early_client_hw(dev)) {
+ u32 type = TELEM_TYPE(readl(entry->disc_table));
+
+ if ((type == TELEM_TYPE_PUNIT_FIXED) ||
+ (guid == TELEM_CLIENT_FIXED_BLOCK_GUID))
+ return true;
+ }
+
+ return false;
+}
+
+static int pmt_telem_header_decode(struct intel_pmt_entry *entry,
+ struct device *dev)
+{
+ void __iomem *disc_table = entry->disc_table;
+ struct intel_pmt_header *header = &entry->header;
+
+ if (pmt_telem_region_overlaps(entry, dev))
+ return 1;
+
+ header->access_type = TELEM_ACCESS(readl(disc_table));
+ header->guid = readl(disc_table + TELEM_GUID_OFFSET);
+ header->base_offset = readl(disc_table + TELEM_BASE_OFFSET);
+
+ /* Size is measured in DWORDS, but accessor returns bytes */
+ header->size = TELEM_SIZE(readl(disc_table));
+
+ /*
+ * Some devices may expose non-functioning entries that are
+ * reserved for future use. They have zero size. Do not fail
+ * probe for these. Just ignore them.
+ */
+ if (header->size == 0 || header->access_type == 0xF)
+ return 1;
+
+ return 0;
+}
+
+static int pmt_telem_add_endpoint(struct intel_vsec_device *ivdev,
+ struct intel_pmt_entry *entry)
+{
+ struct telem_endpoint *ep;
+
+ /* Endpoint lifetimes are managed by kref, not devres */
+ entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL);
+ if (!entry->ep)
+ return -ENOMEM;
+
+ ep = entry->ep;
+ ep->pcidev = ivdev->pcidev;
+ ep->header.access_type = entry->header.access_type;
+ ep->header.guid = entry->header.guid;
+ ep->header.base_offset = entry->header.base_offset;
+ ep->header.size = entry->header.size;
+ ep->base = entry->base;
+ ep->present = true;
+ ep->cb = ivdev->priv_data;
+
+ kref_init(&ep->kref);
+
+ return 0;
+}
+
+static DEFINE_XARRAY_ALLOC(telem_array);
+static struct intel_pmt_namespace pmt_telem_ns = {
+ .name = "telem",
+ .xa = &telem_array,
+ .pmt_header_decode = pmt_telem_header_decode,
+ .pmt_add_endpoint = pmt_telem_add_endpoint,
+};
+
+/* Called when all users unregister and the device is removed */
+static void pmt_telem_ep_release(struct kref *kref)
+{
+ struct telem_endpoint *ep;
+
+ ep = container_of(kref, struct telem_endpoint, kref);
+ kfree(ep);
+}
+
+unsigned long pmt_telem_get_next_endpoint(unsigned long start)
+{
+ struct intel_pmt_entry *entry;
+ unsigned long found_idx;
+
+ mutex_lock(&ep_lock);
+ xa_for_each_start(&telem_array, found_idx, entry, start) {
+ /*
+ * Return first found index after start.
+ * 0 is not valid id.
+ */
+ if (found_idx > start)
+ break;
+ }
+ mutex_unlock(&ep_lock);
+
+ return found_idx == start ? 0 : found_idx;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, "INTEL_PMT_TELEMETRY");
+
+struct telem_endpoint *pmt_telem_register_endpoint(int devid)
+{
+ struct intel_pmt_entry *entry;
+ unsigned long index = devid;
+
+ mutex_lock(&ep_lock);
+ entry = xa_find(&telem_array, &index, index, XA_PRESENT);
+ if (!entry) {
+ mutex_unlock(&ep_lock);
+ return ERR_PTR(-ENXIO);
+ }
+
+ kref_get(&entry->ep->kref);
+ mutex_unlock(&ep_lock);
+
+ return entry->ep;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, "INTEL_PMT_TELEMETRY");
+
+void pmt_telem_unregister_endpoint(struct telem_endpoint *ep)
+{
+ kref_put(&ep->kref, pmt_telem_ep_release);
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, "INTEL_PMT_TELEMETRY");
+
+int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info)
+{
+ struct intel_pmt_entry *entry;
+ unsigned long index = devid;
+ int err = 0;
+
+ if (!info)
+ return -EINVAL;
+
+ mutex_lock(&ep_lock);
+ entry = xa_find(&telem_array, &index, index, XA_PRESENT);
+ if (!entry) {
+ err = -ENXIO;
+ goto unlock;
+ }
+
+ info->pdev = entry->ep->pcidev;
+ info->header = entry->ep->header;
+
+unlock:
+ mutex_unlock(&ep_lock);
+ return err;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, "INTEL_PMT_TELEMETRY");
+
+static int pmt_copy_region(struct telemetry_region *region,
+ struct intel_pmt_entry *entry)
+{
+ struct oobmsm_plat_info *plat_info;
+
+ plat_info = intel_vsec_get_mapping(entry->ep->pcidev);
+ if (IS_ERR(plat_info))
+ return PTR_ERR(plat_info);
+
+ region->plat_info = *plat_info;
+ region->guid = entry->guid;
+ region->addr = entry->ep->base;
+ region->size = entry->size;
+ region->num_rmids = entry->num_rmids;
+
+ return 0;
+}
+
+static void pmt_feature_group_release(struct kref *kref)
+{
+ struct pmt_feature_group *feature_group;
+
+ feature_group = container_of(kref, struct pmt_feature_group, kref);
+ kfree(feature_group);
+}
+
+struct pmt_feature_group *intel_pmt_get_regions_by_feature(enum pmt_feature_id id)
+{
+ struct pmt_feature_group *feature_group __free(kfree) = NULL;
+ struct telemetry_region *region;
+ struct intel_pmt_entry *entry;
+ unsigned long idx;
+ int count = 0;
+ size_t size;
+
+ if (!pmt_feature_id_is_valid(id))
+ return ERR_PTR(-EINVAL);
+
+ guard(mutex)(&ep_lock);
+ xa_for_each(&telem_array, idx, entry) {
+ if (entry->feature_flags & BIT(id))
+ count++;
+ }
+
+ if (!count)
+ return ERR_PTR(-ENOENT);
+
+ size = struct_size(feature_group, regions, count);
+ feature_group = kzalloc(size, GFP_KERNEL);
+ if (!feature_group)
+ return ERR_PTR(-ENOMEM);
+
+ feature_group->count = count;
+
+ region = feature_group->regions;
+ xa_for_each(&telem_array, idx, entry) {
+ int ret;
+
+ if (!(entry->feature_flags & BIT(id)))
+ continue;
+
+ ret = pmt_copy_region(region, entry);
+ if (ret)
+ return ERR_PTR(ret);
+
+ region++;
+ }
+
+ kref_init(&feature_group->kref);
+
+ return no_free_ptr(feature_group);
+}
+EXPORT_SYMBOL(intel_pmt_get_regions_by_feature);
+
+void intel_pmt_put_feature_group(struct pmt_feature_group *feature_group)
+{
+ kref_put(&feature_group->kref, pmt_feature_group_release);
+}
+EXPORT_SYMBOL(intel_pmt_put_feature_group);
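+
+/*
+ * Illustrative sketch, not part of this patch: a consumer pairing
+ * intel_pmt_get_regions_by_feature() with intel_pmt_put_feature_group().
+ * The feature id and the consume_region() helper are hypothetical.
+ *
+ *	struct pmt_feature_group *fg;
+ *	int i;
+ *
+ *	fg = intel_pmt_get_regions_by_feature(FEATURE_PER_RMID_ENERGY_TELEM);
+ *	if (IS_ERR(fg))
+ *		return PTR_ERR(fg);
+ *
+ *	for (i = 0; i < fg->count; i++)
+ *		consume_region(&fg->regions[i]);
+ *
+ *	intel_pmt_put_feature_group(fg);
+ */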
+
+int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count)
+{
+ u32 offset, size;
+
+ if (!ep->present)
+ return -ENODEV;
+
+ offset = SAMPLE_ID_OFFSET(id);
+ size = ep->header.size;
+
+ if (offset + NUM_BYTES_QWORD(count) > size)
+ return -EINVAL;
+
+ pmt_telem_read_mmio(ep->pcidev, ep->cb, ep->header.guid, data, ep->base, offset,
+ NUM_BYTES_QWORD(count));
+
+ return ep->present ? 0 : -EPIPE;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read, "INTEL_PMT_TELEMETRY");
+
+int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count)
+{
+ u32 offset, size;
+
+ if (!ep->present)
+ return -ENODEV;
+
+ offset = SAMPLE_ID_OFFSET32(id);
+ size = ep->header.size;
+
+ if (offset + NUM_BYTES_DWORD(count) > size)
+ return -EINVAL;
+
+ memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count));
+
+ return ep->present ? 0 : -EPIPE;
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, "INTEL_PMT_TELEMETRY");
+
+struct telem_endpoint *
+pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos)
+{
+ int devid = 0;
+ int inst = 0;
+ int err = 0;
+
+ while ((devid = pmt_telem_get_next_endpoint(devid))) {
+ struct telem_endpoint_info ep_info;
+
+ err = pmt_telem_get_endpoint_info(devid, &ep_info);
+ if (err)
+ return ERR_PTR(err);
+
+ if (ep_info.header.guid == guid && ep_info.pdev == pcidev) {
+ if (inst == pos)
+ return pmt_telem_register_endpoint(devid);
+ ++inst;
+ }
+ }
+
+ return ERR_PTR(-ENXIO);
+}
+EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, "INTEL_PMT_TELEMETRY");
+
+static void pmt_telem_remove(struct auxiliary_device *auxdev)
+{
+ struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
+ int i;
+
+ mutex_lock(&ep_lock);
+ for (i = 0; i < priv->num_entries; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[i];
+
+ kref_put(&entry->ep->kref, pmt_telem_ep_release);
+ intel_pmt_dev_destroy(entry, &pmt_telem_ns);
+ }
+ mutex_unlock(&ep_lock);
+}
+
+static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
+{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pmt_telem_priv *priv;
+ size_t size;
+ int i, ret;
+
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ auxiliary_set_drvdata(auxdev, priv);
+
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
+ struct intel_pmt_entry *entry = &priv->entry[priv->num_entries];
+
+ mutex_lock(&ep_lock);
+ ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
+ mutex_unlock(&ep_lock);
+ if (ret < 0)
+ goto abort_probe;
+ if (ret)
+ continue;
+
+ priv->num_entries++;
+
+ intel_pmt_get_features(entry);
+ }
+
+ return 0;
+abort_probe:
+ pmt_telem_remove(auxdev);
+ return ret;
+}
+
+static const struct auxiliary_device_id pmt_telem_id_table[] = {
+ { .name = "intel_vsec.telemetry" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_telem_id_table);
+
+static struct auxiliary_driver pmt_telem_aux_driver = {
+ .id_table = pmt_telem_id_table,
+ .remove = pmt_telem_remove,
+ .probe = pmt_telem_probe,
+};
+
+static int __init pmt_telem_init(void)
+{
+ return auxiliary_driver_register(&pmt_telem_aux_driver);
+}
+module_init(pmt_telem_init);
+
+static void __exit pmt_telem_exit(void)
+{
+ auxiliary_driver_unregister(&pmt_telem_aux_driver);
+ xa_destroy(&telem_array);
+}
+module_exit(pmt_telem_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel PMT Telemetry driver");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("INTEL_PMT");
+MODULE_IMPORT_NS("INTEL_VSEC");
diff --git a/drivers/platform/x86/intel/pmt/telemetry.h b/drivers/platform/x86/intel/pmt/telemetry.h
new file mode 100644
index 000000000000..d45af5512b4e
--- /dev/null
+++ b/drivers/platform/x86/intel/pmt/telemetry.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TELEMETRY_H
+#define _TELEMETRY_H
+
+/* Telemetry types */
+#define PMT_TELEM_TELEMETRY 0
+#define PMT_TELEM_CRASHLOG 1
+
+struct telem_endpoint;
+struct pci_dev;
+
+struct telem_header {
+ u8 access_type;
+ u16 size;
+ u32 guid;
+ u32 base_offset;
+};
+
+struct telem_endpoint_info {
+ struct pci_dev *pdev;
+ struct telem_header header;
+};
+
+/**
+ * pmt_telem_get_next_endpoint() - Get next device id for a telemetry endpoint
+ * @start: starting devid to look from
+ *
+ * This function can be used in a while loop predicate to retrieve the devid
+ * of all available telemetry endpoints. Functions pmt_telem_get_endpoint_info()
+ * and pmt_telem_register_endpoint() can be used inside the loop to examine
+ * endpoint info and register to receive a pointer to the endpoint. The pointer
+ * is then usable in the telemetry read calls to access the telemetry data.
+ *
+ * Return:
+ * * devid - devid of the next present endpoint from start
+ * * 0 - when no more endpoints are present after start
+ */
+unsigned long pmt_telem_get_next_endpoint(unsigned long start);
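+
+/*
+ * Illustrative sketch of the loop described above; assumed usage, not part
+ * of this patch. MY_GUID stands in for a caller-known GUID.
+ *
+ *	struct telem_endpoint *ep = NULL;
+ *	unsigned long devid = 0;
+ *
+ *	while ((devid = pmt_telem_get_next_endpoint(devid))) {
+ *		struct telem_endpoint_info info;
+ *
+ *		if (pmt_telem_get_endpoint_info(devid, &info))
+ *			continue;
+ *		if (info.header.guid == MY_GUID) {
+ *			ep = pmt_telem_register_endpoint(devid);
+ *			break;
+ *		}
+ *	}
+ */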
+
+/**
+ * pmt_telem_register_endpoint() - Register a telemetry endpoint
+ * @devid: device id/handle of the telemetry endpoint
+ *
+ * Increments the kref usage counter for the endpoint.
+ *
+ * Return:
+ * * endpoint - On success returns pointer to the telemetry endpoint
+ * * -ENXIO - telemetry endpoint not found
+ */
+struct telem_endpoint *pmt_telem_register_endpoint(int devid);
+
+/**
+ * pmt_telem_unregister_endpoint() - Unregister a telemetry endpoint
+ * @ep: telemetry endpoint returned by pmt_telem_register_endpoint()
+ *
+ * Decrements the kref usage counter for the endpoint.
+ */
+void pmt_telem_unregister_endpoint(struct telem_endpoint *ep);
+
+/**
+ * pmt_telem_get_endpoint_info() - Get info for an endpoint from its devid
+ * @devid: device id/handle of the telemetry endpoint
+ * @info: Endpoint info structure to be populated
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENXIO - telemetry endpoint not found for the devid
+ * * -EINVAL - @info is NULL
+ */
+int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info);
+
+/**
+ * pmt_telem_find_and_register_endpoint() - Get a telemetry endpoint from
+ * pci_dev device, guid and pos
+ * @pcidev: PCI device inside the Intel VSEC
+ * @guid: GUID of the telemetry space
+ * @pos: Instance of the guid
+ *
+ * Return:
+ * * endpoint - On success returns pointer to the telemetry endpoint
+ * * -ENXIO - telemetry endpoint not found
+ */
+struct telem_endpoint *pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev,
+ u32 guid, u16 pos);
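+
+/*
+ * Illustrative sketch, assumed usage: register the first instance (pos 0)
+ * of a caller-known GUID on a given PCI device. MY_GUID is a placeholder.
+ *
+ *	struct telem_endpoint *ep;
+ *
+ *	ep = pmt_telem_find_and_register_endpoint(pcidev, MY_GUID, 0);
+ *	if (IS_ERR(ep))
+ *		return PTR_ERR(ep);
+ */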
+
+/**
+ * pmt_telem_read() - Read qwords from counter sram using sample id
+ * @ep: Telemetry endpoint to be read
+ * @id: The beginning sample id of the metric(s) to be read
+ * @data: Allocated qword buffer
+ * @count: Number of qwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - The device is not present.
+ * * -EINVAL - The offset is out of bounds
+ * * -EPIPE  - The device was removed during the read. Data was written
+ *             but should be considered invalid.
+ */
+int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count);
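+
+/*
+ * Illustrative sketch, assumed usage: read four qwords starting at sample
+ * id 0 and drop the endpoint if the device went away, as described above.
+ *
+ *	u64 samples[4];
+ *	int ret;
+ *
+ *	ret = pmt_telem_read(ep, 0, samples, ARRAY_SIZE(samples));
+ *	if (ret == -ENODEV || ret == -EPIPE)
+ *		pmt_telem_unregister_endpoint(ep);
+ */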
+
+/**
+ * pmt_telem_read32() - Read dwords from counter sram using sample id
+ * @ep: Telemetry endpoint to be read
+ * @id: The beginning sample id of the metric(s) to be read
+ * @data: Allocated dword buffer
+ * @count: Number of dwords requested
+ *
+ * Callers must ensure reads are aligned. When the call returns -ENODEV,
+ * the device has been removed and callers should unregister the telemetry
+ * endpoint.
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - The device is not present.
+ * * -EINVAL - The offset is out of bounds
+ * * -EPIPE  - The device was removed during the read. Data was written
+ *             but should be considered invalid.
+ */
+int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count);
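+
+/*
+ * Illustrative sketch, assumed usage: the 32-bit variant counts and
+ * indexes in dwords rather than qwords.
+ *
+ *	u32 vals[2];
+ *	int ret = pmt_telem_read32(ep, 0, vals, ARRAY_SIZE(vals));
+ */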
+
+#endif