Diffstat (limited to 'drivers/acpi/riscv')
-rw-r--r--  drivers/acpi/riscv/Kconfig   |   7
-rw-r--r--  drivers/acpi/riscv/Makefile  |   5
-rw-r--r--  drivers/acpi/riscv/cppc.c    | 155
-rw-r--r--  drivers/acpi/riscv/cpuidle.c |  81
-rw-r--r--  drivers/acpi/riscv/init.c    |  15
-rw-r--r--  drivers/acpi/riscv/init.h    |   5
-rw-r--r--  drivers/acpi/riscv/irq.c     | 404
-rw-r--r--  drivers/acpi/riscv/rhct.c    |  93
-rw-r--r--  drivers/acpi/riscv/rimt.c    | 520
9 files changed, 1281 insertions(+), 4 deletions(-)
diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig new file mode 100644 index 000000000000..046296a18d00 --- /dev/null +++ b/drivers/acpi/riscv/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# ACPI Configuration for RISC-V +# + +config ACPI_RIMT + bool diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile index 8b3b126e0b94..1284a076fa88 100644 --- a/drivers/acpi/riscv/Makefile +++ b/drivers/acpi/riscv/Makefile @@ -1,2 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y += rhct.o +obj-y += rhct.o init.o irq.o +obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o +obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o +obj-$(CONFIG_ACPI_RIMT) += rimt.o diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c new file mode 100644 index 000000000000..42c1a9052470 --- /dev/null +++ b/drivers/acpi/riscv/cppc.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implement CPPC FFH helper routines for RISC-V. + * + * Copyright (C) 2024 Ventana Micro Systems Inc. + */ + +#include <acpi/cppc_acpi.h> +#include <asm/csr.h> +#include <asm/sbi.h> + +#define SBI_EXT_CPPC 0x43505043 + +/* CPPC interfaces defined in SBI spec */ +#define SBI_CPPC_PROBE 0x0 +#define SBI_CPPC_READ 0x1 +#define SBI_CPPC_READ_HI 0x2 +#define SBI_CPPC_WRITE 0x3 + +/* RISC-V FFH definitions from RISC-V FFH spec */ +#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60) +#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0)) +#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0)) + +#define FFH_CPPC_SBI 0x1 +#define FFH_CPPC_CSR 0x2 + +struct sbi_cppc_data { + u64 val; + u32 reg; + struct sbiret ret; +}; + +static bool cppc_ext_present; + +static int __init sbi_cppc_init(void) +{ + if (sbi_spec_version >= sbi_mk_version(2, 0) && + sbi_probe_extension(SBI_EXT_CPPC) > 0) { + cppc_ext_present = true; + } else { + cppc_ext_present = false; + } + + return 0; +} +device_initcall(sbi_cppc_init); + +static void sbi_cppc_read(void *read_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; + + data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ, + data->reg, 0, 0, 0, 0, 0); +} + +static void sbi_cppc_write(void *write_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; + + data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE, + data->reg, data->val, 0, 0, 0, 0); +} + +static void cppc_ffh_csr_read(void *read_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data; + + switch (data->reg) { + /* Support only TIME CSR for now */ + case CSR_TIME: + data->ret.value = csr_read(CSR_TIME); + data->ret.error = 0; + break; + default: + data->ret.error = -EINVAL; + break; + } +} + +static void cppc_ffh_csr_write(void *write_data) +{ + struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data; + + data->ret.error = -EINVAL; +} + +/* + * Refer to drivers/acpi/cppc_acpi.c for the description of the functions + * below. + */ +bool cpc_ffh_supported(void) +{ + return true; +} + +int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val) +{ + struct sbi_cppc_data data; + + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + + if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { + if (!cppc_ext_present) + return -EINVAL; + + data.reg = FFH_CPPC_SBI_REG(reg->address); + + smp_call_function_single(cpu, sbi_cppc_read, &data, 1); + + *val = data.ret.value; + + return (data.ret.error) ? 
sbi_err_map_linux_errno(data.ret.error) : 0; + } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { + data.reg = FFH_CPPC_CSR_NUM(reg->address); + + smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1); + + *val = data.ret.value; + + return data.ret.error; + } + + return -EINVAL; +} + +int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val) +{ + struct sbi_cppc_data data; + + if (WARN_ON_ONCE(irqs_disabled())) + return -EPERM; + + if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) { + if (!cppc_ext_present) + return -EINVAL; + + data.reg = FFH_CPPC_SBI_REG(reg->address); + data.val = val; + + smp_call_function_single(cpu, sbi_cppc_write, &data, 1); + + return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0; + } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) { + data.reg = FFH_CPPC_CSR_NUM(reg->address); + data.val = val; + + smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1); + + return data.ret.error; + } + + return -EINVAL; +} diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c new file mode 100644 index 000000000000..624f9bbdb58c --- /dev/null +++ b/drivers/acpi/riscv/cpuidle.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + * + */ + +#include <linux/acpi.h> +#include <acpi/processor.h> +#include <linux/cpu_pm.h> +#include <linux/cpuidle.h> +#include <linux/suspend.h> +#include <asm/cpuidle.h> +#include <asm/sbi.h> +#include <asm/suspend.h> + +#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60) +#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32) + +#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60) + +static int acpi_cpu_init_idle(unsigned int cpu) +{ + int i; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + if (!riscv_sbi_hsm_is_supported()) + return -ENODEV; + + if (pr->power.count <= 1) + return -ENODEV; + + for (i = 1; i < pr->power.count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i]; + + /* + * Validate Entry Method as per FFH spec. 
+ * bits[63:60] should be 0x1 + * bits[59:32] should be 0x0 + * bits[31:0] represent a SBI power_state + */ + if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) || + (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) { + pr_warn("Invalid LPI entry method %#llx\n", lpi->address); + return -EINVAL; + } + + state = lpi->address; + if (!riscv_sbi_suspend_state_is_valid(state)) { + pr_warn("Invalid SBI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + +int acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return acpi_cpu_init_idle(cpu); +} + +int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) +{ + u32 state = lpi->address; + + if (state & SBI_HSM_SUSP_NON_RET_BIT) + return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, + lpi->index, + state); + else + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, + lpi->index, + state); +} diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c new file mode 100644 index 000000000000..7c00f7995e86 --- /dev/null +++ b/drivers/acpi/riscv/init.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023-2024, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + */ + +#include <linux/acpi.h> +#include "init.h" + +void __init acpi_arch_init(void) +{ + riscv_acpi_init_gsi_mapping(); + if (IS_ENABLED(CONFIG_ACPI_RIMT)) + riscv_acpi_rimt_init(); +} diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h new file mode 100644 index 000000000000..1680aa2aaf23 --- /dev/null +++ b/drivers/acpi/riscv/init.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#include <linux/init.h> + +void __init riscv_acpi_init_gsi_mapping(void); +void __init riscv_acpi_rimt_init(void); diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c new file mode 100644 index 000000000000..d9a2154d6c6a --- /dev/null +++ b/drivers/acpi/riscv/irq.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023-2024, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + */ + +#include <linux/acpi.h> +#include <linux/sort.h> +#include <linux/irq.h> + +#include "init.h" + +#define RISCV_ACPI_INTC_FLAG_PENDING BIT(0) + +struct riscv_ext_intc_list { + acpi_handle handle; + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; + u32 id; + u32 type; + u32 flag; + struct list_head list; +}; + +struct acpi_irq_dep_ctx { + int rc; + unsigned int index; + acpi_handle handle; +}; + +LIST_HEAD(ext_intc_list); + +static int irqchip_cmp_func(const void *in0, const void *in1) +{ + struct acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0; + struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1; + + return (elem0->type > elem1->type) - (elem0->type < elem1->type); +} + +/* + * On RISC-V, RINTC structures in MADT should be probed before any other + * interrupt controller structures and IMSIC before APLIC. The interrupt + * controller subtypes in MADT of ACPI spec for RISC-V are defined in + * the incremental order like RINTC(24)->IMSIC(25)->APLIC(26)->PLIC(27). + * Hence, simply sorting the subtypes in incremental order will + * establish the required order. 
+ */ +void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) +{ + struct acpi_probe_entry *ape = ap_head; + + if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) + return; + sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL); +} + +static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i, *tmp; + + list_for_each_safe(i, tmp, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi_base == ext_intc_element->gsi_base) { + ext_intc_element->handle = handle; + return AE_OK; + } + } + + return AE_NOT_FOUND; +} + +int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs) +{ + struct riscv_ext_intc_list *ext_intc_element; + + list_for_each_entry(ext_intc_element, &ext_intc_list, list) { + if (gsi_base == ext_intc_element->gsi_base && + (ext_intc_element->flag & RISCV_ACPI_INTC_FLAG_PENDING)) { + ext_intc_element->nr_irqs = nr_irqs; + ext_intc_element->flag &= ~RISCV_ACPI_INTC_FLAG_PENDING; + return 0; + } + } + + return -ENODEV; +} + +int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, + u32 *id, u32 *nr_irqs, u32 *nr_idcs) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) { + *gsi_base = ext_intc_element->gsi_base; + *id = ext_intc_element->id; + *nr_irqs = ext_intc_element->nr_irqs; + if (nr_idcs) + *nr_idcs = ext_intc_element->nr_idcs; + + return 0; + } + } + + return -ENODEV; +} + +struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct acpi_device *adev; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi >= ext_intc_element->gsi_base && + gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) { + adev = acpi_fetch_acpi_dev(ext_intc_element->handle); + if (!adev) + return NULL; + + return acpi_fwnode_handle(adev); + } + } + + return NULL; +} + +static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs, + u32 id, u32 type) +{ + struct riscv_ext_intc_list *ext_intc_element, *node, *prev; + + ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL); + if (!ext_intc_element) + return -ENOMEM; + + ext_intc_element->gsi_base = gsi_base; + + /* If nr_irqs is zero, indicate it in flag and set to max range possible */ + if (nr_irqs) { + ext_intc_element->nr_irqs = nr_irqs; + } else { + ext_intc_element->flag |= RISCV_ACPI_INTC_FLAG_PENDING; + ext_intc_element->nr_irqs = U32_MAX - ext_intc_element->gsi_base; + } + + ext_intc_element->nr_idcs = nr_idcs; + ext_intc_element->id = id; + list_for_each_entry(node, &ext_intc_list, list) { + if (node->gsi_base < ext_intc_element->gsi_base) + break; + } + + /* Adjust the previous node's GSI range if that has pending registration */ + prev = list_prev_entry(node, list); + if (!list_entry_is_head(prev, &ext_intc_list, list)) { + if (prev->flag & RISCV_ACPI_INTC_FLAG_PENDING) + prev->nr_irqs = ext_intc_element->gsi_base - prev->gsi_base; + } + + list_add_tail(&ext_intc_element->list, &node->list); + return 0; +} + +static acpi_status __init riscv_acpi_create_gsi_map_smsi(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + acpi_status status; + u64 gbase; + + if 
(!acpi_has_method(handle, "_GSB")) { + acpi_handle_err(handle, "_GSB method not found\n"); + return AE_ERROR; + } + + status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to evaluate _GSB method\n"); + return status; + } + + riscv_acpi_register_ext_intc(gbase, 0, 0, 0, ACPI_RISCV_IRQCHIP_SMSI); + status = riscv_acpi_update_gsi_handle((u32)gbase, handle); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); + return status; + } + + return AE_OK; +} + +static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level, + void *context, void **return_value) +{ + acpi_status status; + u64 gbase; + + if (!acpi_has_method(handle, "_GSB")) { + acpi_handle_err(handle, "_GSB method not found\n"); + return AE_ERROR; + } + + status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to evaluate _GSB method\n"); + return status; + } + + status = riscv_acpi_update_gsi_handle((u32)gbase, handle); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); + return status; + } + + return AE_OK; +} + +static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header; + + return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs, + aplic->id, ACPI_RISCV_IRQCHIP_APLIC); +} + +static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header; + + return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0, + plic->id, ACPI_RISCV_IRQCHIP_PLIC); +} + +void __init riscv_acpi_init_gsi_mapping(void) +{ + /* There can be either PLIC or APLIC */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) { + acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL); + return; + } + + if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0) + acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL); + + /* Unlike PLIC/APLIC, SYSMSI doesn't have MADT */ + acpi_get_devices("RSCV0006", riscv_acpi_create_gsi_map_smsi, NULL, NULL); +} + +static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi) +{ + struct riscv_ext_intc_list *ext_intc_element; + struct list_head *i; + + list_for_each(i, &ext_intc_list) { + ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); + if (gsi >= ext_intc_element->gsi_base && + gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) + return ext_intc_element->handle; + } + + return NULL; +} + +static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context) +{ + struct acpi_irq_dep_ctx *ctx = context; + struct acpi_resource_irq *irq; + struct acpi_resource_extended_irq *eirq; + + switch (ares->type) { + case ACPI_RESOURCE_TYPE_IRQ: + irq = &ares->data.irq; + if (ctx->index >= irq->interrupt_count) { + ctx->index -= irq->interrupt_count; + return AE_OK; + } + ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]); + return AE_CTRL_TERMINATE; + case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: + eirq = &ares->data.extended_irq; + if (eirq->producer_consumer == ACPI_PRODUCER) + return AE_OK; + + if (ctx->index >= eirq->interrupt_count) { + ctx->index -= eirq->interrupt_count; + return 
AE_OK; + } + + /* Support GSIs only */ + if (eirq->resource_source.string_length) + return AE_OK; + + ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]); + return AE_CTRL_TERMINATE; + } + + return AE_OK; +} + +static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle) +{ + struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL}; + + if (!gsi_handle) + return 0; + + acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx); + *gsi_handle = ctx.handle; + if (*gsi_handle) + return 1; + + return 0; +} + +static u32 riscv_acpi_add_prt_dep(acpi_handle handle) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_pci_routing_table *entry; + struct acpi_handle_list dep_devices; + acpi_handle gsi_handle; + acpi_handle link_handle; + acpi_status status; + u32 count = 0; + + status = acpi_get_irq_routing_table(handle, &buffer); + if (ACPI_FAILURE(status)) { + acpi_handle_err(handle, "failed to get IRQ routing table\n"); + kfree(buffer.pointer); + return 0; + } + + entry = buffer.pointer; + while (entry && (entry->length > 0)) { + if (entry->source[0]) { + acpi_get_handle(handle, entry->source, &link_handle); + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = link_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } else { + gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = gsi_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } + + entry = (struct acpi_pci_routing_table *) + ((unsigned long)entry + entry->length); + } + + kfree(buffer.pointer); + return count; +} + +static u32 riscv_acpi_add_irq_dep(acpi_handle handle) +{ + struct acpi_handle_list dep_devices; + acpi_handle gsi_handle; + u32 count = 0; + int i; + + for (i = 0; + riscv_acpi_irq_get_dep(handle, i, &gsi_handle); + i++) { + dep_devices.count = 1; + dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); + if (!dep_devices.handles) { + acpi_handle_err(handle, "failed to allocate memory\n"); + continue; + } + + dep_devices.handles[0] = gsi_handle; + count += acpi_scan_add_dep(handle, &dep_devices); + } + + return count; +} + +u32 arch_acpi_add_auto_dep(acpi_handle handle) +{ + if (acpi_has_method(handle, "_PRT")) + return riscv_acpi_add_prt_dep(handle); + + return riscv_acpi_add_irq_dep(handle); +} diff --git a/drivers/acpi/riscv/rhct.c b/drivers/acpi/riscv/rhct.c index b280b3e9c7d9..caa2c16e1697 100644 --- a/drivers/acpi/riscv/rhct.c +++ b/drivers/acpi/riscv/rhct.c @@ -8,8 +8,9 @@ #define pr_fmt(fmt) "ACPI: RHCT: " fmt #include <linux/acpi.h> +#include <linux/bits.h> -static struct acpi_table_header *acpi_get_rhct(void) +static struct acpi_table_rhct *acpi_get_rhct(void) { static struct acpi_table_header *rhct; acpi_status status; @@ -26,7 +27,7 @@ static struct acpi_table_header *acpi_get_rhct(void) } } - return rhct; + return (struct acpi_table_rhct *)rhct; } /* @@ -48,7 +49,7 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const BUG_ON(acpi_disabled); if (!table) { - rhct = (struct acpi_table_rhct *)acpi_get_rhct(); + rhct = acpi_get_rhct(); 
if (!rhct) return -ENOENT; } else { @@ -81,3 +82,89 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const return -1; } + +static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct, + struct acpi_rhct_hart_info *hart_info, + u32 *cbom_size, u32 *cboz_size, u32 *cbop_size) +{ + u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info); + u32 size_hdr = sizeof(struct acpi_rhct_node_header); + struct acpi_rhct_node_header *ref_node; + struct acpi_rhct_cmo_node *cmo_node; + u32 *hart_info_node_offset; + + hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo); + for (int i = 0; i < hart_info->num_offsets; i++) { + ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header, + rhct, hart_info_node_offset[i]); + if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) { + cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node, + ref_node, size_hdr); + if (cbom_size && cmo_node->cbom_size <= 30) { + if (!*cbom_size) + *cbom_size = BIT(cmo_node->cbom_size); + else if (*cbom_size != BIT(cmo_node->cbom_size)) + pr_warn("CBOM size is not the same across harts\n"); + } + + if (cboz_size && cmo_node->cboz_size <= 30) { + if (!*cboz_size) + *cboz_size = BIT(cmo_node->cboz_size); + else if (*cboz_size != BIT(cmo_node->cboz_size)) + pr_warn("CBOZ size is not the same across harts\n"); + } + + if (cbop_size && cmo_node->cbop_size <= 30) { + if (!*cbop_size) + *cbop_size = BIT(cmo_node->cbop_size); + else if (*cbop_size != BIT(cmo_node->cbop_size)) + pr_warn("CBOP size is not the same across harts\n"); + } + } + } +} + +/* + * During early boot, the caller should call acpi_get_table() and pass its pointer to + * these functions (and free up later). At run time, since this table can be used + * multiple times, pass NULL so that the table remains in memory. 
+ */ +void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, + u32 *cboz_size, u32 *cbop_size) +{ + u32 size_hdr = sizeof(struct acpi_rhct_node_header); + struct acpi_rhct_node_header *node, *end; + struct acpi_rhct_hart_info *hart_info; + struct acpi_table_rhct *rhct; + + if (acpi_disabled) + return; + + if (table) { + rhct = (struct acpi_table_rhct *)table; + } else { + rhct = acpi_get_rhct(); + if (!rhct) + return; + } + + if (cbom_size) + *cbom_size = 0; + + if (cboz_size) + *cboz_size = 0; + + if (cbop_size) + *cbop_size = 0; + + end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length); + for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset); + node < end; + node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) { + if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) { + hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr); + acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size, + cboz_size, cbop_size); + } + } +} diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c new file mode 100644 index 000000000000..7f423405e5ef --- /dev/null +++ b/drivers/acpi/riscv/rimt.c @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024-2025, Ventana Micro Systems Inc + * Author: Sunil V L <sunilvl@ventanamicro.com> + * + */ + +#define pr_fmt(fmt) "ACPI: RIMT: " fmt + +#include <linux/acpi.h> +#include <linux/acpi_rimt.h> +#include <linux/iommu.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include "init.h" + +struct rimt_fwnode { + struct list_head list; + struct acpi_rimt_node *rimt_node; + struct fwnode_handle *fwnode; +}; + +static LIST_HEAD(rimt_fwnode_list); +static DEFINE_SPINLOCK(rimt_fwnode_lock); + +#define RIMT_TYPE_MASK(type) (1 << (type)) +#define RIMT_IOMMU_TYPE BIT(0) + +/* Root pointer to the mapped RIMT table */ +static struct acpi_table_header *rimt_table; + +/** + * rimt_set_fwnode() - Create rimt_fwnode and use it to register + * iommu data in the rimt_fwnode_list + * + * @rimt_node: RIMT table node associated with the IOMMU + * @fwnode: fwnode associated with the RIMT node + * + * Returns: 0 on success + * <0 on failure + */ +static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node, + struct fwnode_handle *fwnode) +{ + struct rimt_fwnode *np; + + np = kzalloc(sizeof(*np), GFP_ATOMIC); + + if (WARN_ON(!np)) + return -ENOMEM; + + INIT_LIST_HEAD(&np->list); + np->rimt_node = rimt_node; + np->fwnode = fwnode; + + spin_lock(&rimt_fwnode_lock); + list_add_tail(&np->list, &rimt_fwnode_list); + spin_unlock(&rimt_fwnode_lock); + + return 0; +} + +static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node, + void *context) +{ + acpi_status status = AE_NOT_FOUND; + struct device *dev = context; + + if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) { + struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data; + + if (dev_is_pci(dev)) { + struct pci_dev *pdev; + u16 bdf; + + pdev = to_pci_dev(dev); + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) && + bdf == iommu_node->pcie_bdf) { + status = AE_OK; + } else { + status = AE_NOT_FOUND; + } + } else { + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res && res->start == iommu_node->base_address) + status = AE_OK; + else + status = AE_NOT_FOUND; + } + } else if 
(node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + struct acpi_rimt_pcie_rc *pci_rc; + struct pci_bus *bus; + + bus = to_pci_bus(dev); + pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data; + + /* + * It is assumed that PCI segment numbers maps one-to-one + * with root complexes. Each segment number can represent only + * one root complex. + */ + status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ? + AE_OK : AE_NOT_FOUND; + } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { + struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_rimt_platform_device *ncomp; + struct device *plat_dev = dev; + struct acpi_device *adev; + + /* + * Walk the device tree to find a device with an + * ACPI companion; there is no point in scanning + * RIMT for a device matching a platform device if + * the device does not have an ACPI companion to + * start with. + */ + do { + adev = ACPI_COMPANION(plat_dev); + if (adev) + break; + + plat_dev = plat_dev->parent; + } while (plat_dev); + + if (!adev) + return status; + + status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); + if (ACPI_FAILURE(status)) { + dev_warn(plat_dev, "Can't get device full path name\n"); + return status; + } + + ncomp = (struct acpi_rimt_platform_device *)node->node_data; + status = !strcmp(ncomp->device_name, buf.pointer) ? + AE_OK : AE_NOT_FOUND; + acpi_os_free(buf.pointer); + } + + return status; +} + +static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type, + void *context) +{ + struct acpi_rimt_node *rimt_node, *rimt_end; + struct acpi_table_rimt *rimt; + int i; + + if (!rimt_table) + return NULL; + + /* Get the first RIMT node */ + rimt = (struct acpi_table_rimt *)rimt_table; + rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt, + rimt->node_offset); + rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, + rimt_table->length); + + for (i = 0; i < rimt->num_nodes; i++) { + if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND, + "RIMT node pointer overflows, bad table!\n")) + return NULL; + + if (rimt_node->type == type && + ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context))) + return rimt_node; + + rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node, + rimt_node->length); + } + + return NULL; +} + +/* + * RISC-V supports IOMMU as a PCI device or a platform device. + * When it is a platform device, there should be a namespace device as + * well along with RIMT. To create the link between RIMT information and + * the platform device, the IOMMU driver should register itself with the + * RIMT module. This is true for PCI based IOMMU as well. 
+ */ +int rimt_iommu_register(struct device *dev) +{ + struct fwnode_handle *rimt_fwnode; + struct acpi_rimt_node *node; + + node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev); + if (!node) { + pr_err("Could not find IOMMU node in RIMT\n"); + return -ENODEV; + } + + if (dev_is_pci(dev)) { + rimt_fwnode = acpi_alloc_fwnode_static(); + if (!rimt_fwnode) + return -ENOMEM; + + rimt_fwnode->dev = dev; + if (!dev->fwnode) + dev->fwnode = rimt_fwnode; + + rimt_set_fwnode(node, rimt_fwnode); + } else { + rimt_set_fwnode(node, dev->fwnode); + } + + return 0; +} + +#ifdef CONFIG_IOMMU_API + +/** + * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node + * + * @node: RIMT table node to be looked-up + * + * Returns: fwnode_handle pointer on success, NULL on failure + */ +static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node) +{ + struct fwnode_handle *fwnode = NULL; + struct rimt_fwnode *curr; + + spin_lock(&rimt_fwnode_lock); + list_for_each_entry(curr, &rimt_fwnode_list, list) { + if (curr->rimt_node == node) { + fwnode = curr->fwnode; + break; + } + } + spin_unlock(&rimt_fwnode_lock); + + return fwnode; +} + +static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node) +{ + struct acpi_rimt_pcie_rc *pci_rc; + + pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data; + return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED; +} + +static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid) +{ + struct fwnode_handle *rimt_fwnode; + + if (!node) + return -ENODEV; + + rimt_fwnode = rimt_get_fwnode(node); + + /* + * The IOMMU drivers may not be probed yet. + * Defer the IOMMU configuration + */ + if (!rimt_fwnode) + return -EPROBE_DEFER; + + return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode); +} + +struct rimt_pci_alias_info { + struct device *dev; + struct acpi_rimt_node *node; + const struct iommu_ops *ops; +}; + +static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) +{ + if (rid_in < map->source_id_base || + (rid_in > map->source_id_base + map->num_ids)) + return -ENXIO; + + *rid_out = map->dest_id_base + (rid_in - map->source_id_base); + return 0; +} + +static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node, + u32 *id_out, int index) +{ + struct acpi_rimt_platform_device *plat_node; + u32 id_mapping_offset, num_id_mapping; + struct acpi_rimt_pcie_rc *pci_node; + struct acpi_rimt_id_mapping *map; + struct acpi_rimt_node *parent; + + if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data; + id_mapping_offset = pci_node->id_mapping_offset; + num_id_mapping = pci_node->num_id_mappings; + } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { + plat_node = (struct acpi_rimt_platform_device *)&node->node_data; + id_mapping_offset = plat_node->id_mapping_offset; + num_id_mapping = plat_node->num_id_mappings; + } else { + return NULL; + } + + if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping) + return NULL; + + map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node, + id_mapping_offset + index * sizeof(*map)); + + /* Firmware bug! 
*/ + if (!map->dest_offset) { + pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", + node, node->type); + return NULL; + } + + parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset); + + if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE || + node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + *id_out = map->dest_id_base; + return parent; + } + + return NULL; +} + +static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node, + u32 id_in, u32 *id_out, + u8 type_mask) +{ + struct acpi_rimt_platform_device *plat_node; + u32 id_mapping_offset, num_id_mapping; + struct acpi_rimt_pcie_rc *pci_node; + u32 id = id_in; + + /* Parse the ID mapping tree to find specified node type */ + while (node) { + struct acpi_rimt_id_mapping *map; + int i, rc = 0; + u32 map_id = id; + + if (RIMT_TYPE_MASK(node->type) & type_mask) { + if (id_out) + *id_out = id; + return node; + } + + if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { + pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data; + id_mapping_offset = pci_node->id_mapping_offset; + num_id_mapping = pci_node->num_id_mappings; + } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { + plat_node = (struct acpi_rimt_platform_device *)&node->node_data; + id_mapping_offset = plat_node->id_mapping_offset; + num_id_mapping = plat_node->num_id_mappings; + } else { + goto fail_map; + } + + if (!id_mapping_offset || !num_id_mapping) + goto fail_map; + + map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node, + id_mapping_offset); + + /* Firmware bug! */ + if (!map->dest_offset) { + pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", + node, node->type); + goto fail_map; + } + + /* Do the ID translation */ + for (i = 0; i < num_id_mapping; i++, map++) { + rc = rimt_id_map(map, node->type, map_id, &id); + if (!rc) + break; + } + + if (i == num_id_mapping) + goto fail_map; + + node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, + rc ? 
0 : map->dest_offset); + } + +fail_map: + /* Map input ID to output ID unchanged on mapping failure */ + if (id_out) + *id_out = id_in; + + return NULL; +} + +static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out, + u8 type_mask, int index) +{ + struct acpi_rimt_node *parent; + u32 id; + + parent = rimt_node_get_id(node, &id, index); + if (!parent) + return NULL; + + if (!(RIMT_TYPE_MASK(parent->type) & type_mask)) + parent = rimt_node_map_id(parent, id, id_out, type_mask); + else + if (id_out) + *id_out = id; + + return parent; +} + +static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) +{ + struct rimt_pci_alias_info *info = data; + struct acpi_rimt_node *parent; + u32 deviceid; + + parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE); + return rimt_iommu_xlate(info->dev, parent, deviceid); +} + +static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node) +{ + struct acpi_rimt_node *parent; + int err = -ENODEV, i = 0; + u32 deviceid = 0; + + do { + parent = rimt_node_map_platform_id(node, &deviceid, + RIMT_IOMMU_TYPE, + i++); + + if (parent) + err = rimt_iommu_xlate(dev, parent, deviceid); + } while (parent && !err); + + return err; +} + +static int rimt_plat_iommu_map_id(struct device *dev, + struct acpi_rimt_node *node, + const u32 *in_id) +{ + struct acpi_rimt_node *parent; + u32 deviceid; + + parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE); + if (parent) + return rimt_iommu_xlate(dev, parent, deviceid); + + return -ENODEV; +} + +/** + * rimt_iommu_configure_id - Set-up IOMMU configuration for a device. + * + * @dev: device to configure + * @id_in: optional input id const value pointer + * + * Returns: 0 on success, <0 on failure + */ +int rimt_iommu_configure_id(struct device *dev, const u32 *id_in) +{ + struct acpi_rimt_node *node; + int err = -ENODEV; + + if (dev_is_pci(dev)) { + struct iommu_fwspec *fwspec; + struct pci_bus *bus = to_pci_dev(dev)->bus; + struct rimt_pci_alias_info info = { .dev = dev }; + + node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev); + if (!node) + return -ENODEV; + + info.node = node; + err = pci_for_each_dma_alias(to_pci_dev(dev), + rimt_pci_iommu_init, &info); + + fwspec = dev_iommu_fwspec_get(dev); + if (fwspec && rimt_pcie_rc_supports_ats(node)) + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; + } else { + node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev); + if (!node) + return -ENODEV; + + err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) : + rimt_plat_iommu_map(dev, node); + } + + return err; +} + +#endif + +void __init riscv_acpi_rimt_init(void) +{ + acpi_status status; + + /* rimt_table will be used at runtime after the rimt init, + * so we don't need to call acpi_put_table() to release + * the RIMT table mapping. + */ + status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table); + if (ACPI_FAILURE(status)) { + if (status != AE_NOT_FOUND) { + const char *msg = acpi_format_exception(status); + + pr_err("Failed to get table, %s\n", msg); + } + + return; + } +} |
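
Editor's note (not part of the commit): the comment block near the top of cppc.c above documents the RISC-V FFH register layout consumed by cpc_read_ffh()/cpc_write_ffh(): bits [63:60] select the access type (0x1 = SBI CPPC extension, 0x2 = CSR), bits [31:0] carry the SBI CPPC register id, and bits [11:0] carry the CSR number. The standalone C sketch below only illustrates that field decoding, with the masks written out in place of the kernel's GENMASK helpers; the example register values are hypothetical and are not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Same field layout as the FFH_CPPC_* macros in cppc.c, without GENMASK. */
#define FFH_CPPC_TYPE(r)    (((r) & 0xF000000000000000ULL) >> 60) /* bits 63:60 */
#define FFH_CPPC_SBI_REG(r) ((uint32_t)((r) & 0xFFFFFFFFULL))     /* bits 31:0  */
#define FFH_CPPC_CSR_NUM(r) ((uint32_t)((r) & 0xFFFULL))          /* bits 11:0  */

int main(void)
{
	/* Hypothetical FFH addresses, for illustration only. */
	uint64_t sbi_addr = 0x1000000000000006ULL; /* type 0x1: SBI CPPC register 0x6 */
	uint64_t csr_addr = 0x2000000000000C01ULL; /* type 0x2: CSR 0xc01 (time)      */

	printf("type=%u sbi_reg=0x%x\n",
	       (unsigned)FFH_CPPC_TYPE(sbi_addr), (unsigned)FFH_CPPC_SBI_REG(sbi_addr));
	printf("type=%u csr_num=0x%x\n",
	       (unsigned)FFH_CPPC_TYPE(csr_addr), (unsigned)FFH_CPPC_CSR_NUM(csr_addr));
	return 0;
}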
