Diffstat (limited to 'arch/powerpc/sysdev/xive/native.c')
-rw-r--r--  arch/powerpc/sysdev/xive/native.c  252
1 file changed, 207 insertions(+), 45 deletions(-)
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1ca127d052a6..a0934b516933 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016,2017 IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "xive: " fmt
@@ -17,13 +13,15 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
+#include <linux/kmemleak.h>
-#include <asm/prom.h>
+#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
@@ -43,6 +41,7 @@ static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
+bool xive_has_save_restore;
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
@@ -64,14 +63,10 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
opal_flags = be64_to_cpu(flags);
if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
+ if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
+ data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
if (opal_flags & OPAL_XIVE_IRQ_LSI)
data->flags |= XIVE_IRQ_FLAG_LSI;
- if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
- data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
- if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
- data->flags |= XIVE_IRQ_FLAG_MASK_FW;
- if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
- data->flags |= XIVE_IRQ_FLAG_EOI_FW;
data->eoi_page = be64_to_cpu(eoi_page);
data->trig_page = be64_to_cpu(trig_page);
data->esb_shift = be32_to_cpu(esb_shift);
@@ -115,6 +110,22 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);
+static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
+ u32 *sw_irq)
+{
+ s64 rc;
+ __be64 vp;
+ __be32 lirq;
+
+ rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);
+
+ *target = be64_to_cpu(vp);
+ *sw_irq = be32_to_cpu(lirq);
+
+ return rc == 0 ? 0 : -ENXIO;
+}
+
+#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)
/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
@@ -143,7 +154,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
&esc_irq_be,
NULL);
if (rc) {
- pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
rc = -EIO;
goto fail;
}
@@ -166,7 +177,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
- pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
rc = -EIO;
} else {
/*
@@ -193,7 +204,7 @@ static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
- pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}
void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
@@ -235,6 +246,17 @@ static bool xive_native_match(struct device_node *node)
return of_device_is_compatible(node, "ibm,opal-xive-vc");
}
+static s64 opal_xive_allocate_irq(u32 chip_id)
+{
+ s64 irq = opal_xive_allocate_irq_raw(chip_id);
+
+ /*
+ * Old versions of skiboot can incorrectly return 0xffffffff to
+ * indicate no space, fix it up here.
+ */
+ return irq == 0xffffffff ? OPAL_RESOURCE : irq;
+}
+
#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
@@ -258,12 +280,12 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
#endif /* CONFIG_SMP */
-u32 xive_native_alloc_irq(void)
+u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
s64 rc;
for (;;) {
- rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
+ rc = opal_xive_allocate_irq(chip_id);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
@@ -272,7 +294,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
-EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);
void xive_native_free_irq(u32 irq)
{
@@ -291,7 +313,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
s64 rc;
/* Free the IPI */
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
@@ -299,7 +321,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
break;
}
}
@@ -361,13 +383,9 @@ static void xive_native_update_pending(struct xive_cpu *xc)
}
}
-static void xive_native_eoi(u32 hw_irq)
+static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
- /*
- * Not normally used except if specific interrupts need
- * a workaround on EOI.
- */
- opal_int_eoi(hw_irq);
+ xc->chip_id = cpu_to_chip_id(cpu);
}
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
@@ -397,7 +415,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
return;
}
- /* Grab it's CAM value */
+ /* Grab its CAM value */
rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
if (rc) {
pr_err("Failed to get pool VP info CPU %d\n", cpu);
@@ -437,15 +455,30 @@ void xive_native_sync_source(u32 hw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);
+void xive_native_sync_queue(u32 hw_irq)
+{
+ opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
+}
+EXPORT_SYMBOL_GPL(xive_native_sync_queue);
+
+#ifdef CONFIG_DEBUG_FS
+static int xive_native_debug_create(struct dentry *xive_dir)
+{
+ debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
+ return 0;
+}
+#endif
+
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
+ .get_irq_config = xive_native_get_irq_config,
.setup_queue = xive_native_setup_queue,
.cleanup_queue = xive_native_cleanup_queue,
.match = xive_native_match,
.shutdown = xive_native_shutdown,
.update_pending = xive_native_update_pending,
- .eoi = xive_native_eoi,
+ .prepare_cpu = xive_native_prepare_cpu,
.setup_cpu = xive_native_setup_cpu,
.teardown_cpu = xive_native_teardown_cpu,
.sync_source = xive_native_sync_source,
@@ -453,10 +486,13 @@ static const struct xive_ops xive_native_ops = {
.get_ipi = xive_native_get_ipi,
.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
+#ifdef CONFIG_DEBUG_FS
+ .debug_create = xive_native_debug_create,
+#endif /* CONFIG_DEBUG_FS */
.name = "native",
};
-static bool xive_parse_provisioning(struct device_node *np)
+static bool __init xive_parse_provisioning(struct device_node *np)
{
int rc;
@@ -496,16 +532,16 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
-static void xive_native_setup_pools(void)
+static void __init xive_native_setup_pools(void)
{
/* Allocate a pool big enough */
- pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);
+ pr_debug("Allocating VP block for pool size %u\n", nr_cpu_ids);
xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
- pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
+ pr_err("Failed to allocate pool VP, KVM might not function\n");
- pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
+ pr_debug("Pool VPs allocated at 0x%x for %u max CPUs\n",
xive_pool_vps, nr_cpu_ids);
}
@@ -515,14 +551,15 @@ u32 xive_native_default_eq_shift(void)
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
+unsigned long xive_tima_os;
+EXPORT_SYMBOL_GPL(xive_tima_os);
+
bool __init xive_native_init(void)
{
struct device_node *np;
struct resource r;
void __iomem *tima;
- struct property *prop;
u8 max_prio = 7;
- const __be32 *p;
u32 val, cpu;
s64 rc;
@@ -540,12 +577,12 @@ bool __init xive_native_init(void)
/* Resource 1 is HV window */
if (of_address_to_resource(np, 1, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
- return false;
+ goto err_put;
}
tima = ioremap(r.start, resource_size(&r));
if (!tima) {
pr_err("Failed to map thread mgmnt area\n");
- return false;
+ goto err_put;
}
/* Read number of priorities */
@@ -553,41 +590,55 @@ bool __init xive_native_init(void)
max_prio = val - 1;
/* Iterate the EQ sizes and pick one */
- of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
+ of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) {
xive_queue_shift = val;
if (val == PAGE_SHIFT)
break;
}
/* Do we support single escalation */
- if (of_get_property(np, "single-escalation-support", NULL) != NULL)
- xive_has_single_esc = true;
+ xive_has_single_esc = of_property_read_bool(np, "single-escalation-support");
+
+ xive_has_save_restore = of_property_read_bool(np, "vp-save-restore");
/* Configure Thread Management areas for KVM */
for_each_possible_cpu(cpu)
kvmppc_set_xive_tima(cpu, r.start, tima);
- /* Grab size of provisionning pages */
+ /* Resource 2 is OS window */
+ if (of_address_to_resource(np, 2, &r)) {
+ pr_err("Failed to get thread mgmnt area resource\n");
+ goto err_put;
+ }
+
+ xive_tima_os = r.start;
+
+ /* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
if (rc) {
pr_err("Switch to exploitation mode failed with error %lld\n", rc);
- return false;
+ goto err_put;
}
/* Setup some dummy HV pool VPs */
xive_native_setup_pools();
/* Initialize XIVE core with our backend */
- if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
+ if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
opal_xive_reset(OPAL_XIVE_MODE_EMU);
- return false;
+ goto err_put;
}
+ of_node_put(np);
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
+
+err_put:
+ of_node_put(np);
+ return false;
}
static bool xive_native_provision_pages(void)
@@ -607,6 +658,7 @@ static bool xive_native_provision_pages(void)
pr_err("Failed to allocate provisioning page\n");
return false;
}
+ kmemleak_ignore(p);
opal_xive_donate_page(chip, __pa(p));
}
return true;
@@ -672,6 +724,8 @@ int xive_native_enable_vp(u32 vp_id, bool single_escalation)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
+ if (rc)
+ vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);
@@ -686,6 +740,8 @@ int xive_native_disable_vp(u32 vp_id)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
+ if (rc)
+ vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);
@@ -697,8 +753,10 @@ int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
s64 rc;
rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
- if (rc)
+ if (rc) {
+ vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
return -EIO;
+ }
*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
*out_chip_id = be32_to_cpu(vp_chip_id_be);
@@ -711,3 +769,107 @@ bool xive_native_has_single_escalation(void)
return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
+
+bool xive_native_has_save_restore(void)
+{
+ return xive_has_save_restore;
+}
+EXPORT_SYMBOL_GPL(xive_native_has_save_restore);
+
+int xive_native_get_queue_info(u32 vp_id, u32 prio,
+ u64 *out_qpage,
+ u64 *out_qsize,
+ u64 *out_qeoi_page,
+ u32 *out_escalate_irq,
+ u64 *out_qflags)
+{
+ __be64 qpage;
+ __be64 qsize;
+ __be64 qeoi_page;
+ __be32 escalate_irq;
+ __be64 qflags;
+ s64 rc;
+
+ rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
+ &qeoi_page, &escalate_irq, &qflags);
+ if (rc) {
+ vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
+ return -EIO;
+ }
+
+ if (out_qpage)
+ *out_qpage = be64_to_cpu(qpage);
+ if (out_qsize)
+ *out_qsize = be64_to_cpu(qsize);
+ if (out_qeoi_page)
+ *out_qeoi_page = be64_to_cpu(qeoi_page);
+ if (out_escalate_irq)
+ *out_escalate_irq = be32_to_cpu(escalate_irq);
+ if (out_qflags)
+ *out_qflags = be64_to_cpu(qflags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_queue_info);
+
+int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
+{
+ __be32 opal_qtoggle;
+ __be32 opal_qindex;
+ s64 rc;
+
+ rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
+ &opal_qindex);
+ if (rc) {
+ vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
+ return -EIO;
+ }
+
+ if (qtoggle)
+ *qtoggle = be32_to_cpu(opal_qtoggle);
+ if (qindex)
+ *qindex = be32_to_cpu(opal_qindex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_queue_state);
+
+int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
+{
+ s64 rc;
+
+ rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
+ if (rc) {
+ vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_set_queue_state);
+
+bool xive_native_has_queue_state_support(void)
+{
+ return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
+ opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
+}
+EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
+
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
+{
+ __be64 state;
+ s64 rc;
+
+ rc = opal_xive_get_vp_state(vp_id, &state);
+ if (rc) {
+ vp_err(vp_id, "failed to get vp state : %lld\n", rc);
+ return -EIO;
+ }
+
+ if (out_state)
+ *out_state = be64_to_cpu(state);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
+
+machine_arch_initcall(powernv, xive_core_debug_init);
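
Usage note on the queue-state accessors exported above: OPAL support for saving and restoring queue state is optional, which is why xive_native_has_queue_state_support() probes the OPAL tokens before any caller relies on the get/set paths. A minimal sketch of a consumer, in the style of a caller such as the KVM XIVE device; the example_* helpers below are illustrative only and not part of this patch:

#include <linux/errno.h>
#include <asm/xive.h>

/*
 * Illustrative only: checkpoint the toggle bit and index of one event
 * queue (vp_id, prio) using the accessors exported by native.c.
 */
static int example_save_queue(u32 vp_id, u32 prio, u32 *toggle, u32 *index)
{
	/* Queue state save/restore is optional OPAL support; probe first */
	if (!xive_native_has_queue_state_support())
		return -EOPNOTSUPP;

	return xive_native_get_queue_state(vp_id, prio, toggle, index);
}

/*
 * Illustrative only: restore a previously saved queue state, e.g. on
 * the destination side of a migration.
 */
static int example_restore_queue(u32 vp_id, u32 prio, u32 toggle, u32 index)
{
	if (!xive_native_has_queue_state_support())
		return -EOPNOTSUPP;

	return xive_native_set_queue_state(vp_id, prio, toggle, index);
}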