Diffstat (limited to 'arch/powerpc/sysdev/xive/native.c')
-rw-r--r--	arch/powerpc/sysdev/xive/native.c | 117
1 file changed, 71 insertions(+), 46 deletions(-)
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 0ff6b739052c..f1c0fa6ece21 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -13,13 +13,15 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
+#include <linux/kmemleak.h>
-#include <asm/prom.h>
+#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
@@ -39,6 +41,7 @@ static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
+bool xive_has_save_restore;
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
@@ -60,14 +63,10 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
opal_flags = be64_to_cpu(flags);
if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
+ if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
+ data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
if (opal_flags & OPAL_XIVE_IRQ_LSI)
data->flags |= XIVE_IRQ_FLAG_LSI;
- if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
- data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
- if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
- data->flags |= XIVE_IRQ_FLAG_MASK_FW;
- if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
- data->flags |= XIVE_IRQ_FLAG_EOI_FW;
data->eoi_page = be64_to_cpu(eoi_page);
data->trig_page = be64_to_cpu(trig_page);
data->esb_shift = be32_to_cpu(esb_shift);
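Both StoreEOI flag variants now collapse onto XIVE_IRQ_FLAG_STORE_EOI, while the firmware-mediated workaround flags (SHIFT_BUG, MASK_VIA_FW, EOI_VIA_FW) go away. For context, here is a condensed sketch of how the common ESB helpers consume the flag; it is illustrative only, the real logic lives in xive_do_source_eoi():

	/* Sketch (not part of the patch): StoreEOI uses a store to the
	 * ESB EOI page, the fallback does a load from it. */
	static void example_source_eoi(struct xive_irq_data *xd)
	{
		if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
			xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
		else
			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
	}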
@@ -126,6 +125,8 @@ static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
return rc == 0 ? 0 : -ENXIO;
}
+#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)
+
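The new vp_err() helper tags error messages with the owning virtual processor id; the ##__VA_ARGS__ keeps it usable without trailing arguments. Purely for illustration:

	/* Example only: expansion of the vp_err() macro above. */
	vp_err(0x1f, "Failed to enable VP : %lld\n", rc);
	/* becomes, by string literal concatenation:
	 *   pr_err("VP[0x%x]: " "Failed to enable VP : %lld\n", 0x1f, rc);
	 * and logs e.g.: "VP[0x1f]: Failed to enable VP : -6"
	 */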
/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order, bool can_escalate)
@@ -153,7 +154,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
&esc_irq_be,
NULL);
if (rc) {
- pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
rc = -EIO;
goto fail;
}
@@ -176,7 +177,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
- pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
rc = -EIO;
} else {
/*
@@ -203,7 +204,7 @@ static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
- pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}
void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
@@ -279,12 +280,12 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
#endif /* CONFIG_SMP */
-u32 xive_native_alloc_irq(void)
+u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
s64 rc;
for (;;) {
- rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
+ rc = opal_xive_allocate_irq(chip_id);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
@@ -293,7 +294,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
-EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);
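Callers with no placement preference keep the old behaviour through a trivial wrapper on the header side (sketched below from the matching asm/xive.h change, which is not part of this file):

	/* Header-side sketch: the old entry point survives as a wrapper
	 * expressing "no chip preference". */
	static inline u32 xive_native_alloc_irq(void)
	{
		return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP);
	}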
void xive_native_free_irq(u32 irq)
{
@@ -312,7 +313,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
s64 rc;
/* Free the IPI */
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
@@ -320,7 +321,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
break;
}
}
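Two things worth noting here: hardware IRQ number 0 is a valid XIVE interrupt, so the "no IPI allocated" sentinel moves from 0 to XIVE_BAD_IRQ; and the loop is the standard OPAL busy-retry idiom used throughout this file, distilled below into a hypothetical helper:

	/* Hypothetical helper (not in the patch) distilling the idiom:
	 * retry while firmware reports OPAL_BUSY, backing off between
	 * attempts. */
	static int example_opal_free_irq(u32 hw_irq)
	{
		s64 rc;

		for (;;) {
			rc = opal_xive_free_irq(hw_irq);
			if (rc != OPAL_BUSY)
				break;
			msleep(OPAL_BUSY_DELAY_MS);	/* firmware busy */
		}
		return rc == OPAL_SUCCESS ? 0 : -EIO;
	}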
@@ -382,13 +383,9 @@ static void xive_native_update_pending(struct xive_cpu *xc)
}
}
-static void xive_native_eoi(u32 hw_irq)
+static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
- /*
- * Not normally used except if specific interrupts need
- * a workaround on EOI.
- */
- opal_int_eoi(hw_irq);
+ xc->chip_id = cpu_to_chip_id(cpu);
}
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
@@ -464,6 +461,14 @@ void xive_native_sync_queue(u32 hw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);
+#ifdef CONFIG_DEBUG_FS
+static int xive_native_debug_create(struct dentry *xive_dir)
+{
+ debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
+ return 0;
+}
+#endif
+
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
@@ -473,7 +478,7 @@ static const struct xive_ops xive_native_ops = {
.match = xive_native_match,
.shutdown = xive_native_shutdown,
.update_pending = xive_native_update_pending,
- .eoi = xive_native_eoi,
+ .prepare_cpu = xive_native_prepare_cpu,
.setup_cpu = xive_native_setup_cpu,
.teardown_cpu = xive_native_teardown_cpu,
.sync_source = xive_native_sync_source,
@@ -481,10 +486,13 @@ static const struct xive_ops xive_native_ops = {
.get_ipi = xive_native_get_ipi,
.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
+#ifdef CONFIG_DEBUG_FS
+ .debug_create = xive_native_debug_create,
+#endif /* CONFIG_DEBUG_FS */
.name = "native",
};
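The eoi hook disappears along with the firmware EOI workaround; in its place the backend gains prepare_cpu, which the common core is expected to call early in CPU bring-up so the chip id is recorded before the per-CPU IPI is allocated. A hedged sketch of that core-side call site (example_prepare_cpu() is illustrative):

	/* Illustrative core-side caller (not part of this patch): run the
	 * backend hook before the CPU's IPI is allocated, so allocation
	 * can target xc->chip_id. */
	static void example_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
	{
		xc->hw_ipi = XIVE_BAD_IRQ;	/* nothing allocated yet */
		if (xive_ops->prepare_cpu)
			xive_ops->prepare_cpu(cpu, xc);
	}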
-static bool xive_parse_provisioning(struct device_node *np)
+static bool __init xive_parse_provisioning(struct device_node *np)
{
int rc;
@@ -524,16 +532,16 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
-static void xive_native_setup_pools(void)
+static void __init xive_native_setup_pools(void)
{
/* Allocate a pool big enough */
- pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);
+ pr_debug("Allocating VP block for pool size %u\n", nr_cpu_ids);
xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
- pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
+ pr_err("Failed to allocate pool VP, KVM might not function\n");
- pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
+ pr_debug("Pool VPs allocated at 0x%x for %u max CPUs\n",
xive_pool_vps, nr_cpu_ids);
}
@@ -571,12 +579,12 @@ bool __init xive_native_init(void)
/* Resource 1 is HV window */
if (of_address_to_resource(np, 1, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
- return false;
+ goto err_put;
}
tima = ioremap(r.start, resource_size(&r));
if (!tima) {
pr_err("Failed to map thread mgmnt area\n");
- return false;
+ goto err_put;
}
/* Read number of priorities */
@@ -591,8 +599,9 @@ bool __init xive_native_init(void)
}
/* Do we support single escalation */
- if (of_get_property(np, "single-escalation-support", NULL) != NULL)
- xive_has_single_esc = true;
+ xive_has_single_esc = of_property_read_bool(np, "single-escalation-support");
+
+ xive_has_save_restore = of_property_read_bool(np, "vp-save-restore");
/* Configure Thread Management areas for KVM */
for_each_possible_cpu(cpu)
@@ -601,32 +610,37 @@ bool __init xive_native_init(void)
/* Resource 2 is OS window */
if (of_address_to_resource(np, 2, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
- return false;
+ goto err_put;
}
xive_tima_os = r.start;
- /* Grab size of provisionning pages */
+ /* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
if (rc) {
pr_err("Switch to exploitation mode failed with error %lld\n", rc);
- return false;
+ goto err_put;
}
/* Setup some dummy HV pool VPs */
xive_native_setup_pools();
/* Initialize XIVE core with our backend */
- if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
+ if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
opal_xive_reset(OPAL_XIVE_MODE_EMU);
- return false;
+ goto err_put;
}
+ of_node_put(np);
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
+
+err_put:
+ of_node_put(np);
+ return false;
}
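The early returns above all leaked the device_node reference taken when np was looked up; funnelling them through err_put fixes that. The shape of the pattern, reduced to its essentials (setup_all() is hypothetical):

	/* Distilled sketch: every failure after a successful lookup must
	 * drop the of_node reference. */
	static bool example_init(const char *compat)
	{
		struct device_node *np = of_find_compatible_node(NULL, NULL, compat);

		if (!np)
			return false;
		if (!setup_all(np))
			goto err_put;
		of_node_put(np);
		return true;

	err_put:
		of_node_put(np);
		return false;
	}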
static bool xive_native_provision_pages(void)
@@ -646,6 +660,7 @@ static bool xive_native_provision_pages(void)
pr_err("Failed to allocate provisioning page\n");
return false;
}
+ kmemleak_ignore(p);
opal_xive_donate_page(chip, __pa(p));
}
return true;
@@ -711,6 +726,8 @@ int xive_native_enable_vp(u32 vp_id, bool single_escalation)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
+ if (rc)
+ vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);
@@ -725,6 +742,8 @@ int xive_native_disable_vp(u32 vp_id)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
+ if (rc)
+ vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);
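With vp_err() reporting failures at the call site, users only need to act on the return code. A hypothetical KVM-style caller pairs the two exported helpers like this (example_configure_queues() is made up for illustration):

	/* Hypothetical caller: enable a VP, using single escalation when
	 * available, and roll back on a later failure. */
	static int example_activate_vp(u32 vp_id)
	{
		int rc;

		rc = xive_native_enable_vp(vp_id, xive_native_has_single_escalation());
		if (rc)
			return rc;	/* vp_err() already logged the details */

		rc = example_configure_queues(vp_id);
		if (rc)
			xive_native_disable_vp(vp_id);
		return rc;
	}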
@@ -736,8 +755,10 @@ int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
s64 rc;
rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
- if (rc)
+ if (rc) {
+ vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
return -EIO;
+ }
*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
*out_chip_id = be32_to_cpu(vp_chip_id_be);
@@ -751,6 +772,12 @@ bool xive_native_has_single_escalation(void)
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
+bool xive_native_has_save_restore(void)
+{
+ return xive_has_save_restore;
+}
+EXPORT_SYMBOL_GPL(xive_native_has_save_restore);
+
int xive_native_get_queue_info(u32 vp_id, u32 prio,
u64 *out_qpage,
u64 *out_qsize,
@@ -768,15 +795,14 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
&qeoi_page, &escalate_irq, &qflags);
if (rc) {
- pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
- vp_id, prio, rc);
+ vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
return -EIO;
}
if (out_qpage)
*out_qpage = be64_to_cpu(qpage);
if (out_qsize)
- *out_qsize = be32_to_cpu(qsize);
+ *out_qsize = be64_to_cpu(qsize);
if (out_qeoi_page)
*out_qeoi_page = be64_to_cpu(qeoi_page);
if (out_escalate_irq)
@@ -797,8 +823,7 @@ int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
&opal_qindex);
if (rc) {
- pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
- vp_id, prio, rc);
+ vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
return -EIO;
}
@@ -817,8 +842,7 @@ int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
if (rc) {
- pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
- vp_id, prio, rc);
+ vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
return -EIO;
}
@@ -840,8 +864,7 @@ int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
rc = opal_xive_get_vp_state(vp_id, &state);
if (rc) {
- pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
- vp_id, rc);
+ vp_err(vp_id, "failed to get vp state : %lld\n", rc);
return -EIO;
}
@@ -850,3 +873,5 @@ int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
+
+machine_arch_initcall(powernv, xive_core_debug_init);
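This completes the save-restore plumbing: the capability is read from the device tree ("vp-save-restore"), exported through xive_native_has_save_restore(), surfaced as a debugfs bool (typically under the XIVE debugfs directory, e.g. /sys/kernel/debug/powerpc/xive/save-restore), and the final initcall registers the core XIVE debug state dump on PowerNV. A hypothetical consumer might gate on it as follows; only xive_native_has_save_restore() and cpu_has_feature() are real interfaces here:

	/* Hypothetical consumer sketch: hardware-assisted VP save/restore
	 * only makes sense on a POWER10-class (ISA v3.1) machine that
	 * also advertises the firmware capability. */
	static bool example_use_hw_vp_save_restore(void)
	{
		return cpu_has_feature(CPU_FTR_ARCH_31) &&
		       xive_native_has_save_restore();
	}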