Diffstat (limited to 'arch/powerpc/sysdev')
56 files changed, 923 insertions, 1677 deletions
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index 9ebcc1337560..18ff2c4a814a 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig @@ -5,24 +5,18 @@ config PPC4xx_PCI_EXPRESS bool - depends on PCI && 4xx + depends on PCI && 44x config PPC4xx_HSTA_MSI bool depends on PCI_MSI - depends on PCI && 4xx - -config PPC4xx_MSI - bool - depends on PCI_MSI - depends on PCI && 4xx + depends on PCI && 44x config PPC_MSI_BITMAP bool depends on PCI_MSI default y if MPIC default y if FSL_PCI - default y if PPC4xx_MSI default y if PPC_POWERNV source "arch/powerpc/sysdev/xics/Kconfig" diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 026b3f01a991..0834a9a12600 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile @@ -1,7 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) - mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) obj-$(CONFIG_MPIC_TIMER) += mpic_timer.o @@ -14,7 +12,6 @@ obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o obj-$(CONFIG_PPC_MPC106) += grackle.o obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o -obj-$(CONFIG_PPC_PMI) += pmi.o obj-$(CONFIG_U3_DART) += dart_iommu.o obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o obj-$(CONFIG_FSL_SOC) += fsl_soc.o fsl_mpic_err.o @@ -23,7 +20,6 @@ obj-$(CONFIG_FSL_PMC) += fsl_pmc.o obj-$(CONFIG_FSL_CORENET_RCPM) += fsl_rcpm.o obj-$(CONFIG_FSL_LBC) += fsl_lbc.o obj-$(CONFIG_FSL_GTM) += fsl_gtm.o -obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c index 68538b8329f7..14cc5ea936c0 100644 --- a/arch/powerpc/sysdev/cpm2.c +++ b/arch/powerpc/sysdev/cpm2.c @@ -37,11 +37,9 @@ #include <asm/io.h> #include <asm/irq.h> -#include <asm/mpc8260.h> #include <asm/page.h> #include <asm/cpm2.h> #include <asm/rheap.h> -#include <asm/fs_pd.h> #include <sysdev/fsl_soc.h> @@ -107,7 +105,7 @@ EXPORT_SYMBOL(cpm_command); * memory mapped space. * The baud rate clock is the system clock divided by something. * It was set up long ago during the initial boot phase and is - * is given to us. + * given to us. * Baud rate clocks are zero-based in the driver code (as that maps * to port numbers). Documentation uses 1-based numbering. */ @@ -119,9 +117,9 @@ void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src) /* This is good enough to get SMCs running..... 
*/ if (brg < 4) { - bp = cpm2_map_size(im_brgc1, 16); + bp = &cpm2_immr->im_brgc1; } else { - bp = cpm2_map_size(im_brgc5, 16); + bp = &cpm2_immr->im_brgc5; brg -= 4; } bp += brg; @@ -131,16 +129,14 @@ void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src) val |= CPM_BRG_DIV16; out_be32(bp, val); - cpm2_unmap(bp); } EXPORT_SYMBOL(__cpm2_setbrg); -int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) +int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) { int ret = 0; int shift; int i, bits = 0; - cpmux_t __iomem *im_cpmux; u32 __iomem *reg; u32 mask = 7; @@ -203,35 +199,33 @@ int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) {CPM_CLK_SCC4, CPM_CLK8, 7}, }; - im_cpmux = cpm2_map(im_cpmux); - switch (target) { case CPM_CLK_SCC1: - reg = &im_cpmux->cmx_scr; + reg = &cpm2_immr->im_cpmux.cmx_scr; shift = 24; break; case CPM_CLK_SCC2: - reg = &im_cpmux->cmx_scr; + reg = &cpm2_immr->im_cpmux.cmx_scr; shift = 16; break; case CPM_CLK_SCC3: - reg = &im_cpmux->cmx_scr; + reg = &cpm2_immr->im_cpmux.cmx_scr; shift = 8; break; case CPM_CLK_SCC4: - reg = &im_cpmux->cmx_scr; + reg = &cpm2_immr->im_cpmux.cmx_scr; shift = 0; break; case CPM_CLK_FCC1: - reg = &im_cpmux->cmx_fcr; + reg = &cpm2_immr->im_cpmux.cmx_fcr; shift = 24; break; case CPM_CLK_FCC2: - reg = &im_cpmux->cmx_fcr; + reg = &cpm2_immr->im_cpmux.cmx_fcr; shift = 16; break; case CPM_CLK_FCC3: - reg = &im_cpmux->cmx_fcr; + reg = &cpm2_immr->im_cpmux.cmx_fcr; shift = 8; break; default: @@ -261,16 +255,14 @@ int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode) out_be32(reg, (in_be32(reg) & ~mask) | bits); - cpm2_unmap(im_cpmux); return ret; } -int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) +int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) { int ret = 0; int shift; int i, bits = 0; - cpmux_t __iomem *im_cpmux; u8 __iomem *reg; u8 mask = 3; @@ -285,16 +277,14 @@ int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) {CPM_CLK_SMC2, CPM_CLK15, 3}, }; - im_cpmux = cpm2_map(im_cpmux); - switch (target) { case CPM_CLK_SMC1: - reg = &im_cpmux->cmx_smr; + reg = &cpm2_immr->im_cpmux.cmx_smr; mask = 3; shift = 4; break; case CPM_CLK_SMC2: - reg = &im_cpmux->cmx_smr; + reg = &cpm2_immr->im_cpmux.cmx_smr; mask = 3; shift = 0; break; @@ -317,7 +307,6 @@ int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock) out_8(reg, (in_8(reg) & ~mask) | bits); - cpm2_unmap(im_cpmux); return ret; } @@ -326,7 +315,7 @@ struct cpm2_ioports { u32 res[3]; }; -void cpm2_set_pin(int port, int pin, int flags) +void __init cpm2_set_pin(int port, int pin, int flags) { struct cpm2_ioports __iomem *iop = (struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport; diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index 9e86074719a9..4a59ed1d62ce 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -30,12 +30,10 @@ #include <linux/sched.h> #include <linux/signal.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <asm/immap_cpm2.h> -#include <asm/mpc8260.h> #include <asm/io.h> -#include <asm/prom.h> -#include <asm/fs_pd.h> #include "cpm2_pic.h" @@ -209,7 +207,7 @@ unsigned int cpm2_get_irq(void) if (irq == 0) return(-1); - return irq_linear_revmap(cpm2_pic_host, irq); + return irq_find_mapping(cpm2_pic_host, irq); } static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq, @@ -231,7 +229,7 @@ void cpm2_pic_init(struct device_node *node) { int i; - cpm2_intctl = cpm2_map(im_intctl); + 
cpm2_intctl = &cpm2_immr->im_intctl; /* Clear the CPM IRQ controller, in case it has any bits set * from the bootloader @@ -261,7 +259,8 @@ void cpm2_pic_init(struct device_node *node) out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); /* create a legacy host */ - cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL); + cpm2_pic_host = irq_domain_create_linear(of_fwnode_handle(node), 64, + &cpm2_pic_host_ops, NULL); if (cpm2_pic_host == NULL) { printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); return; diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c index 7dc1960f8bdb..07ea605ab0e6 100644 --- a/arch/powerpc/sysdev/cpm_common.c +++ b/arch/powerpc/sysdev/cpm_common.c @@ -15,11 +15,9 @@ */ #include <linux/init.h> -#include <linux/of_device.h> #include <linux/spinlock.h> #include <linux/export.h> #include <linux/of.h> -#include <linux/of_address.h> #include <linux/slab.h> #include <asm/udbg.h> @@ -30,10 +28,6 @@ #include <mm/mmu_decl.h> -#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) -#include <linux/of_gpio.h> -#endif - static int __init cpm_init(void) { struct device_node *np; @@ -93,32 +87,33 @@ void __init udbg_init_cpm(void) #if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO) +#include <linux/gpio/driver.h> + struct cpm2_ioports { u32 dir, par, sor, odr, dat; u32 res[3]; }; struct cpm2_gpio32_chip { - struct of_mm_gpio_chip mm_gc; + struct gpio_chip gc; + void __iomem *regs; spinlock_t lock; /* shadowed data register to clear/set bits safely */ u32 cpdata; }; -static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc) +static void cpm2_gpio32_save_regs(struct cpm2_gpio32_chip *cpm2_gc) { - struct cpm2_gpio32_chip *cpm2_gc = - container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc); - struct cpm2_ioports __iomem *iop = mm_gc->regs; + struct cpm2_ioports __iomem *iop = cpm2_gc->regs; cpm2_gc->cpdata = in_be32(&iop->dat); } static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); - struct cpm2_ioports __iomem *iop = mm_gc->regs; + struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc); + struct cpm2_ioports __iomem *iop = cpm2_gc->regs; u32 pin_mask; pin_mask = 1 << (31 - gpio); @@ -126,11 +121,9 @@ static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio) return !!(in_be32(&iop->dat) & pin_mask); } -static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, - int value) +static void __cpm2_gpio32_set(struct cpm2_gpio32_chip *cpm2_gc, u32 pin_mask, int value) { - struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc); - struct cpm2_ioports __iomem *iop = mm_gc->regs; + struct cpm2_ioports __iomem *iop = cpm2_gc->regs; if (value) cpm2_gc->cpdata |= pin_mask; @@ -140,32 +133,32 @@ static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask, out_be32(&iop->dat, cpm2_gc->cpdata); } -static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) +static int cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc); unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm2_gc->lock, flags); - __cpm2_gpio32_set(mm_gc, pin_mask, value); + __cpm2_gpio32_set(cpm2_gc, pin_mask, value); spin_unlock_irqrestore(&cpm2_gc->lock, flags); + + return 0; } static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { - struct of_mm_gpio_chip 
*mm_gc = to_of_mm_gpio_chip(gc); struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc); - struct cpm2_ioports __iomem *iop = mm_gc->regs; + struct cpm2_ioports __iomem *iop = cpm2_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); spin_lock_irqsave(&cpm2_gc->lock, flags); setbits32(&iop->dir, pin_mask); - __cpm2_gpio32_set(mm_gc, pin_mask, val); + __cpm2_gpio32_set(cpm2_gc, pin_mask, val); spin_unlock_irqrestore(&cpm2_gc->lock, flags); @@ -174,9 +167,8 @@ static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio) { - struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc); - struct cpm2_ioports __iomem *iop = mm_gc->regs; + struct cpm2_ioports __iomem *iop = cpm2_gc->regs; unsigned long flags; u32 pin_mask = 1 << (31 - gpio); @@ -193,19 +185,17 @@ int cpm2_gpiochip_add32(struct device *dev) { struct device_node *np = dev->of_node; struct cpm2_gpio32_chip *cpm2_gc; - struct of_mm_gpio_chip *mm_gc; struct gpio_chip *gc; - cpm2_gc = kzalloc(sizeof(*cpm2_gc), GFP_KERNEL); + cpm2_gc = devm_kzalloc(dev, sizeof(*cpm2_gc), GFP_KERNEL); if (!cpm2_gc) return -ENOMEM; spin_lock_init(&cpm2_gc->lock); - mm_gc = &cpm2_gc->mm_gc; - gc = &mm_gc->gc; + gc = &cpm2_gc->gc; - mm_gc->save_regs = cpm2_gpio32_save_regs; + gc->base = -1; gc->ngpio = 32; gc->direction_input = cpm2_gpio32_dir_in; gc->direction_output = cpm2_gpio32_dir_out; @@ -214,6 +204,16 @@ int cpm2_gpiochip_add32(struct device *dev) gc->parent = dev; gc->owner = THIS_MODULE; - return of_mm_gpiochip_add_data(np, mm_gc, cpm2_gc); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + + cpm2_gc->regs = devm_of_iomap(dev, np, 0, NULL); + if (IS_ERR(cpm2_gc->regs)) + return PTR_ERR(cpm2_gc->regs); + + cpm2_gpio32_save_regs(cpm2_gc); + + return devm_gpiochip_add_data(dev, gc, cpm2_gc); } #endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */ diff --git a/arch/powerpc/sysdev/cpm_gpio.c b/arch/powerpc/sysdev/cpm_gpio.c index 0695d26bd301..40f57111e93e 100644 --- a/arch/powerpc/sysdev/cpm_gpio.c +++ b/arch/powerpc/sysdev/cpm_gpio.c @@ -9,7 +9,8 @@ */ #include <linux/module.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <asm/cpm.h> #ifdef CONFIG_8xx_GPIO diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 1d33b7a5ea83..c0d10c149661 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -24,9 +24,8 @@ #include <linux/suspend.h> #include <linux/memblock.h> #include <linux/gfp.h> -#include <linux/kmemleak.h> +#include <linux/of_address.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/iommu.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> @@ -226,7 +225,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages) dart_cache_sync(orig_dp, orig_npages); } -static void allocate_dart(void) +static void __init allocate_dart(void) { unsigned long tmp; @@ -243,9 +242,6 @@ static void allocate_dart(void) if (!dart_tablebase) panic("Failed to allocate 16MB below 2GB for DART table\n"); - /* There is no point scanning the DART space for leaks*/ - kmemleak_no_scan((void *)dart_tablebase); - /* Allocate a spare page to map all invalid DART pages. 
We need to do * that to work around what looks like a problem with the HT bridge * prefetching into invalid pages and corrupting data @@ -404,9 +400,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) } /* Initialize the DART HW */ - if (dart_init(dn) != 0) + if (dart_init(dn) != 0) { + of_node_put(dn); return; - + } /* * U4 supports a DART bypass, we use it for 64-bit capable devices to * improve performance. However, that only works for devices connected @@ -419,6 +416,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) /* Setup pci_dma ops */ set_pci_dma_ops(&dma_iommu_ops); + of_node_put(dn); } #ifdef CONFIG_PM diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S index efeeb1b885a1..e8401b205d38 100644 --- a/arch/powerpc/sysdev/dcr-low.S +++ b/arch/powerpc/sysdev/dcr-low.S @@ -5,13 +5,13 @@ * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net> */ +#include <linux/export.h> #include <asm/ppc_asm.h> #include <asm/processor.h> #include <asm/bug.h> -#include <asm/export.h> #define DCR_ACCESS_PROLOG(table) \ - cmpli cr0,r3,1024; \ + cmplwi cr0,r3,1024; \ rlwinm r3,r3,4,18,27; \ lis r5,table@h; \ ori r5,r5,table@l; \ diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c index 22991e1128e3..cb44a69958e7 100644 --- a/arch/powerpc/sysdev/dcr.c +++ b/arch/powerpc/sysdev/dcr.c @@ -8,110 +8,9 @@ #include <linux/kernel.h> #include <linux/export.h> -#include <asm/prom.h> +#include <linux/of_address.h> #include <asm/dcr.h> -#ifdef CONFIG_PPC_DCR_MMIO -static struct device_node *find_dcr_parent(struct device_node *node) -{ - struct device_node *par, *tmp; - const u32 *p; - - for (par = of_node_get(node); par;) { - if (of_get_property(par, "dcr-controller", NULL)) - break; - p = of_get_property(par, "dcr-parent", NULL); - tmp = par; - if (p == NULL) - par = of_get_parent(par); - else - par = of_find_node_by_phandle(*p); - of_node_put(tmp); - } - return par; -} -#endif - -#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) - -bool dcr_map_ok_generic(dcr_host_t host) -{ - if (host.type == DCR_HOST_NATIVE) - return dcr_map_ok_native(host.host.native); - else if (host.type == DCR_HOST_MMIO) - return dcr_map_ok_mmio(host.host.mmio); - else - return false; -} -EXPORT_SYMBOL_GPL(dcr_map_ok_generic); - -dcr_host_t dcr_map_generic(struct device_node *dev, - unsigned int dcr_n, - unsigned int dcr_c) -{ - dcr_host_t host; - struct device_node *dp; - const char *prop; - - host.type = DCR_HOST_INVALID; - - dp = find_dcr_parent(dev); - if (dp == NULL) - return host; - - prop = of_get_property(dp, "dcr-access-method", NULL); - - pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop); - - if (!strcmp(prop, "native")) { - host.type = DCR_HOST_NATIVE; - host.host.native = dcr_map_native(dev, dcr_n, dcr_c); - } else if (!strcmp(prop, "mmio")) { - host.type = DCR_HOST_MMIO; - host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c); - } - - of_node_put(dp); - return host; -} -EXPORT_SYMBOL_GPL(dcr_map_generic); - -void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c) -{ - if (host.type == DCR_HOST_NATIVE) - dcr_unmap_native(host.host.native, dcr_c); - else if (host.type == DCR_HOST_MMIO) - dcr_unmap_mmio(host.host.mmio, dcr_c); - else /* host.type == DCR_HOST_INVALID */ - WARN_ON(true); -} -EXPORT_SYMBOL_GPL(dcr_unmap_generic); - -u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n) -{ - if (host.type == DCR_HOST_NATIVE) - return dcr_read_native(host.host.native, dcr_n); - else if (host.type == 
DCR_HOST_MMIO) - return dcr_read_mmio(host.host.mmio, dcr_n); - else /* host.type == DCR_HOST_INVALID */ - WARN_ON(true); - return 0; -} -EXPORT_SYMBOL_GPL(dcr_read_generic); - -void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value) -{ - if (host.type == DCR_HOST_NATIVE) - dcr_write_native(host.host.native, dcr_n, value); - else if (host.type == DCR_HOST_MMIO) - dcr_write_mmio(host.host.mmio, dcr_n, value); - else /* host.type == DCR_HOST_INVALID */ - WARN_ON(true); -} -EXPORT_SYMBOL_GPL(dcr_write_generic); - -#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ - unsigned int dcr_resource_start(const struct device_node *np, unsigned int index) { @@ -137,86 +36,5 @@ unsigned int dcr_resource_len(const struct device_node *np, unsigned int index) } EXPORT_SYMBOL_GPL(dcr_resource_len); -#ifdef CONFIG_PPC_DCR_MMIO - -static u64 of_translate_dcr_address(struct device_node *dev, - unsigned int dcr_n, - unsigned int *out_stride) -{ - struct device_node *dp; - const u32 *p; - unsigned int stride; - u64 ret = OF_BAD_ADDR; - - dp = find_dcr_parent(dev); - if (dp == NULL) - return OF_BAD_ADDR; - - /* Stride is not properly defined yet, default to 0x10 for Axon */ - p = of_get_property(dp, "dcr-mmio-stride", NULL); - stride = (p == NULL) ? 0x10 : *p; - - /* XXX FIXME: Which property name is to use of the 2 following ? */ - p = of_get_property(dp, "dcr-mmio-range", NULL); - if (p == NULL) - p = of_get_property(dp, "dcr-mmio-space", NULL); - if (p == NULL) - goto done; - - /* Maybe could do some better range checking here */ - ret = of_translate_address(dp, p); - if (ret != OF_BAD_ADDR) - ret += (u64)(stride) * (u64)dcr_n; - if (out_stride) - *out_stride = stride; - - done: - of_node_put(dp); - return ret; -} - -dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, - unsigned int dcr_n, - unsigned int dcr_c) -{ - dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n }; - u64 addr; - - pr_debug("dcr_map(%pOF, 0x%x, 0x%x)\n", - dev, dcr_n, dcr_c); - - addr = of_translate_dcr_address(dev, dcr_n, &ret.stride); - pr_debug("translates to addr: 0x%llx, stride: 0x%x\n", - (unsigned long long) addr, ret.stride); - if (addr == OF_BAD_ADDR) - return ret; - pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride); - ret.token = ioremap(addr, dcr_c * ret.stride); - if (ret.token == NULL) - return ret; - pr_debug("mapped at 0x%p -> base is 0x%p\n", - ret.token, ret.token - dcr_n * ret.stride); - ret.token -= dcr_n * ret.stride; - return ret; -} -EXPORT_SYMBOL_GPL(dcr_map_mmio); - -void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c) -{ - dcr_host_mmio_t h = host; - - if (h.token == NULL) - return; - h.token += host.base * h.stride; - iounmap(h.token); - h.token = NULL; -} -EXPORT_SYMBOL_GPL(dcr_unmap_mmio); - -#endif /* defined(CONFIG_PPC_DCR_MMIO) */ - -#ifdef CONFIG_PPC_DCR_NATIVE DEFINE_SPINLOCK(dcr_ind_lock); EXPORT_SYMBOL_GPL(dcr_ind_lock); -#endif /* defined(CONFIG_PPC_DCR_NATIVE) */ - diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index 00705258ecf9..b6f9774038e1 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c @@ -42,33 +42,33 @@ static u32 __iomem *mpic_percpu_base_vaddr; * Linux descriptor level callbacks */ -void ehv_pic_unmask_irq(struct irq_data *d) +static void ehv_pic_unmask_irq(struct irq_data *d) { unsigned int src = virq_to_hw(d->irq); ev_int_set_mask(src, 0); } -void ehv_pic_mask_irq(struct irq_data *d) +static void ehv_pic_mask_irq(struct irq_data *d) { unsigned int src = 
virq_to_hw(d->irq); ev_int_set_mask(src, 1); } -void ehv_pic_end_irq(struct irq_data *d) +static void ehv_pic_end_irq(struct irq_data *d) { unsigned int src = virq_to_hw(d->irq); ev_int_eoi(src); } -void ehv_pic_direct_end_irq(struct irq_data *d) +static void ehv_pic_direct_end_irq(struct irq_data *d) { out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0); } -int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest, +static int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest, bool force) { unsigned int src = virq_to_hw(d->irq); @@ -109,7 +109,7 @@ static unsigned int ehv_pic_type_to_vecpri(unsigned int type) } } -int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type) +static int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type) { unsigned int src = virq_to_hw(d->irq); unsigned int vecpri, vold, vnew, prio, cpu_dest; @@ -175,7 +175,7 @@ unsigned int ehv_pic_get_irq(void) * this will also setup revmap[] in the slow path for the first * time, next calls will always use fast path by indexing revmap */ - return irq_linear_revmap(global_ehv_pic->irqhost, irq); + return irq_find_mapping(global_ehv_pic->irqhost, irq); } static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, @@ -256,7 +256,6 @@ void __init ehv_pic_init(void) { struct device_node *np, *np2; struct ehv_pic *ehv_pic; - int coreint_flag = 1; np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic"); if (!np) { @@ -264,17 +263,15 @@ void __init ehv_pic_init(void) return; } - if (!of_find_property(np, "has-external-proxy", NULL)) - coreint_flag = 0; - ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL); if (!ehv_pic) { of_node_put(np); return; } - ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS, - &ehv_pic_host_ops, ehv_pic); + ehv_pic->irqhost = irq_domain_create_linear(of_fwnode_handle(np), + NR_EHV_PIC_INTS, + &ehv_pic_host_ops, ehv_pic); if (!ehv_pic->irqhost) { of_node_put(np); kfree(ehv_pic); @@ -292,8 +289,8 @@ void __init ehv_pic_init(void) ehv_pic->hc_irq = ehv_pic_irq_chip; ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity; - ehv_pic->coreint_flag = coreint_flag; + ehv_pic->coreint_flag = of_property_read_bool(np, "has-external-proxy"); global_ehv_pic = ehv_pic; - irq_set_default_host(global_ehv_pic->irqhost); + irq_set_default_domain(global_ehv_pic->irqhost); } diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h deleted file mode 100644 index ce370749add9..000000000000 --- a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc - * - * QorIQ based Cache Controller Memory Mapped Registers - * - * Author: Vivek Mahajan <vivek.mahajan@freescale.com> - */ - -#ifndef __FSL_85XX_CACHE_CTLR_H__ -#define __FSL_85XX_CACHE_CTLR_H__ - -#define L2CR_L2FI 0x40000000 /* L2 flash invalidate */ -#define L2CR_L2IO 0x00200000 /* L2 instruction only */ -#define L2CR_SRAM_ZERO 0x00000000 /* L2SRAM zero size */ -#define L2CR_SRAM_FULL 0x00010000 /* L2SRAM full size */ -#define L2CR_SRAM_HALF 0x00020000 /* L2SRAM half size */ -#define L2CR_SRAM_TWO_HALFS 0x00030000 /* L2SRAM two half sizes */ -#define L2CR_SRAM_QUART 0x00040000 /* L2SRAM one quarter size */ -#define L2CR_SRAM_TWO_QUARTS 0x00050000 /* L2SRAM two quarter size */ -#define L2CR_SRAM_EIGHTH 0x00060000 /* L2SRAM one eighth size */ -#define L2CR_SRAM_TWO_EIGHTH 0x00070000 /* L2SRAM two eighth size */ - -#define 
L2SRAM_OPTIMAL_SZ_SHIFT 0x00000003 /* Optimum size for L2SRAM */ - -#define L2SRAM_BAR_MSK_LO18 0xFFFFC000 /* Lower 18 bits */ -#define L2SRAM_BARE_MSK_HI4 0x0000000F /* Upper 4 bits */ - -enum cache_sram_lock_ways { - LOCK_WAYS_ZERO, - LOCK_WAYS_EIGHTH, - LOCK_WAYS_TWO_EIGHTH, - LOCK_WAYS_HALF = 4, - LOCK_WAYS_FULL = 8, -}; - -struct mpc85xx_l2ctlr { - u32 ctl; /* 0x000 - L2 control */ - u8 res1[0xC]; - u32 ewar0; /* 0x010 - External write address 0 */ - u32 ewarea0; /* 0x014 - External write address extended 0 */ - u32 ewcr0; /* 0x018 - External write ctrl */ - u8 res2[4]; - u32 ewar1; /* 0x020 - External write address 1 */ - u32 ewarea1; /* 0x024 - External write address extended 1 */ - u32 ewcr1; /* 0x028 - External write ctrl 1 */ - u8 res3[4]; - u32 ewar2; /* 0x030 - External write address 2 */ - u32 ewarea2; /* 0x034 - External write address extended 2 */ - u32 ewcr2; /* 0x038 - External write ctrl 2 */ - u8 res4[4]; - u32 ewar3; /* 0x040 - External write address 3 */ - u32 ewarea3; /* 0x044 - External write address extended 3 */ - u32 ewcr3; /* 0x048 - External write ctrl 3 */ - u8 res5[0xB4]; - u32 srbar0; /* 0x100 - SRAM base address 0 */ - u32 srbarea0; /* 0x104 - SRAM base addr reg ext address 0 */ - u32 srbar1; /* 0x108 - SRAM base address 1 */ - u32 srbarea1; /* 0x10C - SRAM base addr reg ext address 1 */ - u8 res6[0xCF0]; - u32 errinjhi; /* 0xE00 - Error injection mask high */ - u32 errinjlo; /* 0xE04 - Error injection mask low */ - u32 errinjctl; /* 0xE08 - Error injection tag/ecc control */ - u8 res7[0x14]; - u32 captdatahi; /* 0xE20 - Error data high capture */ - u32 captdatalo; /* 0xE24 - Error data low capture */ - u32 captecc; /* 0xE28 - Error syndrome */ - u8 res8[0x14]; - u32 errdet; /* 0xE40 - Error detect */ - u32 errdis; /* 0xE44 - Error disable */ - u32 errinten; /* 0xE48 - Error interrupt enable */ - u32 errattr; /* 0xE4c - Error attribute capture */ - u32 erradrrl; /* 0xE50 - Error address capture low */ - u32 erradrrh; /* 0xE54 - Error address capture high */ - u32 errctl; /* 0xE58 - Error control */ - u8 res9[0x1A4]; -}; - -struct sram_parameters { - unsigned int sram_size; - phys_addr_t sram_offset; -}; - -extern int instantiate_cache_sram(struct platform_device *dev, - struct sram_parameters sram_params); -extern void remove_cache_sram(struct platform_device *dev); - -#endif /* __FSL_85XX_CACHE_CTLR_H__ */ diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c deleted file mode 100644 index a3aeaa5f0f1b..000000000000 --- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c +++ /dev/null @@ -1,147 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2009-2010 Freescale Semiconductor, Inc. - * - * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM - * - * Author: Vivek Mahajan <vivek.mahajan@freescale.com> - * - * This file is derived from the original work done - * by Sylvain Munaut for the Bestcomm SRAM allocator. 
- */ - -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <linux/of_platform.h> -#include <linux/pgtable.h> -#include <asm/fsl_85xx_cache_sram.h> - -#include "fsl_85xx_cache_ctlr.h" - -struct mpc85xx_cache_sram *cache_sram; - -void *mpc85xx_cache_sram_alloc(unsigned int size, - phys_addr_t *phys, unsigned int align) -{ - unsigned long offset; - unsigned long flags; - - if (unlikely(cache_sram == NULL)) - return NULL; - - if (!size || (size > cache_sram->size) || (align > cache_sram->size)) { - pr_err("%s(): size(=%x) or align(=%x) zero or too big\n", - __func__, size, align); - return NULL; - } - - if ((align & (align - 1)) || align <= 1) { - pr_err("%s(): align(=%x) must be power of two and >1\n", - __func__, align); - return NULL; - } - - spin_lock_irqsave(&cache_sram->lock, flags); - offset = rh_alloc_align(cache_sram->rh, size, align, NULL); - spin_unlock_irqrestore(&cache_sram->lock, flags); - - if (IS_ERR_VALUE(offset)) - return NULL; - - *phys = cache_sram->base_phys + offset; - - return (unsigned char *)cache_sram->base_virt + offset; -} -EXPORT_SYMBOL(mpc85xx_cache_sram_alloc); - -void mpc85xx_cache_sram_free(void *ptr) -{ - unsigned long flags; - BUG_ON(!ptr); - - spin_lock_irqsave(&cache_sram->lock, flags); - rh_free(cache_sram->rh, ptr - cache_sram->base_virt); - spin_unlock_irqrestore(&cache_sram->lock, flags); -} -EXPORT_SYMBOL(mpc85xx_cache_sram_free); - -int __init instantiate_cache_sram(struct platform_device *dev, - struct sram_parameters sram_params) -{ - int ret = 0; - - if (cache_sram) { - dev_err(&dev->dev, "Already initialized cache-sram\n"); - return -EBUSY; - } - - cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL); - if (!cache_sram) { - dev_err(&dev->dev, "Out of memory for cache_sram structure\n"); - return -ENOMEM; - } - - cache_sram->base_phys = sram_params.sram_offset; - cache_sram->size = sram_params.sram_size; - - if (!request_mem_region(cache_sram->base_phys, cache_sram->size, - "fsl_85xx_cache_sram")) { - dev_err(&dev->dev, "%pOF: request memory failed\n", - dev->dev.of_node); - ret = -ENXIO; - goto out_free; - } - - cache_sram->base_virt = ioremap_coherent(cache_sram->base_phys, - cache_sram->size); - if (!cache_sram->base_virt) { - dev_err(&dev->dev, "%pOF: ioremap_coherent failed\n", - dev->dev.of_node); - ret = -ENOMEM; - goto out_release; - } - - cache_sram->rh = rh_create(sizeof(unsigned int)); - if (IS_ERR(cache_sram->rh)) { - dev_err(&dev->dev, "%pOF: Unable to create remote heap\n", - dev->dev.of_node); - ret = PTR_ERR(cache_sram->rh); - goto out_unmap; - } - - rh_attach_region(cache_sram->rh, 0, cache_sram->size); - spin_lock_init(&cache_sram->lock); - - dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n", - (unsigned long long)cache_sram->base_phys, cache_sram->size); - - return 0; - -out_unmap: - iounmap(cache_sram->base_virt); - -out_release: - release_mem_region(cache_sram->base_phys, cache_sram->size); - -out_free: - kfree(cache_sram); - return ret; -} - -void remove_cache_sram(struct platform_device *dev) -{ - BUG_ON(!cache_sram); - - rh_detach_region(cache_sram->rh, 0, cache_sram->size); - rh_destroy(cache_sram->rh); - - iounmap(cache_sram->base_virt); - release_mem_region(cache_sram->base_phys, cache_sram->size); - - kfree(cache_sram); - cache_sram = NULL; - - dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n"); -} diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c deleted file mode 100644 
index 2d0af0c517bb..000000000000 --- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c +++ /dev/null @@ -1,216 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc. - * - * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation - * - * Author: Vivek Mahajan <vivek.mahajan@freescale.com> - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/of_platform.h> -#include <asm/io.h> - -#include "fsl_85xx_cache_ctlr.h" - -static char *sram_size; -static char *sram_offset; -struct mpc85xx_l2ctlr __iomem *l2ctlr; - -static int get_cache_sram_params(struct sram_parameters *sram_params) -{ - unsigned long long addr; - unsigned int size; - - if (!sram_size || (kstrtouint(sram_size, 0, &size) < 0)) - return -EINVAL; - - if (!sram_offset || (kstrtoull(sram_offset, 0, &addr) < 0)) - return -EINVAL; - - sram_params->sram_offset = addr; - sram_params->sram_size = size; - - return 0; -} - -static int __init get_size_from_cmdline(char *str) -{ - if (!str) - return 0; - - sram_size = str; - return 1; -} - -static int __init get_offset_from_cmdline(char *str) -{ - if (!str) - return 0; - - sram_offset = str; - return 1; -} - -__setup("cache-sram-size=", get_size_from_cmdline); -__setup("cache-sram-offset=", get_offset_from_cmdline); - -static int mpc85xx_l2ctlr_of_probe(struct platform_device *dev) -{ - long rval; - unsigned int rem; - unsigned char ways; - const unsigned int *prop; - unsigned int l2cache_size; - struct sram_parameters sram_params; - - if (!dev->dev.of_node) { - dev_err(&dev->dev, "Device's OF-node is NULL\n"); - return -EINVAL; - } - - prop = of_get_property(dev->dev.of_node, "cache-size", NULL); - if (!prop) { - dev_err(&dev->dev, "Missing L2 cache-size\n"); - return -EINVAL; - } - l2cache_size = *prop; - - if (get_cache_sram_params(&sram_params)) - return 0; /* fall back to L2 cache only */ - - rem = l2cache_size % sram_params.sram_size; - ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size; - if (rem || (ways & (ways - 1))) { - dev_err(&dev->dev, "Illegal cache-sram-size in command line\n"); - return -EINVAL; - } - - l2ctlr = of_iomap(dev->dev.of_node, 0); - if (!l2ctlr) { - dev_err(&dev->dev, "Can't map L2 controller\n"); - return -EINVAL; - } - - /* - * Write bits[0-17] to srbar0 - */ - out_be32(&l2ctlr->srbar0, - lower_32_bits(sram_params.sram_offset) & L2SRAM_BAR_MSK_LO18); - - /* - * Write bits[18-21] to srbare0 - */ -#ifdef CONFIG_PHYS_64BIT - out_be32(&l2ctlr->srbarea0, - upper_32_bits(sram_params.sram_offset) & L2SRAM_BARE_MSK_HI4); -#endif - - clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI); - - switch (ways) { - case LOCK_WAYS_EIGHTH: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_EIGHTH); - break; - - case LOCK_WAYS_TWO_EIGHTH: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_QUART); - break; - - case LOCK_WAYS_HALF: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_HALF); - break; - - case LOCK_WAYS_FULL: - default: - setbits32(&l2ctlr->ctl, - L2CR_L2E | L2CR_L2FI | L2CR_SRAM_FULL); - break; - } - eieio(); - - rval = instantiate_cache_sram(dev, sram_params); - if (rval < 0) { - dev_err(&dev->dev, "Can't instantiate Cache-SRAM\n"); - iounmap(l2ctlr); - return -EINVAL; - } - - return 0; -} - -static int mpc85xx_l2ctlr_of_remove(struct platform_device *dev) -{ - BUG_ON(!l2ctlr); - - iounmap(l2ctlr); - remove_cache_sram(dev); - dev_info(&dev->dev, "MPC85xx L2 controller unloaded\n"); - - return 0; -} - -static const struct of_device_id 
mpc85xx_l2ctlr_of_match[] = { - { - .compatible = "fsl,p2020-l2-cache-controller", - }, - { - .compatible = "fsl,p2010-l2-cache-controller", - }, - { - .compatible = "fsl,p1020-l2-cache-controller", - }, - { - .compatible = "fsl,p1011-l2-cache-controller", - }, - { - .compatible = "fsl,p1013-l2-cache-controller", - }, - { - .compatible = "fsl,p1022-l2-cache-controller", - }, - { - .compatible = "fsl,mpc8548-l2-cache-controller", - }, - { .compatible = "fsl,mpc8544-l2-cache-controller",}, - { .compatible = "fsl,mpc8572-l2-cache-controller",}, - { .compatible = "fsl,mpc8536-l2-cache-controller",}, - { .compatible = "fsl,p1021-l2-cache-controller",}, - { .compatible = "fsl,p1012-l2-cache-controller",}, - { .compatible = "fsl,p1025-l2-cache-controller",}, - { .compatible = "fsl,p1016-l2-cache-controller",}, - { .compatible = "fsl,p1024-l2-cache-controller",}, - { .compatible = "fsl,p1015-l2-cache-controller",}, - { .compatible = "fsl,p1010-l2-cache-controller",}, - { .compatible = "fsl,bsc9131-l2-cache-controller",}, - {}, -}; - -static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { - .driver = { - .name = "fsl-l2ctlr", - .of_match_table = mpc85xx_l2ctlr_of_match, - }, - .probe = mpc85xx_l2ctlr_of_probe, - .remove = mpc85xx_l2ctlr_of_remove, -}; - -static __init int mpc85xx_l2ctlr_of_init(void) -{ - return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver); -} - -static void __exit mpc85xx_l2ctlr_of_exit(void) -{ - platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver); -} - -subsys_initcall(mpc85xx_l2ctlr_of_init); -module_exit(mpc85xx_l2ctlr_of_exit); - -MODULE_DESCRIPTION("Freescale MPC85xx L2 controller init"); -MODULE_LICENSE("GPL v2"); diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c index 8963eaffb1b7..3dabc9621810 100644 --- a/arch/powerpc/sysdev/fsl_gtm.c +++ b/arch/powerpc/sysdev/fsl_gtm.c @@ -77,7 +77,7 @@ struct gtm { static LIST_HEAD(gtms); /** - * gtm_get_timer - request GTM timer to use it with the rest of GTM API + * gtm_get_timer16 - request GTM timer to use it with the rest of GTM API * Context: non-IRQ * * This function reserves GTM timer for later use. It returns gtm_timer @@ -86,7 +86,7 @@ static LIST_HEAD(gtms); */ struct gtm_timer *gtm_get_timer16(void) { - struct gtm *gtm = NULL; + struct gtm *gtm; int i; list_for_each_entry(gtm, &gtms, list_node) { @@ -103,14 +103,14 @@ struct gtm_timer *gtm_get_timer16(void) spin_unlock_irq(&gtm->lock); } - if (gtm) + if (!list_empty(&gtms)) return ERR_PTR(-EBUSY); return ERR_PTR(-ENODEV); } EXPORT_SYMBOL(gtm_get_timer16); /** - * gtm_get_specific_timer - request specific GTM timer + * gtm_get_specific_timer16 - request specific GTM timer * @gtm: specific GTM, pass here GTM's device_node->data * @timer: specific timer number, Timer1 is 0.
* Context: non-IRQ @@ -260,7 +260,7 @@ int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec, bool reload) EXPORT_SYMBOL(gtm_set_timer16); /** - * gtm_set_exact_utimer16 - (re)set 16 bits timer + * gtm_set_exact_timer16 - (re)set 16 bits timer * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer * @usec: timer interval in microseconds * @reload: if set, the timer will reset upon expiry rather than diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 1985e067e952..7ed07232a69a 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c @@ -18,13 +18,14 @@ #include <linux/types.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/mod_devicetable.h> #include <linux/syscore_ops.h> -#include <asm/prom.h> #include <asm/fsl_lbc.h> static DEFINE_SPINLOCK(fsl_lbc_lock); @@ -37,7 +38,7 @@ EXPORT_SYMBOL(fsl_lbc_ctrl_dev); * * This function converts a base address of lbc into the right format for the * BR register. If the SOC has eLBC then it returns 32bit physical address - * else it convers a 34bit local bus physical address to correct format of + * else it converts a 34bit local bus physical address to correct format of * 32bit address for BR register (Example: MPC8641). */ u32 fsl_lbc_addr(phys_addr_t addr_base) @@ -349,7 +350,7 @@ err: #ifdef CONFIG_SUSPEND /* save lbc registers */ -static int fsl_lbc_syscore_suspend(void) +static int fsl_lbc_syscore_suspend(void *data) { struct fsl_lbc_ctrl *ctrl; struct fsl_lbc_regs __iomem *lbc; @@ -373,7 +374,7 @@ out: } /* restore lbc registers */ -static void fsl_lbc_syscore_resume(void) +static void fsl_lbc_syscore_resume(void *data) { struct fsl_lbc_ctrl *ctrl; struct fsl_lbc_regs __iomem *lbc; @@ -407,10 +408,14 @@ static const struct of_device_id fsl_lbc_match[] = { }; #ifdef CONFIG_SUSPEND -static struct syscore_ops lbc_syscore_pm_ops = { +static const struct syscore_ops lbc_syscore_pm_ops = { .suspend = fsl_lbc_syscore_suspend, .resume = fsl_lbc_syscore_resume, }; + +static struct syscore lbc_syscore_pm = { + .ops = &lbc_syscore_pm_ops, +}; #endif static struct platform_driver fsl_lbc_ctrl_driver = { @@ -424,7 +429,7 @@ static struct platform_driver fsl_lbc_ctrl_driver = { static int __init fsl_lbc_init(void) { #ifdef CONFIG_SUSPEND - register_syscore_ops(&lbc_syscore_pm_ops); + register_syscore(&lbc_syscore_pm); #endif return platform_driver_register(&fsl_lbc_ctrl_driver); } diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c index 5fa5fa215541..df06bb6b838f 100644 --- a/arch/powerpc/sysdev/fsl_mpic_err.c +++ b/arch/powerpc/sysdev/fsl_mpic_err.c @@ -58,7 +58,7 @@ static struct irq_chip fsl_mpic_err_chip = { .irq_unmask = fsl_mpic_unmask_err, }; -int mpic_setup_error_int(struct mpic *mpic, int intvec) +int __init mpic_setup_error_int(struct mpic *mpic, int intvec) { int i; @@ -99,7 +99,6 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data) struct mpic *mpic = (struct mpic *) data; u32 eisr, eimr; int errint; - unsigned int cascade_irq; eisr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EISR); eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR); @@ -108,13 +107,11 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data) return IRQ_NONE; while (eisr) { + int ret; errint = __builtin_clz(eisr); - cascade_irq = irq_linear_revmap(mpic->irqhost, 
- mpic->err_int_vecs[errint]); - WARN_ON(!cascade_irq); - if (cascade_irq) { - generic_handle_irq(cascade_irq); - } else { + ret = generic_handle_domain_irq(mpic->irqhost, + mpic->err_int_vecs[errint]); + if (WARN_ON(ret)) { eimr |= 1 << (31 - errint); mpic_fsl_err_write(mpic->err_regs, eimr); } @@ -124,7 +121,7 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data) return IRQ_HANDLED; } -void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum) +void __init mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum) { unsigned int virq; int ret; diff --git a/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c b/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c index c2baa283e624..06d9101a5d49 100644 --- a/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c +++ b/arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c @@ -75,7 +75,7 @@ static ssize_t fsl_timer_wakeup_store(struct device *dev, if (kstrtoll(buf, 0, &interval)) return -EINVAL; - mutex_lock(&sysfs_lock); + guard(mutex)(&sysfs_lock); if (fsl_wakeup->timer) { disable_irq_wake(fsl_wakeup->timer->irq); @@ -83,31 +83,23 @@ static ssize_t fsl_timer_wakeup_store(struct device *dev, fsl_wakeup->timer = NULL; } - if (!interval) { - mutex_unlock(&sysfs_lock); + if (!interval) return count; - } fsl_wakeup->timer = mpic_request_timer(fsl_mpic_timer_irq, fsl_wakeup, interval); - if (!fsl_wakeup->timer) { - mutex_unlock(&sysfs_lock); + if (!fsl_wakeup->timer) return -EINVAL; - } ret = enable_irq_wake(fsl_wakeup->timer->irq); if (ret) { mpic_free_timer(fsl_wakeup->timer); fsl_wakeup->timer = NULL; - mutex_unlock(&sysfs_lock); - return ret; } mpic_start_timer(fsl_wakeup->timer); - mutex_unlock(&sysfs_lock); - return count; } @@ -116,7 +108,8 @@ static struct device_attribute mpic_attributes = __ATTR(timer_wakeup, 0644, static int __init fsl_wakeup_sys_init(void) { - int ret; + struct device *dev_root; + int ret = -EINVAL; fsl_wakeup = kzalloc(sizeof(struct fsl_mpic_timer_wakeup), GFP_KERNEL); if (!fsl_wakeup) @@ -124,16 +117,26 @@ static int __init fsl_wakeup_sys_init(void) INIT_WORK(&fsl_wakeup->free_work, fsl_free_resource); - ret = device_create_file(mpic_subsys.dev_root, &mpic_attributes); - if (ret) - kfree(fsl_wakeup); + dev_root = bus_get_dev_root(&mpic_subsys); + if (dev_root) { + ret = device_create_file(dev_root, &mpic_attributes); + put_device(dev_root); + if (ret) + kfree(fsl_wakeup); + } return ret; } static void __exit fsl_wakeup_sys_exit(void) { - device_remove_file(mpic_subsys.dev_root, &mpic_attributes); + struct device *dev_root; + + dev_root = bus_get_dev_root(&mpic_subsys); + if (dev_root) { + device_remove_file(dev_root, &mpic_attributes); + put_device(dev_root); + } mutex_lock(&sysfs_lock); diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 808e7118abfc..2a007bfb038d 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -11,11 +11,15 @@ #include <linux/msi.h> #include <linux/pci.h> #include <linux/slab.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/property.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/seq_file.h> #include <sysdev/fsl_soc.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/mpic.h> @@ -71,7 +75,7 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK; cascade_virq = msi_data->cascade_array[srs]->virq; - 
seq_printf(p, " fsl-msi-%d", cascade_virq); + seq_printf(p, "fsl-msi-%d", cascade_virq); } @@ -125,17 +129,14 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) struct fsl_msi *msi_data; irq_hw_number_t hwirq; - for_each_pci_msi_entry(entry, pdev) { - if (!entry->irq) - continue; + msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) { hwirq = virq_to_hw(entry->irq); msi_data = irq_get_chip_data(entry->irq); irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); + entry->irq = 0; msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1); } - - return; } static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq, @@ -211,11 +212,13 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) dev_err(&pdev->dev, "node %pOF has an invalid fsl,msi phandle %u\n", hose->dn, np->phandle); + of_node_put(np); return -EINVAL; } + of_node_put(np); } - for_each_pci_msi_entry(entry, pdev) { + msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) { /* * Loop over all the MSI devices until we find one that has an * available interrupt. @@ -266,7 +269,6 @@ out_free: static irqreturn_t fsl_msi_cascade(int irq, void *data) { - unsigned int cascade_irq; struct fsl_msi *msi_data; int msir_index = -1; u32 msir_value = 0; @@ -279,9 +281,6 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data) msir_index = cascade_data->index; - if (msir_index >= NR_MSI_REG_MAX) - cascade_irq = 0; - switch (msi_data->feature & FSL_PIC_IP_MASK) { case FSL_PIC_IP_MPIC: msir_value = fsl_msi_read(msi_data->msi_regs, @@ -305,15 +304,15 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data) } while (msir_value) { + int err; intr_index = ffs(msir_value) - 1; - cascade_irq = irq_linear_revmap(msi_data->irqhost, + err = generic_handle_domain_irq(msi_data->irqhost, msi_hwirq(msi_data, msir_index, intr_index + have_shift)); - if (cascade_irq) { - generic_handle_irq(cascade_irq); + if (!err) ret = IRQ_HANDLED; - } + have_shift += intr_index + 1; msir_value = msir_value >> (intr_index + 1); } @@ -321,7 +320,7 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data) return ret; } -static int fsl_of_msi_remove(struct platform_device *ofdev) +static void fsl_of_msi_remove(struct platform_device *ofdev) { struct fsl_msi *msi = platform_get_drvdata(ofdev); int virq, i; @@ -344,8 +343,6 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) iounmap(msi->msi_regs); kfree(msi); - - return 0; } static struct lock_class_key fsl_msi_irq_class; @@ -395,7 +392,6 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, static const struct of_device_id fsl_of_msi_ids[]; static int fsl_of_msi_probe(struct platform_device *dev) { - const struct of_device_id *match; struct fsl_msi *msi; struct resource res, msiir; int err, i, j, irq_index, count; @@ -405,10 +401,7 @@ static int fsl_of_msi_probe(struct platform_device *dev) u32 offset; struct pci_controller *phb; - match = of_match_device(fsl_of_msi_ids, &dev->dev); - if (!match) - return -EINVAL; - features = match->data; + features = device_get_match_data(&dev->dev); printk(KERN_DEBUG "Setting up Freescale MSI support\n"); @@ -419,9 +412,8 @@ static int fsl_of_msi_probe(struct platform_device *dev) } platform_set_drvdata(dev, msi); - msi->irqhost = irq_domain_add_linear(dev->dev.of_node, - NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi); - + msi->irqhost = irq_domain_create_linear(dev_fwnode(&dev->dev), NR_MSI_IRQS_MAX, + &fsl_msi_host_ops, msi); if (msi->irqhost == 
NULL) { dev_err(&dev->dev, "No memory for MSI irqhost\n"); err = -ENOMEM; @@ -571,10 +563,12 @@ static const struct fsl_msi_feature ipic_msi_feature = { .msiir_offset = 0x38, }; +#ifdef CONFIG_EPAPR_PARAVIRT static const struct fsl_msi_feature vmpic_msi_feature = { .fsl_pic_ip = FSL_PIC_IP_VMPIC, .msiir_offset = 0, }; +#endif static const struct of_device_id fsl_of_msi_ids[] = { { diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index b8f76f3fd994..4e501654cb41 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -22,6 +22,8 @@ #include <linux/interrupt.h> #include <linux/memblock.h> #include <linux/log2.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/suspend.h> @@ -29,7 +31,6 @@ #include <linux/uaccess.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/machdep.h> @@ -37,6 +38,7 @@ #include <asm/disassemble.h> #include <asm/ppc-opcode.h> #include <asm/swiotlb.h> +#include <asm/setup.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> @@ -52,10 +54,10 @@ static void quirk_fsl_pcie_early(struct pci_dev *dev) /* if we aren't in host mode don't bother */ pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type); - if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) + if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE) return; - dev->class = PCI_CLASS_BRIDGE_PCI << 8; + dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; fsl_pcie_bus_fixup = 1; return; } @@ -179,6 +181,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci, static bool is_kdump(void) { struct device_node *node; + bool ret; node = of_find_node_by_type(NULL, "memory"); if (!node) { @@ -186,7 +189,10 @@ static bool is_kdump(void) return false; } - return of_property_read_bool(node, "linux,usable-memory"); + ret = of_property_read_bool(node, "linux,usable-memory"); + of_node_put(node); + + return ret; } /* atmu setup for fsl pci/pcie controller */ @@ -218,7 +224,7 @@ static void setup_pci_atmu(struct pci_controller *hose) * windows have implemented the default target value as 0xf * for CCSR space.In all Freescale legacy devices the target * of 0xf is reserved for local memory space. 9132 Rev1.0 - * now has local mempry space mapped to target 0x0 instead of + * now has local memory space mapped to target 0x0 instead of * 0xf. Hence adding a workaround to remove the target 0xf * defined for memory space from Inbound window attributes. 
*/ @@ -513,13 +519,14 @@ void fsl_pcibios_fixup_bus(struct pci_bus *bus) } } -int fsl_add_bridge(struct platform_device *pdev, int is_primary) +static int fsl_add_bridge(struct platform_device *pdev, int is_primary) { int len; struct pci_controller *hose; struct resource rsrc; const int *bus_range; u8 hdr_type, progif; + u32 class_code; struct device_node *dev; struct ccsr_pci __iomem *pci; u16 temp; @@ -574,7 +581,7 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) hose->ops = &fsl_indirect_pcie_ops; /* For PCIE read HEADER_TYPE to identify controller mode */ early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type); - if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) + if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE) goto no_bridge; } else { @@ -593,6 +600,13 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary) PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS; if (fsl_pcie_check_link(hose)) hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK; + /* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */ + if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) { + early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code); + class_code &= 0xff; + class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8; + early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code); + } } else { /* * Set PBFR(PCI Bus Function Register)[10] = 1 to @@ -753,7 +767,7 @@ static int __init mpc83xx_pcie_setup(struct pci_controller *hose, u32 cfg_bar; int ret = -ENOMEM; - pcie = zalloc_maybe_bootmem(sizeof(*pcie), GFP_KERNEL); + pcie = kzalloc(sizeof(*pcie), GFP_KERNEL); if (!pcie) return ret; @@ -929,7 +943,7 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose) return 0; } -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 static int mcheck_handle_load(struct pt_regs *regs, u32 inst) { unsigned int rd, ra, rb, d; @@ -1106,7 +1120,7 @@ static const struct of_device_id pci_ids[] = { struct device_node *fsl_pci_primary; -void fsl_pci_assign_primary(void) +void __init fsl_pci_assign_primary(void) { struct device_node *np; @@ -1125,6 +1139,19 @@ void fsl_pci_assign_primary(void) } /* + * If there's no PCI host bridge with ISA then check for + * PCI host bridge with alias "pci0" (first PCI host bridge). + */ + np = of_find_node_by_path("pci0"); + if (np && of_match_node(pci_ids, np) && of_device_is_available(np)) { + fsl_pci_primary = np; + of_node_put(np); + return; + } + if (np) + of_node_put(np); + + /* * If there's no PCI host bridge with ISA, arbitrarily * designate one as primary. This can go away once * various bugs with primary-less systems are fixed. 
@@ -1132,7 +1159,6 @@ void fsl_pci_assign_primary(void) for_each_matching_node(np, pci_ids) { if (of_device_is_available(np)) { fsl_pci_primary = np; - of_node_put(np); return; } } @@ -1232,7 +1258,7 @@ static void fsl_pci_syscore_do_suspend(struct pci_controller *hose) send_pme_turnoff_message(hose); } -static int fsl_pci_syscore_suspend(void) +static int fsl_pci_syscore_suspend(void *data) { struct pci_controller *hose, *tmp; @@ -1265,7 +1291,7 @@ static void fsl_pci_syscore_do_resume(struct pci_controller *hose) setup_pci_atmu(hose); } -static void fsl_pci_syscore_resume(void) +static void fsl_pci_syscore_resume(void *data) { struct pci_controller *hose, *tmp; @@ -1273,10 +1299,14 @@ static void fsl_pci_syscore_resume(void) fsl_pci_syscore_do_resume(hose); } -static struct syscore_ops pci_syscore_pm_ops = { +static const struct syscore_ops pci_syscore_pm_ops = { .suspend = fsl_pci_syscore_suspend, .resume = fsl_pci_syscore_resume, }; + +static struct syscore pci_syscore_pm = { + .ops = &pci_syscore_pm_ops, +}; #endif void fsl_pcibios_fixup_phb(struct pci_controller *phb) @@ -1327,12 +1357,13 @@ static struct platform_driver fsl_pci_driver = { .of_match_table = pci_ids, }, .probe = fsl_pci_probe, + .driver_managed_dma = true, }; static int __init fsl_pci_init(void) { #ifdef CONFIG_PM_SLEEP - register_syscore_ops(&pci_syscore_pm_ops); + register_syscore(&pci_syscore_pm); #endif return platform_driver_register(&fsl_pci_driver); } diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h index 1d7a41205695..3bc4ab9d8341 100644 --- a/arch/powerpc/sysdev/fsl_pci.h +++ b/arch/powerpc/sysdev/fsl_pci.h @@ -18,6 +18,7 @@ struct platform_device; #define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */ #define PCIE_LTSSM_L0 0x16 /* L0 state */ +#define PCIE_FSL_CSR_CLASSCODE 0x474 /* FSL GPEX CSR */ #define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */ #define PCIE_IP_REV_3_0 0x02080300 /* PCIE IP block version Rev3.0 */ #define PIWAR_EN 0x80000000 /* Enable */ @@ -111,7 +112,6 @@ struct ccsr_pci { }; -extern int fsl_add_bridge(struct platform_device *pdev, int is_primary); extern void fsl_pcibios_fixup_bus(struct pci_bus *bus); extern void fsl_pcibios_fixup_phb(struct pci_controller *phb); extern int mpc83xx_add_bridge(struct device_node *dev); @@ -120,7 +120,7 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose); extern struct device_node *fsl_pci_primary; #ifdef CONFIG_PCI -void fsl_pci_assign_primary(void); +void __init fsl_pci_assign_primary(void); #else static inline void fsl_pci_assign_primary(void) {} #endif diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 76896de970ca..9f6dd11c1344 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c @@ -13,9 +13,9 @@ #include <linux/export.h> #include <linux/suspend.h> #include <linux/delay.h> -#include <linux/device.h> +#include <linux/mod_devicetable.h> #include <linux/of_address.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> struct pmc_regs { __be32 devdisr; diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 5a95b8ea23d8..f9b214b299e7 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -23,16 +23,17 @@ #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> -#include <linux/device.h> +#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include 
<linux/delay.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/uaccess.h> #include <asm/machdep.h> +#include <asm/rio.h> #include "fsl_rio.h" @@ -69,10 +70,10 @@ static DEFINE_SPINLOCK(fsl_rio_config_lock); -#define __fsl_read_rio_config(x, addr, err, op) \ +#define ___fsl_read_rio_config(x, addr, err, op, barrier) \ __asm__ __volatile__( \ "1: "op" %1,0(%2)\n" \ - " eieio\n" \ + " "barrier"\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %1,-1\n" \ @@ -83,6 +84,14 @@ static DEFINE_SPINLOCK(fsl_rio_config_lock); : "=r" (err), "=r" (x) \ : "b" (addr), "i" (-EFAULT), "0" (err)) +#ifdef CONFIG_BOOKE +#define __fsl_read_rio_config(x, addr, err, op) \ + ___fsl_read_rio_config(x, addr, err, op, "mbar") +#else +#define __fsl_read_rio_config(x, addr, err, op) \ + ___fsl_read_rio_config(x, addr, err, op, "eieio") +#endif + void __iomem *rio_regs_win; void __iomem *rmu_regs_win; resource_size_t rio_law_start; @@ -90,7 +99,7 @@ resource_size_t rio_law_start; struct fsl_rio_dbell *dbell; struct fsl_rio_pw *pw; -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 int fsl_rio_mcheck_exception(struct pt_regs *regs) { const struct exception_table_entry *entry; @@ -108,7 +117,7 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs) __func__); out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); - regs_set_return_msr(regs, regs->msr | MSR_RI); + regs_set_recoverable(regs); regs_set_return_ip(regs, extable_fixup(entry)); return 1; } @@ -295,8 +304,8 @@ static void fsl_rio_inbound_mem_init(struct rio_priv *priv) out_be32(&priv->inb_atmu_regs[i].riwar, 0); } -int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, - u64 rstart, u64 size, u32 flags) +static int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, + u64 rstart, u64 size, u32 flags) { struct rio_priv *priv = mport->priv; u32 base_size; @@ -346,7 +355,7 @@ int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, return 0; } -void fsl_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart) +static void fsl_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart) { u32 win_start_shift, base_start_shift; struct rio_priv *priv = mport->priv; @@ -434,20 +443,17 @@ static inline void fsl_rio_info(struct device *dev, u32 ccsr) * master port with system-specific info, and registers the * master port with the RapidIO subsystem. 
*/ -int fsl_rio_setup(struct platform_device *dev) +static int fsl_rio_setup(struct platform_device *dev) { struct rio_ops *ops; struct rio_mport *port; struct rio_priv *priv; int rc = 0; - const u32 *dt_range, *cell, *port_index; + const u32 *port_index; u32 active_ports = 0; - struct resource regs, rmu_regs; struct device_node *np, *rmu_node; - int rlen; u32 ccsr; - u64 range_start, range_size; - int paw, aw, sw; + u64 range_start; u32 i; static int tmp; struct device_node *rmu_np[MAX_MSG_UNIT_NUM] = {NULL}; @@ -457,17 +463,7 @@ int fsl_rio_setup(struct platform_device *dev) return -ENODEV; } - rc = of_address_to_resource(dev->dev.of_node, 0, ®s); - if (rc) { - dev_err(&dev->dev, "Can't get %pOF property 'reg'\n", - dev->dev.of_node); - return -EFAULT; - } - dev_info(&dev->dev, "Of-device full name %pOF\n", - dev->dev.of_node); - dev_info(&dev->dev, "Regs: %pR\n", ®s); - - rio_regs_win = ioremap(regs.start, resource_size(®s)); + rio_regs_win = of_iomap(dev->dev.of_node, 0); if (!rio_regs_win) { dev_err(&dev->dev, "Unable to map rio register window\n"); rc = -ENOMEM; @@ -501,13 +497,9 @@ int fsl_rio_setup(struct platform_device *dev) rc = -ENOENT; goto err_rmu; } - rc = of_address_to_resource(rmu_node, 0, &rmu_regs); - if (rc) { - dev_err(&dev->dev, "Can't get %pOF property 'reg'\n", - rmu_node); - goto err_rmu; - } - rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs)); + rmu_regs_win = of_iomap(rmu_node, 0); + + of_node_put(rmu_node); if (!rmu_regs_win) { dev_err(&dev->dev, "Unable to map rmu register window\n"); rc = -ENOMEM; @@ -535,15 +527,12 @@ int fsl_rio_setup(struct platform_device *dev) dbell->bellirq = irq_of_parse_and_map(np, 1); dev_info(&dev->dev, "bellirq: %d\n", dbell->bellirq); - aw = of_n_addr_cells(np); - dt_range = of_get_property(np, "reg", &rlen); - if (!dt_range) { + if (of_property_read_reg(np, 0, &range_start, NULL)) { pr_err("%pOF: unable to find 'reg' property\n", np); rc = -ENOMEM; goto err_pw; } - range_start = of_read_number(dt_range, aw); dbell->dbell_regs = (struct rio_dbell_regs *)(rmu_regs_win + (u32)range_start); @@ -563,19 +552,18 @@ int fsl_rio_setup(struct platform_device *dev) pw->dev = &dev->dev; pw->pwirq = irq_of_parse_and_map(np, 0); dev_info(&dev->dev, "pwirq: %d\n", pw->pwirq); - aw = of_n_addr_cells(np); - dt_range = of_get_property(np, "reg", &rlen); - if (!dt_range) { + if (of_property_read_reg(np, 0, &range_start, NULL)) { pr_err("%pOF: unable to find 'reg' property\n", np); rc = -ENOMEM; goto err; } - range_start = of_read_number(dt_range, aw); pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start); /*set up ports node*/ for_each_child_of_node(dev->dev.of_node, np) { + struct resource res; + port_index = of_get_property(np, "cell-index", NULL); if (!port_index) { dev_err(&dev->dev, "Can't get %pOF property 'cell-index'\n", @@ -583,32 +571,14 @@ int fsl_rio_setup(struct platform_device *dev) continue; } - dt_range = of_get_property(np, "ranges", &rlen); - if (!dt_range) { + if (of_range_to_resource(np, 0, &res)) { dev_err(&dev->dev, "Can't get %pOF property 'ranges'\n", np); continue; } - /* Get node address wide */ - cell = of_get_property(np, "#address-cells", NULL); - if (cell) - aw = *cell; - else - aw = of_n_addr_cells(np); - /* Get node size wide */ - cell = of_get_property(np, "#size-cells", NULL); - if (cell) - sw = *cell; - else - sw = of_n_size_cells(np); - /* Get parent address wide wide */ - paw = of_n_addr_cells(np); - range_start = of_read_number(dt_range + aw, paw); - range_size = 
of_read_number(dt_range + aw + paw, sw); - - dev_info(&dev->dev, "%pOF: LAW start 0x%016llx, size 0x%016llx.\n", - np, range_start, range_size); + dev_info(&dev->dev, "%pOF: LAW %pR\n", + np, &res); port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); if (!port) @@ -631,9 +601,7 @@ int fsl_rio_setup(struct platform_device *dev) } INIT_LIST_HEAD(&port->dbells); - port->iores.start = range_start; - port->iores.end = port->iores.start + range_size - 1; - port->iores.flags = IORESOURCE_MEM; + port->iores = res; /* struct copy */ port->iores.name = "rio_io_win"; if (request_resource(&iomem_resource, &port->iores) < 0) { diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index 7a5e2e2b9d06..f956591cb64e 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c @@ -23,8 +23,8 @@ #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> +#include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/of_platform.h> #include <linux/slab.h> #include "fsl_rio.h" @@ -359,7 +359,7 @@ out: return IRQ_HANDLED; } -void msg_unit_error_handler(void) +static void msg_unit_error_handler(void) { /*XXX: Error recovery is not implemented, we just clear errors */ @@ -1067,9 +1067,6 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node) struct rio_priv *priv; struct fsl_rmu *rmu; u64 msg_start; - const u32 *msg_addr; - int mlen; - int aw; if (!mport || !mport->priv) return -EINVAL; @@ -1086,16 +1083,12 @@ int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node) if (!rmu) return -ENOMEM; - aw = of_n_addr_cells(node); - msg_addr = of_get_property(node, "reg", &mlen); - if (!msg_addr) { + if (of_property_read_reg(node, 0, &msg_start, NULL)) { pr_err("%pOF: unable to find 'reg' property of message-unit\n", node); kfree(rmu); return -ENOMEM; } - msg_start = of_read_number(msg_addr, aw); - rmu->msg_regs = (struct rio_msg_regs *) (rmu_regs_win + (u32)msg_start); diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index 90ad16161604..3949ceb79e64 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -19,28 +19,21 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/phy.h> #include <linux/spi/spi.h> #include <linux/fsl_devices.h> -#include <linux/fs_enet_pd.h> -#include <linux/fs_uart_pd.h> #include <linux/reboot.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/time.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <sysdev/fsl_soc.h> #include <mm/mmu_decl.h> #include <asm/cpm2.h> #include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */ -extern void init_fcc_ioports(struct fs_platform_info*); -extern void init_fec_ioports(struct fs_platform_info*); -extern void init_smc_ioports(struct fs_uart_platform_info*); static phys_addr_t immrbase = -1; phys_addr_t get_immrbase(void) @@ -52,18 +45,10 @@ phys_addr_t get_immrbase(void) soc = of_find_node_by_type(NULL, "soc"); if (soc) { - int size; - u32 naddr; - const __be32 *prop = of_get_property(soc, "#address-cells", &size); + struct resource res; - if (prop && size == 4) - naddr = be32_to_cpup(prop); - else - naddr = 2; - - prop = of_get_property(soc, "ranges", &size); - if (prop) - immrbase = of_translate_address(soc, prop + naddr); + if (!of_range_to_resource(soc, 0, &res)) + immrbase = res.start; of_node_put(soc); } @@ -175,7 +160,7 @@ static int __init setup_rstcr(void) }; 
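The fsl_rmu and fsl_soc hunks above replace open-coded "reg"/"ranges" parsing (of_get_property() plus of_read_number() with hand-counted address and size cells) with the generic OF helpers. A minimal sketch of that helper-based pattern, with a hypothetical function name rather than code from this series:

#include <linux/of.h>
#include <linux/of_address.h>

/* Hypothetical helper illustrating of_property_read_reg()/of_range_to_resource(). */
static int __init example_get_unit_base(struct device_node *np, u64 *base)
{
        struct resource res;
        u64 size;

        /* Nth "reg" tuple decoded per #address-cells/#size-cells, untranslated. */
        if (of_property_read_reg(np, 0, base, &size))
                return -ENODEV;

        /* First "ranges" entry as a CPU resource, as get_immrbase() now does. */
        if (!of_range_to_resource(np, 0, &res))
                pr_debug("%pOF: mapped at %pR\n", np, &res);

        return 0;
}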
for_each_node_by_name(np, "global-utilities") { - if ((of_get_property(np, "fsl,has-rstcr", NULL))) { + if (of_property_read_bool(np, "fsl,has-rstcr")) { rstcr = of_iomap(np, 0) + 0xb0; if (!rstcr) { printk (KERN_ERR "Error: reset control " diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c index 02553a8ce191..0bc3f0b36528 100644 --- a/arch/powerpc/sysdev/ge/ge_pic.c +++ b/arch/powerpc/sysdev/ge/ge_pic.c @@ -14,12 +14,14 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/spinlock.h> #include <asm/byteorder.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/irq.h> #include "ge_pic.h" @@ -150,7 +152,7 @@ static struct irq_chip gef_pic_chip = { }; -/* When an interrupt is being configured, this call allows some flexibilty +/* When an interrupt is being configured, this call allows some flexibility * in deciding which irq_chip structure is used */ static int gef_pic_host_map(struct irq_domain *h, unsigned int virq, @@ -212,8 +214,9 @@ void __init gef_pic_init(struct device_node *np) } /* Setup an irq_domain structure */ - gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS, - &gef_pic_host_ops, NULL); + gef_pic_irq_host = irq_domain_create_linear(of_fwnode_handle(np), + GEF_PIC_NUM_IRQS, + &gef_pic_host_ops, NULL); if (gef_pic_irq_host == NULL) return; @@ -242,7 +245,7 @@ unsigned int gef_pic_get_irq(void) if (active & (0x1 << hwirq)) break; } - virq = irq_linear_revmap(gef_pic_irq_host, + virq = irq_find_mapping(gef_pic_irq_host, (irq_hw_number_t)hwirq); } diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c index aaba0b809032..7dce8278b71e 100644 --- a/arch/powerpc/sysdev/grackle.c +++ b/arch/powerpc/sysdev/grackle.c @@ -9,33 +9,17 @@ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/of.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/grackle.h> #define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \ | (((o) & ~3) << 24)) -#define GRACKLE_PICR1_STG 0x00000040 #define GRACKLE_PICR1_LOOPSNOOP 0x00000010 -/* N.B. this is called before bridges is initialized, so we can't - use grackle_pcibios_{read,write}_config_dword. */ -static inline void grackle_set_stg(struct pci_controller* bp, int enable) -{ - unsigned int val; - - out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); - val = in_le32(bp->cfg_data); - val = enable? (val | GRACKLE_PICR1_STG) : - (val & ~GRACKLE_PICR1_STG); - out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8)); - out_le32(bp->cfg_data, val); - (void)in_le32(bp->cfg_data); -} - static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable) { unsigned int val; @@ -56,7 +40,4 @@ void __init setup_grackle(struct pci_controller *hose) pci_add_flags(PCI_REASSIGN_ALL_BUS); if (of_machine_is_compatible("AAPL,PowerBook1998")) grackle_set_loop_snoop(hose, 1); -#if 0 /* Disabled for now, HW problems ??? 
*/ - grackle_set_stg(hose, 1); -#endif } diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index dc1a151c63d7..99bb2b916949 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -6,11 +6,11 @@ #include <linux/ioport.h> #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/i8259.h> -#include <asm/prom.h> static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */ @@ -208,7 +208,7 @@ static const struct irq_domain_ops i8259_host_ops = { .xlate = i8259_host_xlate, }; -struct irq_domain *i8259_get_host(void) +struct irq_domain *__init i8259_get_host(void) { return i8259_host; } @@ -260,8 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr) raw_spin_unlock_irqrestore(&i8259_lock, flags); /* create a legacy host */ - i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, - &i8259_host_ops, NULL); + i8259_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0, + &i8259_host_ops, NULL); if (i8259_host == NULL) { printk(KERN_ERR "i8259: failed to allocate irq host !\n"); return; diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c index 09b36617425e..1aacb403a010 100644 --- a/arch/powerpc/sysdev/indirect_pci.c +++ b/arch/powerpc/sysdev/indirect_pci.c @@ -12,7 +12,6 @@ #include <linux/init.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 7638a50a7c38..290ba8427239 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -18,9 +18,10 @@ #include <linux/device.h> #include <linux/spinlock.h> #include <linux/fsl_devices.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> #include <asm/irq.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/ipic.h> #include "ipic.h" @@ -710,8 +711,9 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) if (ipic == NULL) return NULL; - ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS, - &ipic_host_ops, ipic); + ipic->irqhost = irq_domain_create_linear(of_fwnode_handle(node), + NR_IPIC_INTS, + &ipic_host_ops, ipic); if (ipic->irqhost == NULL) { kfree(ipic); return NULL; @@ -756,18 +758,17 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) ipic_write(ipic->regs, IPIC_SEMSR, temp); primary_ipic = ipic; - irq_set_default_host(primary_ipic->irqhost); + irq_set_default_domain(primary_ipic->irqhost); ipic_write(ipic->regs, IPIC_SIMSR_H, 0); ipic_write(ipic->regs, IPIC_SIMSR_L, 0); - printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS, - primary_ipic->regs); + pr_info("IPIC (%d IRQ sources) at MMIO %pa\n", NR_IPIC_INTS, &res.start); return ipic; } -void ipic_set_default_priority(void) +void __init ipic_set_default_priority(void) { ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT); ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT); @@ -800,7 +801,7 @@ unsigned int ipic_get_irq(void) if (irq == 0) /* 0 --> no irq is pending */ return 0; - return irq_linear_revmap(primary_ipic->irqhost, irq); + return irq_find_mapping(primary_ipic->irqhost, irq); } #ifdef CONFIG_SUSPEND @@ -816,7 +817,7 @@ static struct { u32 sercr; } ipic_saved_state; -static int ipic_suspend(void) +static int ipic_suspend(void *data) { struct ipic *ipic = primary_ipic; @@ -847,7 +848,7 @@ static int ipic_suspend(void) 
return 0; } -static void ipic_resume(void) +static void ipic_resume(void *data) { struct ipic *ipic = primary_ipic; @@ -869,18 +870,22 @@ static void ipic_resume(void) #define ipic_resume NULL #endif -static struct syscore_ops ipic_syscore_ops = { +static const struct syscore_ops ipic_syscore_ops = { .suspend = ipic_suspend, .resume = ipic_resume, }; +static struct syscore ipic_syscore = { + .ops = &ipic_syscore_ops, +}; + static int __init init_ipic_syscore(void) { if (!primary_ipic || !primary_ipic->regs) return -ENODEV; printk(KERN_DEBUG "Registering ipic system core operations\n"); - register_syscore_ops(&ipic_syscore_ops); + register_syscore(&ipic_syscore); return 0; } diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c index 628f9b759c84..eb48210ef98e 100644 --- a/arch/powerpc/sysdev/mmio_nvram.c +++ b/arch/powerpc/sysdev/mmio_nvram.c @@ -10,12 +10,12 @@ #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/of_address.h> #include <linux/spinlock.h> #include <linux/types.h> #include <asm/machdep.h> #include <asm/nvram.h> -#include <asm/prom.h> static void __iomem *mmio_nvram_start; static long mmio_nvram_len; diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c index 834a6d7fbd88..58cee28e2399 100644 --- a/arch/powerpc/sysdev/mpc5xxx_clocks.c +++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c @@ -1,31 +1,36 @@ // SPDX-License-Identifier: GPL-2.0 -/** - * mpc5xxx_get_bus_frequency - Find the bus frequency for a device - * @node: device node - * - * Returns bus frequency (IPS on MPC512x, IPB on MPC52xx), - * or 0 if the bus frequency cannot be found. - */ #include <linux/kernel.h> -#include <linux/of_platform.h> #include <linux/export.h> +#include <linux/property.h> + #include <asm/mpc5xxx.h> -unsigned long mpc5xxx_get_bus_frequency(struct device_node *node) +/** + * mpc5xxx_fwnode_get_bus_frequency - Find the bus frequency for a firmware node + * @fwnode: firmware node + * + * Returns bus frequency (IPS on MPC512x, IPB on MPC52xx), + * or 0 if the bus frequency cannot be found. + */ +unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode) { - const unsigned int *p_bus_freq = NULL; + struct fwnode_handle *parent; + u32 bus_freq; + int ret; - of_node_get(node); - while (node) { - p_bus_freq = of_get_property(node, "bus-frequency", NULL); - if (p_bus_freq) - break; + ret = fwnode_property_read_u32(fwnode, "bus-frequency", &bus_freq); + if (!ret) + return bus_freq; - node = of_get_next_parent(node); + fwnode_for_each_parent_node(fwnode, parent) { + ret = fwnode_property_read_u32(parent, "bus-frequency", &bus_freq); + if (!ret) { + fwnode_handle_put(parent); + return bus_freq; + } } - of_node_put(node); - return p_bus_freq ? *p_bus_freq : 0; + return 0; } -EXPORT_SYMBOL(mpc5xxx_get_bus_frequency); +EXPORT_SYMBOL(mpc5xxx_fwnode_get_bus_frequency); diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 995fb2ada507..67e51998d1ae 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -27,9 +27,12 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/slab.h> +#include <linux/string_choices.h> #include <linux/syscore_ops.h> #include <linux/ratelimit.h> #include <linux/pgtable.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <asm/ptrace.h> #include <asm/signal.h> @@ -47,7 +50,7 @@ #define DBG(fmt...) 
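The mpc5xxx_clocks.c rewrite above moves the bus-frequency lookup onto the fwnode property API so it no longer depends on a struct device_node. A hedged sketch of how a driver would typically consume it; the dev_fwnode()-based wrapper shown here is an assumption, not part of this diff:

#include <linux/device.h>
#include <linux/property.h>
#include <asm/mpc5xxx.h>

/* Hypothetical wrapper: resolve the bus frequency starting from a struct device. */
static unsigned long example_get_bus_frequency(struct device *dev)
{
        /* Walks "bus-frequency" up the firmware-node parents; returns 0 if absent. */
        return mpc5xxx_fwnode_get_bus_frequency(dev_fwnode(dev));
}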
#endif -struct bus_type mpic_subsys = { +const struct bus_type mpic_subsys = { .name = "mpic", .dev_name = "mpic", }; @@ -353,7 +356,7 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic) mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK); r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0)); - if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { + if (r == swab32(MPIC_VECPRI_MASK)) { printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); mpic->flags |= MPIC_BROKEN_IPI; } @@ -472,9 +475,9 @@ static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); } - printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", - PCI_SLOT(devfn), PCI_FUNC(devfn), - flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr); + pr_debug("mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n", + PCI_SLOT(devfn), PCI_FUNC(devfn), + str_enabled_disabled(flags & HT_MSI_FLAGS_ENABLE), addr); if (!(flags & HT_MSI_FLAGS_ENABLE)) writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS); @@ -1258,11 +1261,11 @@ struct mpic * __init mpic_alloc(struct device_node *node, } /* Read extra device-tree properties into the flags variable */ - if (of_get_property(node, "big-endian", NULL)) + if (of_property_read_bool(node, "big-endian")) flags |= MPIC_BIG_ENDIAN; - if (of_get_property(node, "pic-no-reset", NULL)) + if (of_property_read_bool(node, "pic-no-reset")) flags |= MPIC_NO_RESET; - if (of_get_property(node, "single-cpu-affinity", NULL)) + if (of_property_read_bool(node, "single-cpu-affinity")) flags |= MPIC_SINGLE_DEST_CPU; if (of_device_is_compatible(node, "fsl,mpic")) { flags |= MPIC_FSL | MPIC_LARGE_VECTORS; @@ -1323,8 +1326,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, psrc = of_get_property(mpic->node, "protected-sources", &psize); if (psrc) { /* Allocate a bitmap with one bit per interrupt */ - unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1); - mpic->protected = kcalloc(mapsize, sizeof(long), GFP_KERNEL); + mpic->protected = bitmap_zalloc(intvec_top + 1, GFP_KERNEL); BUG_ON(mpic->protected == NULL); for (i = 0; i < psize/sizeof(u32); i++) { if (psrc[i] > intvec_top) @@ -1405,10 +1407,8 @@ struct mpic * __init mpic_alloc(struct device_node *node, * with device trees generated by older versions of QEMU. * fsl_version will be zero if MPIC_FSL is not set. 
*/ - if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT)) { - WARN_ON(ppc_md.get_irq != mpic_get_coreint_irq); + if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT)) ppc_md.get_irq = mpic_get_irq; - } /* Reset */ @@ -1484,9 +1484,9 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); mpic->isu_mask = (1 << mpic->isu_shift) - 1; - mpic->irqhost = irq_domain_add_linear(mpic->node, - intvec_top, - &mpic_host_ops, mpic); + mpic->irqhost = irq_domain_create_linear(of_fwnode_handle(mpic->node), + intvec_top, + &mpic_host_ops, mpic); /* * FIXME: The code leaks the MPIC object and mappings here; this @@ -1521,7 +1521,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, if (!(mpic->flags & MPIC_SECONDARY)) { mpic_primary = mpic; - irq_set_default_host(mpic->irqhost); + irq_set_default_domain(mpic->irqhost); } return mpic; @@ -1786,7 +1786,7 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) return 0; } - return irq_linear_revmap(mpic->irqhost, src); + return irq_find_mapping(mpic->irqhost, src); } unsigned int mpic_get_one_irq(struct mpic *mpic) @@ -1824,7 +1824,7 @@ unsigned int mpic_get_coreint_irq(void) return 0; } - return irq_linear_revmap(mpic->irqhost, src); + return irq_find_mapping(mpic->irqhost, src); #else return 0; #endif @@ -1840,7 +1840,7 @@ unsigned int mpic_get_mcirq(void) } #ifdef CONFIG_SMP -void mpic_request_ipis(void) +void __init mpic_request_ipis(void) { struct mpic *mpic = mpic_primary; int i; @@ -1944,7 +1944,7 @@ static void mpic_suspend_one(struct mpic *mpic) } } -static int mpic_suspend(void) +static int mpic_suspend(void *data) { struct mpic *mpic = mpics; @@ -1986,7 +1986,7 @@ static void mpic_resume_one(struct mpic *mpic) } /* end for loop */ } -static void mpic_resume(void) +static void mpic_resume(void *data) { struct mpic *mpic = mpics; @@ -1996,19 +1996,23 @@ static void mpic_resume(void) } } -static struct syscore_ops mpic_syscore_ops = { +static const struct syscore_ops mpic_syscore_ops = { .resume = mpic_resume, .suspend = mpic_suspend, }; +static struct syscore mpic_syscore = { + .ops = &mpic_syscore_ops, +}; + static int mpic_init_sys(void) { int rc; - register_syscore_ops(&mpic_syscore_ops); + register_syscore(&mpic_syscore); rc = subsys_system_register(&mpic_subsys, NULL); if (rc) { - unregister_syscore_ops(&mpic_syscore_ops); + unregister_syscore(&mpic_syscore); pr_err("mpic: Failed to register subsystem!\n"); return rc; } diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h index 73a31a429d46..bb460ff57a06 100644 --- a/arch/powerpc/sysdev/mpic.h +++ b/arch/powerpc/sysdev/mpic.h @@ -8,8 +8,8 @@ #ifdef CONFIG_PCI_MSI extern void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq); -extern int mpic_msi_init_allocator(struct mpic *mpic); -extern int mpic_u3msi_init(struct mpic *mpic); +int __init mpic_msi_init_allocator(struct mpic *mpic); +int __init mpic_u3msi_init(struct mpic *mpic); #else static inline void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq) @@ -24,7 +24,7 @@ static inline int mpic_u3msi_init(struct mpic *mpic) #endif #if defined(CONFIG_PCI_MSI) && defined(CONFIG_PPC_PASEMI) -int mpic_pasemi_msi_init(struct mpic *mpic); +int __init mpic_pasemi_msi_init(struct mpic *mpic); #else static inline int mpic_pasemi_msi_init(struct mpic *mpic) { return -1; } #endif @@ -37,8 +37,8 @@ extern void mpic_reset_core(int cpu); #ifdef CONFIG_FSL_SOC extern int mpic_map_error_int(struct mpic *mpic, unsigned int virq, 
irq_hw_number_t hw); -extern void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum); -extern int mpic_setup_error_int(struct mpic *mpic, int intvec); +void __init mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum); +int __init mpic_setup_error_int(struct mpic *mpic, int intvec); #else static inline int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw) { diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 36ec0bdd8b63..7b449cc51aef 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -7,19 +7,21 @@ */ #include <linux/list.h> -#include <linux/of_platform.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/export.h> #include <linux/slab.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/mpic_msgr.h> #define MPIC_MSGR_REGISTERS_PER_BLOCK 4 #define MPIC_MSGR_STRIDE 0x10 -#define MPIC_MSGR_MER_OFFSET 0x100 +#define MPIC_MSGR_MER_OFFSET (0x100 / sizeof(u32)) #define MSGR_INUSE 0 #define MSGR_FREE 1 @@ -99,7 +101,7 @@ void mpic_msgr_disable(struct mpic_msgr *msgr) EXPORT_SYMBOL_GPL(mpic_msgr_disable); /* The following three functions are used to compute the order and number of - * the message register blocks. They are clearly very inefficent. However, + * the message register blocks. They are clearly very inefficient. However, * they are called *only* a few times during device initialization. */ static unsigned int mpic_msgr_number_of_blocks(void) @@ -115,11 +117,12 @@ static unsigned int mpic_msgr_number_of_blocks(void) for (;;) { snprintf(buf, sizeof(buf), "mpic-msgr-block%d", count); - if (!of_find_property(aliases, buf, NULL)) + if (!of_property_present(aliases, buf)) break; count += 1; } + of_node_put(aliases); } return count; @@ -143,12 +146,18 @@ static int mpic_msgr_block_number(struct device_node *node) for (index = 0; index < number_of_blocks; ++index) { struct property *prop; + struct device_node *tn; snprintf(buf, sizeof(buf), "mpic-msgr-block%d", index); prop = of_find_property(aliases, buf, NULL); - if (node == of_find_node_by_path(prop->value)) + tn = of_find_node_by_path(prop->value); + if (node == tn) { + of_node_put(tn); break; + } + of_node_put(tn); } + of_node_put(aliases); return index == number_of_blocks ? 
-1 : index; } @@ -226,7 +235,7 @@ static int mpic_msgr_probe(struct platform_device *dev) reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i; msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE; - msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET); + msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET; msgr->in_use = MSGR_FREE; msgr->num = i; raw_spin_lock_init(&msgr->lock); diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c index 4695c04320ae..34246c8e01c2 100644 --- a/arch/powerpc/sysdev/mpic_msi.c +++ b/arch/powerpc/sysdev/mpic_msi.c @@ -4,10 +4,11 @@ */ #include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/of_irq.h> #include <linux/bitmap.h> #include <linux/msi.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> @@ -24,7 +25,7 @@ void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq) } #ifdef CONFIG_MPIC_U3_HT_IRQS -static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) +static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) { irq_hw_number_t hwirq; const struct irq_domain_ops *ops = mpic->irqhost->ops; @@ -37,7 +38,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) /* Reserve source numbers we know are reserved in the HW. * * This is a bit of a mix of U3 and U4 reserves but that's going - * to work fine, we have plenty enugh numbers left so let's just + * to work fine, we have plenty enough numbers left so let's just * mark anything we don't like reserved. */ for (i = 0; i < 8; i++) @@ -68,13 +69,13 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) return 0; } #else -static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) +static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) { return -1; } #endif -int mpic_msi_init_allocator(struct mpic *mpic) +int __init mpic_msi_init_allocator(struct mpic *mpic) { int rc; diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c index a42a20280035..60f5b3934b51 100644 --- a/arch/powerpc/sysdev/mpic_timer.c +++ b/arch/powerpc/sysdev/mpic_timer.c @@ -16,7 +16,6 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/syscore_ops.h> #include <sysdev/fsl_soc.h> @@ -255,7 +254,7 @@ EXPORT_SYMBOL(mpic_start_timer); /** * mpic_stop_timer - stop hardware timer - * @handle: the timer to be stoped + * @handle: the timer to be stopped * * The timer periodically generates an interrupt. Unless user stops the timer. 
*/ @@ -384,7 +383,7 @@ struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev, } EXPORT_SYMBOL(mpic_request_timer); -static int timer_group_get_freq(struct device_node *np, +static int __init timer_group_get_freq(struct device_node *np, struct timer_group_priv *priv) { u32 div; @@ -411,7 +410,7 @@ static int timer_group_get_freq(struct device_node *np, return 0; } -static int timer_group_get_irq(struct device_node *np, +static int __init timer_group_get_irq(struct device_node *np, struct timer_group_priv *priv) { const u32 all_timer[] = { 0, TIMERS_PER_GROUP }; @@ -459,7 +458,7 @@ static int timer_group_get_irq(struct device_node *np, return 0; } -static void timer_group_init(struct device_node *np) +static void __init timer_group_init(struct device_node *np) { struct timer_group_priv *priv; unsigned int i = 0; @@ -520,7 +519,7 @@ out: kfree(priv); } -static void mpic_timer_resume(void) +static void mpic_timer_resume(void *data) { struct timer_group_priv *priv; @@ -536,10 +535,14 @@ static const struct of_device_id mpic_timer_ids[] = { {}, }; -static struct syscore_ops mpic_timer_syscore_ops = { +static const struct syscore_ops mpic_timer_syscore_ops = { .resume = mpic_timer_resume, }; +static struct syscore mpic_timer_syscore = { + .ops = &mpic_timer_syscore_ops, +}; + static int __init mpic_timer_init(void) { struct device_node *np = NULL; @@ -547,7 +550,7 @@ static int __init mpic_timer_init(void) for_each_matching_node(np, mpic_timer_ids) timer_group_init(np); - register_syscore_ops(&mpic_timer_syscore_ops); + register_syscore(&mpic_timer_syscore); if (list_empty(&timer_group_list)) return -ENODEV; diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index 3861023d378a..492cb03c0b62 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c @@ -5,9 +5,9 @@ */ #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/msi.h> #include <asm/mpic.h> -#include <asm/prom.h> #include <asm/hw_irq.h> #include <asm/ppc-pci.h> #include <asm/msi_bitmap.h> @@ -78,7 +78,7 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) /* U4 PCIe MSIs need to write to the special register in * the bridge that generates interrupts. There should be - * theorically a register at 0xf8005000 where you just write + * theoretically a register at 0xf8005000 where you just write * the MSI number and that triggers the right interrupt, but * unfortunately, this is busted in HW, the bridge endian swaps * the value and hits the wrong nibble in the register. 
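The mpic_timer, mpic_msi and mpic.h hunks above tag helpers that are only reachable from initcalls with __init, so their text can be discarded once boot completes. A small illustration of the rule, using hypothetical names rather than code from this series:

#include <linux/init.h>

/* Hypothetical example: __init is safe because the only caller is itself __init. */
static int __init example_setup_one(void)
{
        return 0;
}

static int __init example_init(void)
{
        return example_setup_one();
}
subsys_initcall(example_init);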
@@ -104,17 +104,13 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) struct msi_desc *entry; irq_hw_number_t hwirq; - for_each_pci_msi_entry(entry, pdev) { - if (!entry->irq) - continue; - + msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) { hwirq = virq_to_hw(entry->irq); irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); + entry->irq = 0; msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1); } - - return; } static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) @@ -136,7 +132,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) return -ENXIO; } - for_each_pci_msi_entry(entry, pdev) { + msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) { hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1); if (hwirq < 0) { pr_debug("u3msi: failed allocating hwirq\n"); @@ -174,7 +170,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) return 0; } -int mpic_u3msi_init(struct mpic *mpic) +int __init mpic_u3msi_init(struct mpic *mpic) { int rc; struct pci_controller *phb; diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index fdd3e17150fc..456a4f64ae0a 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -8,6 +8,7 @@ #include <linux/kmemleak.h> #include <linux/bitmap.h> #include <linux/memblock.h> +#include <linux/of.h> #include <asm/msi_bitmap.h> #include <asm/setup.h> @@ -123,10 +124,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, if (bmp->bitmap_from_slab) bmp->bitmap = kzalloc(size, GFP_KERNEL); else { - bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); - if (!bmp->bitmap) - panic("%s: Failed to allocate %u bytes\n", __func__, - size); + bmp->bitmap = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); /* the bitmap won't be freed from memblock allocator */ kmemleak_not_leak(bmp->bitmap); } diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c index 1f408d34a6a7..2211937d3788 100644 --- a/arch/powerpc/sysdev/of_rtc.c +++ b/arch/powerpc/sysdev/of_rtc.c @@ -5,12 +5,14 @@ * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. */ #include <linux/kernel.h> -#include <linux/of.h> #include <linux/init.h> +#include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/slab.h> +#include <asm/prom.h> + static __initdata struct { const char *compatible; char *plat_name; diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c deleted file mode 100644 index 9c8744e09a9c..000000000000 --- a/arch/powerpc/sysdev/pmi.c +++ /dev/null @@ -1,268 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * pmi driver - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 - * - * PMI (Platform Management Interrupt) is a way to communicate - * with the BMC (Baseboard Management Controller) via interrupts. - * Unlike IPMI it is bidirectional and has a low latency. 
- * - * Author: Christian Krafft <krafft@de.ibm.com> - */ - -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/completion.h> -#include <linux/spinlock.h> -#include <linux/module.h> -#include <linux/workqueue.h> -#include <linux/of_device.h> -#include <linux/of_platform.h> - -#include <asm/io.h> -#include <asm/pmi.h> -#include <asm/prom.h> - -struct pmi_data { - struct list_head handler; - spinlock_t handler_spinlock; - spinlock_t pmi_spinlock; - struct mutex msg_mutex; - pmi_message_t msg; - struct completion *completion; - struct platform_device *dev; - int irq; - u8 __iomem *pmi_reg; - struct work_struct work; -}; - -static struct pmi_data *data; - -static irqreturn_t pmi_irq_handler(int irq, void *dev_id) -{ - u8 type; - int rc; - - spin_lock(&data->pmi_spinlock); - - type = ioread8(data->pmi_reg + PMI_READ_TYPE); - pr_debug("pmi: got message of type %d\n", type); - - if (type & PMI_ACK && !data->completion) { - printk(KERN_WARNING "pmi: got unexpected ACK message.\n"); - rc = -EIO; - goto unlock; - } - - if (data->completion && !(type & PMI_ACK)) { - printk(KERN_WARNING "pmi: expected ACK, but got %d\n", type); - rc = -EIO; - goto unlock; - } - - data->msg.type = type; - data->msg.data0 = ioread8(data->pmi_reg + PMI_READ_DATA0); - data->msg.data1 = ioread8(data->pmi_reg + PMI_READ_DATA1); - data->msg.data2 = ioread8(data->pmi_reg + PMI_READ_DATA2); - rc = 0; -unlock: - spin_unlock(&data->pmi_spinlock); - - if (rc == -EIO) { - rc = IRQ_HANDLED; - goto out; - } - - if (data->msg.type & PMI_ACK) { - complete(data->completion); - rc = IRQ_HANDLED; - goto out; - } - - schedule_work(&data->work); - - rc = IRQ_HANDLED; -out: - return rc; -} - - -static const struct of_device_id pmi_match[] = { - { .type = "ibm,pmi", .name = "ibm,pmi" }, - { .type = "ibm,pmi" }, - {}, -}; - -MODULE_DEVICE_TABLE(of, pmi_match); - -static void pmi_notify_handlers(struct work_struct *work) -{ - struct pmi_handler *handler; - - spin_lock(&data->handler_spinlock); - list_for_each_entry(handler, &data->handler, node) { - pr_debug("pmi: notifying handler %p\n", handler); - if (handler->type == data->msg.type) - handler->handle_pmi_message(data->msg); - } - spin_unlock(&data->handler_spinlock); -} - -static int pmi_of_probe(struct platform_device *dev) -{ - struct device_node *np = dev->dev.of_node; - int rc; - - if (data) { - printk(KERN_ERR "pmi: driver has already been initialized.\n"); - rc = -EBUSY; - goto out; - } - - data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL); - if (!data) { - printk(KERN_ERR "pmi: could not allocate memory.\n"); - rc = -ENOMEM; - goto out; - } - - data->pmi_reg = of_iomap(np, 0); - if (!data->pmi_reg) { - printk(KERN_ERR "pmi: invalid register address.\n"); - rc = -EFAULT; - goto error_cleanup_data; - } - - INIT_LIST_HEAD(&data->handler); - - mutex_init(&data->msg_mutex); - spin_lock_init(&data->pmi_spinlock); - spin_lock_init(&data->handler_spinlock); - - INIT_WORK(&data->work, pmi_notify_handlers); - - data->dev = dev; - - data->irq = irq_of_parse_and_map(np, 0); - if (!data->irq) { - printk(KERN_ERR "pmi: invalid interrupt.\n"); - rc = -EFAULT; - goto error_cleanup_iomap; - } - - rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL); - if (rc) { - printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n", - data->irq, rc); - goto error_cleanup_iomap; - } - - printk(KERN_INFO "pmi: found pmi device at addr %p.\n", data->pmi_reg); - - goto out; - -error_cleanup_iomap: - iounmap(data->pmi_reg); - -error_cleanup_data: - kfree(data); - -out: - return rc; -} 
- -static int pmi_of_remove(struct platform_device *dev) -{ - struct pmi_handler *handler, *tmp; - - free_irq(data->irq, NULL); - iounmap(data->pmi_reg); - - spin_lock(&data->handler_spinlock); - - list_for_each_entry_safe(handler, tmp, &data->handler, node) - list_del(&handler->node); - - spin_unlock(&data->handler_spinlock); - - kfree(data); - data = NULL; - - return 0; -} - -static struct platform_driver pmi_of_platform_driver = { - .probe = pmi_of_probe, - .remove = pmi_of_remove, - .driver = { - .name = "pmi", - .of_match_table = pmi_match, - }, -}; -module_platform_driver(pmi_of_platform_driver); - -int pmi_send_message(pmi_message_t msg) -{ - unsigned long flags; - DECLARE_COMPLETION_ONSTACK(completion); - - if (!data) - return -ENODEV; - - mutex_lock(&data->msg_mutex); - - data->msg = msg; - pr_debug("pmi_send_message: msg is %08x\n", *(u32*)&msg); - - data->completion = &completion; - - spin_lock_irqsave(&data->pmi_spinlock, flags); - iowrite8(msg.data0, data->pmi_reg + PMI_WRITE_DATA0); - iowrite8(msg.data1, data->pmi_reg + PMI_WRITE_DATA1); - iowrite8(msg.data2, data->pmi_reg + PMI_WRITE_DATA2); - iowrite8(msg.type, data->pmi_reg + PMI_WRITE_TYPE); - spin_unlock_irqrestore(&data->pmi_spinlock, flags); - - pr_debug("pmi_send_message: wait for completion\n"); - - wait_for_completion_interruptible_timeout(data->completion, - PMI_TIMEOUT); - - data->completion = NULL; - - mutex_unlock(&data->msg_mutex); - - return 0; -} -EXPORT_SYMBOL_GPL(pmi_send_message); - -int pmi_register_handler(struct pmi_handler *handler) -{ - if (!data) - return -ENODEV; - - spin_lock(&data->handler_spinlock); - list_add_tail(&handler->node, &data->handler); - spin_unlock(&data->handler_spinlock); - - return 0; -} -EXPORT_SYMBOL_GPL(pmi_register_handler); - -void pmi_unregister_handler(struct pmi_handler *handler) -{ - if (!data) - return; - - pr_debug("pmi: unregistering handler %p\n", handler); - - spin_lock(&data->handler_spinlock); - list_del(&handler->node); - spin_unlock(&data->handler_spinlock); -} -EXPORT_SYMBOL_GPL(pmi_unregister_handler); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); -MODULE_DESCRIPTION("IBM Platform Management Interrupt driver"); diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c index af0f9beddca9..9a232ae5e360 100644 --- a/arch/powerpc/sysdev/rtc_cmos_setup.c +++ b/arch/powerpc/sysdev/rtc_cmos_setup.c @@ -14,8 +14,8 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/mc146818rtc.h> +#include <linux/of_address.h> -#include <asm/prom.h> static int __init add_rtc(void) { @@ -66,4 +66,5 @@ static int __init add_rtc(void) } fs_initcall(add_rtc); +MODULE_DESCRIPTION("PPC RTC CMOS driver"); MODULE_LICENSE("GPL"); diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c index 4c4a6efd5e5f..db520c40cb6f 100644 --- a/arch/powerpc/sysdev/tsi108_dev.c +++ b/arch/powerpc/sysdev/tsi108_dev.c @@ -16,13 +16,14 @@ #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/of_net.h> #include <asm/tsi108.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/irq.h> -#include <asm/prom.h> #include <mm/mmu_decl.h> #undef DEBUG @@ -44,20 +45,19 @@ phys_addr_t get_csrbase(void) tsi = of_find_node_by_type(NULL, "tsi-bridge"); if (tsi) { - unsigned int size; - const void *prop = of_get_property(tsi, "reg", &size); - tsi108_csr_base = of_translate_address(tsi, prop); + struct 
resource res; + of_address_to_resource(tsi, 0, &res); + tsi108_csr_base = res.start; of_node_put(tsi); } return tsi108_csr_base; } +EXPORT_SYMBOL(get_csrbase); u32 get_vir_csrbase(void) { return (u32) (ioremap(get_csrbase(), 0x10000)); } - -EXPORT_SYMBOL(get_csrbase); EXPORT_SYMBOL(get_vir_csrbase); static int __init tsi108_eth_of_init(void) @@ -132,7 +132,7 @@ static int __init tsi108_eth_of_init(void) * driver itself to phylib and use a non-misleading * name for the workaround flag - it's not actually to * do with the model of PHY in use */ - if (of_get_property(phy, "txc-rxc-delay-disable", NULL)) + if (of_property_read_bool(phy, "txc-rxc-delay-disable")) tsi_eth_data.phy_type = TSI108_PHY_BCM54XX; of_node_put(phy); diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 042bb38fa5c2..07d0f6a83879 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -12,7 +12,9 @@ #include <linux/init.h> #include <linux/pci.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/interrupt.h> +#include <linux/of_address.h> #include <asm/byteorder.h> #include <asm/io.h> @@ -23,7 +25,6 @@ #include <asm/tsi108.h> #include <asm/tsi108_pci.h> #include <asm/tsi108_irq.h> -#include <asm/prom.h> #undef DEBUG #ifdef DEBUG @@ -216,9 +217,8 @@ int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary) (hose)->ops = &tsi108_direct_pci_ops; - printk(KERN_INFO "Found tsi108 PCI host bridge at 0x%08x. " - "Firmware bus number: %d->%d\n", - rsrc.start, hose->first_busno, hose->last_busno); + pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n", + &rsrc.start, hose->first_busno, hose->last_busno); /* Interpret the "ranges" property */ /* This also maps the I/O region and sets isa_io/mem_base */ @@ -257,7 +257,7 @@ static void tsi108_pci_int_unmask(u_int irq) mb(); } -static void init_pci_source(void) +static void __init init_pci_source(void) { tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, 0x0000ff00); @@ -404,8 +404,8 @@ void __init tsi108_pci_int_init(struct device_node *node) { DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); - pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0, - &pci_irq_domain_ops, NULL); + pci_irq_host = irq_domain_create_legacy(of_fwnode_handle(node), NR_IRQS_LEGACY, 0, 0, + &pci_irq_domain_ops, NULL); if (pci_irq_host == NULL) { printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n"); return; diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c index d38bbeed219b..4de57ba52236 100644 --- a/arch/powerpc/sysdev/udbg_memcons.c +++ b/arch/powerpc/sysdev/udbg_memcons.c @@ -41,7 +41,7 @@ struct memcons memcons = { .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE], }; -void memcons_putc(char c) +static void memcons_putc(char c) { char *new_output_pos; @@ -54,7 +54,7 @@ void memcons_putc(char c) memcons.output_pos = new_output_pos; } -int memcons_getc_poll(void) +static int memcons_getc_poll(void) { char c; char *new_input_pos; @@ -77,7 +77,7 @@ int memcons_getc_poll(void) return -1; } -int memcons_getc(void) +static int memcons_getc(void) { int c; @@ -92,7 +92,7 @@ int memcons_getc(void) return c; } -void udbg_init_memcons(void) +void __init udbg_init_memcons(void) { udbg_putc = memcons_putc; udbg_getc = memcons_getc; diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c index 6765d9e264a3..cf8db19a4f7d 100644 --- a/arch/powerpc/sysdev/xics/icp-hv.c +++ 
b/arch/powerpc/sysdev/xics/icp-hv.c @@ -162,7 +162,7 @@ static const struct icp_ops icp_hv_ops = { #endif }; -int icp_hv_init(void) +int __init icp_hv_init(void) { struct device_node *np; diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index 7d13d2ef5a90..4e89158a577c 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -6,15 +6,16 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/spinlock.h> #include <linux/module.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/irq.h> @@ -144,27 +145,6 @@ static void icp_native_cause_ipi(int cpu) icp_native_set_qirr(cpu, IPI_PRIORITY); } -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE -void icp_native_cause_ipi_rm(int cpu) -{ - /* - * Currently not used to send IPIs to another CPU - * on the same core. Only caller is KVM real mode. - * Need the physical address of the XICS to be - * previously saved in kvm_hstate in the paca. - */ - void __iomem *xics_phys; - - /* - * Just like the cause_ipi functions, it is required to - * include a full barrier before causing the IPI. - */ - xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys; - mb(); - __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR); -} -#endif - /* * Called when an interrupt is received on an off-line CPU to * clear the interrupt, so that the CPU can go back to nap mode. @@ -235,6 +215,8 @@ static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr, rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation", cpu, hw_id); + if (!rname) + return -ENOMEM; if (!request_mem_region(addr, size, rname)) { pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n", cpu, hw_id); @@ -258,7 +240,7 @@ static int __init icp_native_init_one_node(struct device_node *np, unsigned int ilen; const __be32 *ireg; int i; - int reg_tuple_size; + int num_reg; int num_servers = 0; /* This code does the theorically broken assumption that the interrupt @@ -279,21 +261,14 @@ static int __init icp_native_init_one_node(struct device_node *np, num_servers = of_read_number(ireg + 1, 1); } - ireg = of_get_property(np, "reg", &ilen); - if (!ireg) { - pr_err("icp_native: Can't find interrupt reg property"); - return -1; - } - - reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4; - if (((ilen % reg_tuple_size) != 0) - || (num_servers && (num_servers != (ilen / reg_tuple_size)))) { + num_reg = of_address_count(np); + if (num_servers && (num_servers != num_reg)) { pr_err("icp_native: ICP reg len (%d) != num servers (%d)", - ilen / reg_tuple_size, num_servers); + num_reg, num_servers); return -1; } - for (i = 0; i < (ilen / reg_tuple_size); i++) { + for (i = 0; i < num_reg; i++) { struct resource r; int err; diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index 675d708863d5..4dae624b9f2f 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -184,7 +184,7 @@ static const struct icp_ops icp_opal_ops = { #endif }; -int icp_opal_init(void) +int __init icp_opal_init(void) { struct device_node *np; @@ -196,6 +196,7 @@ int icp_opal_init(void) printk("XICS: Using OPAL ICP fallbacks\n"); + of_node_put(np); return 0; } diff --git a/arch/powerpc/sysdev/xics/ics-native.c 
b/arch/powerpc/sysdev/xics/ics-native.c index d450502f4053..112c8a1e8159 100644 --- a/arch/powerpc/sysdev/xics/ics-native.c +++ b/arch/powerpc/sysdev/xics/ics-native.c @@ -15,11 +15,11 @@ #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/spinlock.h> #include <linux/msi.h> #include <linux/list.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> @@ -131,19 +131,15 @@ static struct irq_chip ics_native_irq_chip = { .irq_retrigger = xics_retrigger, }; -static int ics_native_map(struct ics *ics, unsigned int virq) +static int ics_native_check(struct ics *ics, unsigned int hw_irq) { - unsigned int vec = (unsigned int)virq_to_hw(virq); struct ics_native *in = to_ics_native(ics); - pr_devel("%s: vec=0x%x\n", __func__, vec); + pr_devel("%s: hw_irq=0x%x\n", __func__, hw_irq); - if (vec < in->ibase || vec >= (in->ibase + in->icount)) + if (hw_irq < in->ibase || hw_irq >= (in->ibase + in->icount)) return -EINVAL; - irq_set_chip_and_handler(virq, &ics_native_irq_chip, handle_fasteoi_irq); - irq_set_chip_data(virq, ics); - return 0; } @@ -177,10 +173,11 @@ static int ics_native_host_match(struct ics *ics, struct device_node *node) } static struct ics ics_native_template = { - .map = ics_native_map, + .check = ics_native_check, .mask_unknown = ics_native_mask_unknown, .get_server = ics_native_get_server, .host_match = ics_native_host_match, + .chip = &ics_native_irq_chip, }; static int __init ics_native_add_one(struct device_node *np) diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index 823f6c9664cd..5fe73dabab79 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c @@ -18,7 +18,6 @@ #include <linux/spinlock.h> #include <linux/msi.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> @@ -62,17 +61,6 @@ static void ics_opal_unmask_irq(struct irq_data *d) static unsigned int ics_opal_startup(struct irq_data *d) { -#ifdef CONFIG_PCI_MSI - /* - * The generic MSI code returns with the interrupt disabled on the - * card, using the MSI mask bits. Firmware doesn't appear to unmask - * at that level, so we do it here by hand. 
- */ - if (irq_data_get_msi_desc(d)) - pci_msi_unmask_irq(d); -#endif - - /* unmask it */ ics_opal_unmask_irq(d); return 0; } @@ -123,7 +111,6 @@ static int ics_opal_set_affinity(struct irq_data *d, __func__, d->irq, hw_irq, rc); return -1; } - server = be16_to_cpu(oserver); wanted_server = xics_get_irq_server(d->irq, cpumask, 1); if (wanted_server < 0) { @@ -133,7 +120,7 @@ static int ics_opal_set_affinity(struct irq_data *d, } server = ics_opal_mangle_server(wanted_server); - pr_devel("ics-hal: set-affinity irq %d [hw 0x%x] server: 0x%x/0x%x\n", + pr_debug("ics-hal: set-affinity irq %d [hw 0x%x] server: 0x%x/0x%x\n", d->irq, hw_irq, wanted_server, server); rc = opal_set_xive(hw_irq, server, priority); @@ -157,26 +144,13 @@ static struct irq_chip ics_opal_irq_chip = { .irq_retrigger = xics_retrigger, }; -static int ics_opal_map(struct ics *ics, unsigned int virq); -static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec); -static long ics_opal_get_server(struct ics *ics, unsigned long vec); - static int ics_opal_host_match(struct ics *ics, struct device_node *node) { return 1; } -/* Only one global & state struct ics */ -static struct ics ics_hal = { - .map = ics_opal_map, - .mask_unknown = ics_opal_mask_unknown, - .get_server = ics_opal_get_server, - .host_match = ics_opal_host_match, -}; - -static int ics_opal_map(struct ics *ics, unsigned int virq) +static int ics_opal_check(struct ics *ics, unsigned int hw_irq) { - unsigned int hw_irq = (unsigned int)virq_to_hw(virq); int64_t rc; __be16 server; int8_t priority; @@ -189,9 +163,6 @@ static int ics_opal_map(struct ics *ics, unsigned int virq) if (rc != OPAL_SUCCESS) return -ENXIO; - irq_set_chip_and_handler(virq, &ics_opal_irq_chip, handle_fasteoi_irq); - irq_set_chip_data(virq, &ics_hal); - return 0; } @@ -222,6 +193,15 @@ static long ics_opal_get_server(struct ics *ics, unsigned long vec) return ics_opal_unmangle_server(be16_to_cpu(server)); } +/* Only one global & state struct ics */ +static struct ics ics_hal = { + .check = ics_opal_check, + .mask_unknown = ics_opal_mask_unknown, + .get_server = ics_opal_get_server, + .host_match = ics_opal_host_match, + .chip = &ics_opal_irq_chip, +}; + int __init ics_opal_init(void) { if (!firmware_has_feature(FW_FEATURE_OPAL)) diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index 4cf18000f07c..b772a833d9b7 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -10,7 +10,6 @@ #include <linux/spinlock.h> #include <linux/msi.h> -#include <asm/prom.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> @@ -24,19 +23,6 @@ static int ibm_set_xive; static int ibm_int_on; static int ibm_int_off; -static int ics_rtas_map(struct ics *ics, unsigned int virq); -static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec); -static long ics_rtas_get_server(struct ics *ics, unsigned long vec); -static int ics_rtas_host_match(struct ics *ics, struct device_node *node); - -/* Only one global & state struct ics */ -static struct ics ics_rtas = { - .map = ics_rtas_map, - .mask_unknown = ics_rtas_mask_unknown, - .get_server = ics_rtas_get_server, - .host_match = ics_rtas_host_match, -}; - static void ics_rtas_unmask_irq(struct irq_data *d) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); @@ -50,8 +36,8 @@ static void ics_rtas_unmask_irq(struct irq_data *d) server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); - call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, - 
server, DEFAULT_PRIORITY); + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, + DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", @@ -60,7 +46,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d) } /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call_reentrant(ibm_int_on, 1, 1, NULL, hw_irq); + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -70,15 +56,6 @@ static void ics_rtas_unmask_irq(struct irq_data *d) static unsigned int ics_rtas_startup(struct irq_data *d) { -#ifdef CONFIG_PCI_MSI - /* - * The generic MSI code returns with the interrupt disabled on the - * card, using the MSI mask bits. Firmware doesn't appear to unmask - * at that level, so we do it here by hand. - */ - if (irq_data_get_msi_desc(d)) - pci_msi_unmask_irq(d); -#endif /* unmask it */ ics_rtas_unmask_irq(d); return 0; @@ -91,7 +68,7 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) if (hw_irq == XICS_IPI) return; - call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq); + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -99,8 +76,8 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) } /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, - xics_default_server, 0xff); + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, + xics_default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -131,7 +108,7 @@ static int ics_rtas_set_affinity(struct irq_data *d, if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return -1; - status = rtas_call_reentrant(ibm_get_xive, 1, 3, xics_status, hw_irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", @@ -146,8 +123,11 @@ static int ics_rtas_set_affinity(struct irq_data *d, return -1; } - status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, - hw_irq, irq_server, xics_status[1]); + pr_debug("%s: irq %d [hw 0x%x] server: 0x%x\n", __func__, d->irq, + hw_irq, irq_server); + + status = rtas_call(ibm_set_xive, 3, 1, NULL, + hw_irq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", @@ -169,9 +149,8 @@ static struct irq_chip ics_rtas_irq_chip = { .irq_retrigger = xics_retrigger, }; -static int ics_rtas_map(struct ics *ics, unsigned int virq) +static int ics_rtas_check(struct ics *ics, unsigned int hw_irq) { - unsigned int hw_irq = (unsigned int)virq_to_hw(virq); int status[2]; int rc; @@ -179,13 +158,10 @@ static int ics_rtas_map(struct ics *ics, unsigned int virq) return -EINVAL; /* Check if RTAS knows about this interrupt */ - rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, hw_irq); + rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); if (rc) return -ENXIO; - irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq); - irq_set_chip_data(virq, &ics_rtas); - return 0; } @@ -198,7 +174,7 @@ static long ics_rtas_get_server(struct ics *ics, unsigned long vec) { int rc, status[2]; - rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, vec); + rc = rtas_call(ibm_get_xive, 1, 3, status, vec); if (rc) return -1; 
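The ics-rtas hunks in this file drop rtas_call_reentrant() in favour of plain rtas_call(), and the init hunk further down looks tokens up with rtas_function_token(). A sketch of that call pattern, under the assumption of a hypothetical helper name:

#include <asm/rtas.h>

/* Hypothetical helper; token lookup and call shape as used by the ics-rtas hunks. */
static int example_get_xive(unsigned int hw_irq, int *server, int *prio)
{
        int status[2];
        int token = rtas_function_token(RTAS_FN_IBM_GET_XIVE);

        /* nret = 3: the call status plus the two values returned in status[]. */
        if (rtas_call(token, 1, 3, status, hw_irq))
                return -ENXIO;

        *server = status[0];
        *prio = status[1];
        return 0;
}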
return status[0]; @@ -213,12 +189,21 @@ static int ics_rtas_host_match(struct ics *ics, struct device_node *node) return !of_device_is_compatible(node, "chrp,iic"); } +/* Only one global & state struct ics */ +static struct ics ics_rtas = { + .check = ics_rtas_check, + .mask_unknown = ics_rtas_mask_unknown, + .get_server = ics_rtas_get_server, + .host_match = ics_rtas_host_match, + .chip = &ics_rtas_irq_chip, +}; + __init int ics_rtas_init(void) { - ibm_get_xive = rtas_token("ibm,get-xive"); - ibm_set_xive = rtas_token("ibm,set-xive"); - ibm_int_on = rtas_token("ibm,int-on"); - ibm_int_off = rtas_token("ibm,int-off"); + ibm_get_xive = rtas_function_token(RTAS_FN_IBM_GET_XIVE); + ibm_set_xive = rtas_function_token(RTAS_FN_IBM_SET_XIVE); + ibm_int_on = rtas_function_token(RTAS_FN_IBM_INT_ON); + ibm_int_off = rtas_function_token(RTAS_FN_IBM_INT_OFF); /* We enable the RTAS "ICS" if RTAS is present with the * appropriate tokens diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index b14c502e56a8..c3fa539a9898 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -6,6 +6,7 @@ #include <linux/threads.h> #include <linux/kernel.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/debugfs.h> #include <linux/smp.h> #include <linux/interrupt.h> @@ -17,7 +18,6 @@ #include <linux/spinlock.h> #include <linux/delay.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/machdep.h> @@ -38,7 +38,7 @@ DEFINE_PER_CPU(struct xics_cppr, xics_cppr); struct irq_domain *xics_host; -static LIST_HEAD(ics_list); +static struct ics *xics_ics; void xics_update_irq_servers(void) { @@ -111,18 +111,17 @@ void xics_setup_cpu(void) void xics_mask_unknown_vec(unsigned int vec) { - struct ics *ics; - pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec); - list_for_each_entry(ics, &ics_list, link) - ics->mask_unknown(ics, vec); + if (WARN_ON(!xics_ics)) + return; + xics_ics->mask_unknown(xics_ics, vec); } #ifdef CONFIG_SMP -static void xics_request_ipi(void) +static void __init xics_request_ipi(void) { unsigned int ipi; @@ -133,7 +132,7 @@ static void xics_request_ipi(void) * IPIs are marked IRQF_PERCPU. The handler was set in map. 
*/ BUG_ON(request_irq(ipi, icp_ops->ipi_action, - IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); + IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); } void __init xics_smp_probe(void) @@ -147,7 +146,7 @@ void __init xics_smp_probe(void) #endif /* CONFIG_SMP */ -void xics_teardown_cpu(void) +noinstr void xics_teardown_cpu(void) { struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr); @@ -160,7 +159,7 @@ void xics_teardown_cpu(void) icp_ops->teardown_cpu(); } -void xics_kexec_teardown_cpu(int secondary) +noinstr void xics_kexec_teardown_cpu(int secondary) { xics_teardown_cpu(); @@ -184,6 +183,8 @@ void xics_migrate_irqs_away(void) unsigned int irq, virq; struct irq_desc *desc; + pr_debug("%s: CPU %u\n", __func__, cpu); + /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == xics_default_server) xics_update_irq_servers(); @@ -198,7 +199,7 @@ void xics_migrate_irqs_away(void) struct irq_chip *chip; long server; unsigned long flags; - struct ics *ics; + struct irq_data *irqd; /* We can't set affinity on ISA interrupts */ if (virq < NR_IRQS_LEGACY) @@ -206,9 +207,11 @@ void xics_migrate_irqs_away(void) /* We only need to migrate enabled IRQS */ if (!desc->action) continue; - if (desc->irq_data.domain != xics_host) + /* We need a mapping in the XICS IRQ domain */ + irqd = irq_domain_get_irq_data(xics_host, virq); + if (!irqd) continue; - irq = desc->irq_data.hwirq; + irq = irqd_to_hwirq(irqd); /* We need to get IPIs still. */ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) continue; @@ -219,13 +222,10 @@ void xics_migrate_irqs_away(void) raw_spin_lock_irqsave(&desc->lock, flags); /* Locate interrupt server */ - server = -1; - ics = irq_desc_get_chip_data(desc); - if (ics) - server = ics->get_server(ics, irq); + server = xics_ics->get_server(xics_ics, irq); if (server < 0) { - printk(KERN_ERR "%s: Can't find server for irq %d\n", - __func__, irq); + pr_err("%s: Can't find server for irq %d/%x\n", + __func__, virq, irq); goto unlock; } @@ -307,13 +307,9 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, static int xics_host_match(struct irq_domain *h, struct device_node *node, enum irq_domain_bus_token bus_token) { - struct ics *ics; - - list_for_each_entry(ics, &ics_list, link) - if (ics->host_match(ics, node)) - return 1; - - return 0; + if (WARN_ON(!xics_ics)) + return 0; + return xics_ics->host_match(xics_ics, node) ? 1 : 0; } /* Dummies */ @@ -327,12 +323,10 @@ static struct irq_chip xics_ipi_chip = { .irq_unmask = xics_ipi_unmask, }; -static int xics_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) +static int xics_host_map(struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq) { - struct ics *ics; - - pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); + pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hwirq); /* * Mark interrupts as edge sensitive by default so that resend @@ -342,18 +336,23 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq, irq_clear_status_flags(virq, IRQ_LEVEL); /* Don't call into ICS for IPIs */ - if (hw == XICS_IPI) { + if (hwirq == XICS_IPI) { irq_set_chip_and_handler(virq, &xics_ipi_chip, handle_percpu_irq); return 0; } - /* Let the ICS setup the chip data */ - list_for_each_entry(ics, &ics_list, link) - if (ics->map(ics, virq) == 0) - return 0; + if (WARN_ON(!xics_ics)) + return -EINVAL; + + if (xics_ics->check(xics_ics, hwirq)) + return -EINVAL; + + /* Let the ICS be the chip data for the XICS domain. 
For ICS native */ + irq_domain_set_info(domain, virq, hwirq, xics_ics->chip, + xics_ics, handle_fasteoi_irq, NULL, NULL); - return -EINVAL; + return 0; } static int xics_host_xlate(struct irq_domain *h, struct device_node *ct, @@ -412,22 +411,76 @@ int xics_retrigger(struct irq_data *data) return 0; } +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +static int xics_host_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) +{ + return xics_host_xlate(d, to_of_node(fwspec->fwnode), fwspec->param, + fwspec->param_count, hwirq, type); +} + +static int xics_host_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + int i, rc; + + rc = xics_host_domain_translate(domain, fwspec, &hwirq, &type); + if (rc) + return rc; + + pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, xics_ics->chip, + xics_ics, handle_fasteoi_irq, NULL, NULL); + + return 0; +} + +static void xics_host_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + pr_debug("%s %d #%d\n", __func__, virq, nr_irqs); +} +#endif + static const struct irq_domain_ops xics_host_ops = { +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + .alloc = xics_host_domain_alloc, + .free = xics_host_domain_free, + .translate = xics_host_domain_translate, +#endif .match = xics_host_match, .map = xics_host_map, .xlate = xics_host_xlate, }; -static void __init xics_init_host(void) +static int __init xics_allocate_domain(void) { - xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL); - BUG_ON(xics_host == NULL); - irq_set_default_host(xics_host); + struct fwnode_handle *fn; + + fn = irq_domain_alloc_named_fwnode("XICS"); + if (!fn) + return -ENOMEM; + + xics_host = irq_domain_create_tree(fn, &xics_host_ops, NULL); + if (!xics_host) { + irq_domain_free_fwnode(fn); + return -ENOMEM; + } + + irq_set_default_domain(xics_host); + return 0; } void __init xics_register_ics(struct ics *ics) { - list_add(&ics->link, &ics_list); + if (WARN_ONCE(xics_ics, "XICS: Source Controller is already defined !")) + return; + xics_ics = ics; } static void __init xics_get_server_size(void) @@ -484,6 +537,8 @@ void __init xics_init(void) /* Initialize common bits */ xics_get_server_size(); xics_update_irq_servers(); - xics_init_host(); + rc = xics_allocate_domain(); + if (rc < 0) + pr_err("XICS: Failed to create IRQ domain"); xics_setup_cpu(); } diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig index 97796c6b63f0..785c292d104b 100644 --- a/arch/powerpc/sysdev/xive/Kconfig +++ b/arch/powerpc/sysdev/xive/Kconfig @@ -3,7 +3,6 @@ config PPC_XIVE bool select PPC_SMP_MUXED_IPI select HARDIRQS_SW_RESEND - select IRQ_DOMAIN_NOMAP config PPC_XIVE_NATIVE bool diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index dbdbbc2f1dc5..8d0123b0ae84 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -9,6 +9,7 @@ #include <linux/threads.h> #include <linux/kernel.h> #include <linux/irq.h> +#include <linux/irqdomain.h> #include <linux/debugfs.h> #include <linux/smp.h> #include <linux/interrupt.h> @@ -21,8 +22,6 @@ #include <linux/msi.h> #include <linux/vmalloc.h> -#include <asm/debugfs.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/machdep.h> @@ -67,6 +66,7 @@ static 
struct irq_domain *xive_irq_domain; static struct xive_ipi_desc { unsigned int irq; char name[16]; + atomic_t started; } *xive_ipis; /* @@ -85,6 +85,16 @@ static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); #define XIVE_INVALID_TARGET (-1) /* + * Global toggle to switch on/off StoreEOI + */ +static bool xive_store_eoi = true; + +static bool xive_is_store_eoi(struct xive_irq_data *xd) +{ + return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi; +} + +/* * Read the next entry in a queue, return its content if it's valid * or 0 if there is no new entry. * @@ -208,7 +218,7 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) { u64 val; - if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) + if (offset == XIVE_ESB_SET_PQ_10 && xive_is_store_eoi(xd)) offset |= XIVE_ESB_LD_ST_MO; if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) @@ -227,6 +237,21 @@ static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) out_be64(xd->eoi_mmio + offset, data); } +#if defined(CONFIG_XMON) || defined(CONFIG_DEBUG_FS) +static void xive_irq_data_dump(struct xive_irq_data *xd, char *buffer, size_t size) +{ + u64 val = xive_esb_read(xd, XIVE_ESB_GET); + + snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx", + xive_is_store_eoi(xd) ? 'S' : ' ', + xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', + xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', + val & XIVE_ESB_VAL_P ? 'P' : '-', + val & XIVE_ESB_VAL_Q ? 'Q' : '-', + xd->trig_page, xd->eoi_page); +} +#endif + #ifdef CONFIG_XMON static notrace void xive_dump_eq(const char *name, struct xive_q *q) { @@ -252,11 +277,10 @@ notrace void xmon_xive_do_dump(int cpu) #ifdef CONFIG_SMP { - u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); + char buffer[128]; - xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi, - val & XIVE_ESB_VAL_P ? 'P' : '-', - val & XIVE_ESB_VAL_Q ? 'Q' : '-'); + xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); + xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer); } #endif xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); @@ -291,15 +315,11 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) d = xive_get_irq_data(hw_irq); if (d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); - u64 val = xive_esb_read(xd, XIVE_ESB_GET); - - xmon_printf("flags=%c%c%c PQ=%c%c", - xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', - xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', - xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', - val & XIVE_ESB_VAL_P ? 'P' : '-', - val & XIVE_ESB_VAL_Q ? 'Q' : '-'); + char buffer[128]; + + xive_irq_data_dump(irq_data_get_irq_chip_data(d), + buffer, sizeof(buffer)); + xmon_printf("%s", buffer); } xmon_printf("\n"); @@ -312,11 +332,10 @@ void xmon_xive_get_irq_all(void) struct irq_desc *desc; for_each_irq_desc(i, desc) { - struct irq_data *d = irq_desc_get_irq_data(desc); - unsigned int hwirq = (unsigned int)irqd_to_hwirq(d); + struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i); - if (d->domain == xive_irq_domain) - xmon_xive_get_irq_config(hwirq, d); + if (d) + xmon_xive_get_irq_config(irqd_to_hwirq(d), d); } } @@ -364,7 +383,7 @@ static unsigned int xive_get_irq(void) * CPU. * * If we find that there is indeed more in there, we call - * force_external_irq_replay() to make Linux synthetize an + * force_external_irq_replay() to make Linux synthesize an * external interrupt on the next call to local_irq_restore(). 
*/ static void xive_do_queue_eoi(struct xive_cpu *xc) @@ -386,7 +405,7 @@ static void xive_do_source_eoi(struct xive_irq_data *xd) xd->stale_p = false; /* If the XIVE supports the new "store EOI facility, use it */ - if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) { + if (xive_is_store_eoi(xd)) { xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); return; } @@ -418,7 +437,7 @@ static void xive_do_source_eoi(struct xive_irq_data *xd) /* irq_chip eoi callback, called with irq descriptor lock held */ static void xive_irq_eoi(struct irq_data *d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); struct xive_cpu *xc = __this_cpu_read(xive_cpu); DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", @@ -452,6 +471,8 @@ static void xive_do_source_set_mask(struct xive_irq_data *xd, { u64 val; + pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un"); + /* * If the interrupt had P set, it may be in a queue. * @@ -574,7 +595,7 @@ static int xive_pick_irq_target(struct irq_data *d, const struct cpumask *affinity) { static unsigned int fuzz; - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); cpumask_var_t mask; int cpu = -1; @@ -607,24 +628,14 @@ static int xive_pick_irq_target(struct irq_data *d, static unsigned int xive_irq_startup(struct irq_data *d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); int target, rc; xd->saved_p = false; xd->stale_p = false; - pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n", - d->irq, hw_irq, d); -#ifdef CONFIG_PCI_MSI - /* - * The generic MSI code returns with the interrupt disabled on the - * card, using the MSI mask bits. Firmware doesn't appear to unmask - * at that level, so we do it here by hand. 
- */ - if (irq_data_get_msi_desc(d)) - pci_msi_unmask_irq(d); -#endif + pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); /* Pick a target */ target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d)); @@ -662,11 +673,10 @@ static unsigned int xive_irq_startup(struct irq_data *d) /* called with irq descriptor lock held */ static void xive_irq_shutdown(struct irq_data *d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", - d->irq, hw_irq, d); + pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) return; @@ -688,18 +698,18 @@ static void xive_irq_shutdown(struct irq_data *d) static void xive_irq_unmask(struct irq_data *d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); - pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); + pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); xive_do_source_set_mask(xd, false); } static void xive_irq_mask(struct irq_data *d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); - pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); + pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); xive_do_source_set_mask(xd, true); } @@ -708,21 +718,17 @@ static int xive_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); u32 target, old_target; int rc = 0; - pr_devel("xive_irq_set_affinity: irq %d\n", d->irq); + pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq); /* Is this valid ? */ - if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + if (!cpumask_intersects(cpumask, cpu_online_mask)) return -EINVAL; - /* Don't do anything if the interrupt isn't started */ - if (!irqd_is_started(d)) - return IRQ_SET_MASK_OK; - /* * If existing target is already in the new mask, and is * online then do nothing. @@ -758,7 +764,7 @@ static int xive_irq_set_affinity(struct irq_data *d, return rc; } - pr_devel(" target: 0x%x\n", target); + pr_debug(" target: 0x%x\n", target); xd->target = target; /* Give up previous target */ @@ -770,14 +776,14 @@ static int xive_irq_set_affinity(struct irq_data *d, static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); /* * We only support these. This has really no effect other than setting * the corresponding descriptor bits mind you but those will in turn * affect the resend function when re-enabling an edge interrupt. * - * Set set the default to edge as explained in map(). + * Set the default to edge as explained in map(). 
*/ if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_EDGE_RISING; @@ -809,7 +815,7 @@ static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type) static int xive_irq_retrigger(struct irq_data *d) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); /* This should be only for MSIs */ if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) @@ -831,7 +837,7 @@ static int xive_irq_retrigger(struct irq_data *d) */ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); int rc; u8 pq; @@ -868,7 +874,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) * * This also tells us that it's in flight to a host queue * or has already been fetched but hasn't been EOIed yet - * by the host. This it's potentially using up a host + * by the host. Thus it's potentially using up a host * queue slot. This is important to know because as long * as this is the case, we must not hard-unmask it when * "returning" that interrupt to the host. @@ -945,7 +951,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) static int xive_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) { - struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); + struct xive_irq_data *xd = irq_data_get_irq_chip_data(data); u8 pq; switch (which) { @@ -960,7 +966,8 @@ static int xive_get_irqchip_state(struct irq_data *data, * interrupt to be inactive in that case. */ *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && - (xd->saved_p || !!(pq & XIVE_ESB_VAL_P)); + (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) && + !irqd_irq_disabled(data))); return 0; default: return -EINVAL; @@ -989,6 +996,8 @@ EXPORT_SYMBOL_GPL(is_xive_irq); void xive_cleanup_irq_data(struct xive_irq_data *xd) { + pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq); + if (xd->eoi_mmio) { iounmap(xd->eoi_mmio); if (xd->eoi_mmio == xd->trig_mmio) @@ -1002,21 +1011,20 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd) } EXPORT_SYMBOL_GPL(xive_cleanup_irq_data); -static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) +static struct xive_irq_data *xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) { struct xive_irq_data *xd; int rc; xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); if (!xd) - return -ENOMEM; + return ERR_PTR(-ENOMEM); rc = xive_ops->populate_irq_data(hw, xd); if (rc) { kfree(xd); - return rc; + return ERR_PTR(rc); } xd->target = XIVE_INVALID_TARGET; - irq_set_handler_data(virq, xd); /* * Turn OFF by default the interrupt being mapped. 
A side @@ -1027,16 +1035,16 @@ static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) */ xive_esb_read(xd, XIVE_ESB_SET_PQ_01); - return 0; + return xd; } static void xive_irq_free_data(unsigned int virq) { - struct xive_irq_data *xd = irq_get_handler_data(virq); + struct xive_irq_data *xd = irq_get_chip_data(virq); if (!xd) return; - irq_set_handler_data(virq, NULL); + irq_set_chip_data(virq, NULL); xive_cleanup_irq_data(xd); kfree(xd); } @@ -1120,7 +1128,7 @@ static const struct irq_domain_ops xive_ipi_irq_domain_ops = { .alloc = xive_ipi_irq_domain_alloc, }; -static int __init xive_request_ipi(void) +static int __init xive_init_ipis(void) { struct fwnode_handle *fwnode; struct irq_domain *ipi_domain; @@ -1144,10 +1152,6 @@ static int __init xive_request_ipi(void) struct xive_ipi_desc *xid = &xive_ipis[node]; struct xive_ipi_alloc_info info = { node }; - /* Skip nodes without CPUs */ - if (cpumask_empty(cpumask_of_node(node))) - continue; - /* * Map one IPI interrupt per node for all cpus of that node. * Since the HW interrupt number doesn't have any meaning, @@ -1159,11 +1163,6 @@ static int __init xive_request_ipi(void) xid->irq = ret; snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); - - ret = request_irq(xid->irq, xive_muxed_ipi_action, - IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL); - - WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); } return ret; @@ -1178,6 +1177,22 @@ out: return ret; } +static int xive_request_ipi(unsigned int cpu) +{ + struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)]; + int ret; + + if (atomic_inc_return(&xid->started) > 1) + return 0; + + ret = request_irq(xid->irq, xive_muxed_ipi_action, + IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, + xid->name, NULL); + + WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); + return ret; +} + static int xive_setup_cpu_ipi(unsigned int cpu) { unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); @@ -1192,6 +1207,9 @@ static int xive_setup_cpu_ipi(unsigned int cpu) if (xc->hw_ipi != XIVE_BAD_IRQ) return 0; + /* Register the IPI */ + xive_request_ipi(cpu); + /* Grab an IPI from the backend, this will populate xc->hw_ipi */ if (xive_ops->get_ipi(cpu, xc)) return -EIO; @@ -1212,8 +1230,8 @@ static int xive_setup_cpu_ipi(unsigned int cpu) pr_err("Failed to map IPI CPU %d\n", cpu); return -EIO; } - pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu, - xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); + pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu, + xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); /* Unmask it */ xive_do_source_set_mask(&xc->ipi_data, false); @@ -1221,7 +1239,7 @@ static int xive_setup_cpu_ipi(unsigned int cpu) return 0; } -static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) +noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) { unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); @@ -1231,6 +1249,8 @@ static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) if (xc->hw_ipi == XIVE_BAD_IRQ) return; + /* TODO: clear IPI mapping */ + /* Mask the IPI */ xive_do_source_set_mask(&xc->ipi_data, true); @@ -1253,7 +1273,7 @@ void __init xive_smp_probe(void) smp_ops->cause_ipi = xive_cause_ipi; /* Register the IPI */ - xive_request_ipi(); + xive_init_ipis(); /* Allocate and setup IPI for the boot CPU */ xive_setup_cpu_ipi(smp_processor_id()); @@ -1264,7 +1284,7 @@ void __init xive_smp_probe(void) static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, 
irq_hw_number_t hw) { - int rc; + struct xive_irq_data *xd; /* * Mark interrupts as edge sensitive by default so that resend @@ -1272,11 +1292,12 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, */ irq_clear_status_flags(virq, IRQ_LEVEL); - rc = xive_irq_alloc_data(virq, hw); - if (rc) - return rc; + xd = xive_irq_alloc_data(virq, hw); + if (IS_ERR(xd)) + return PTR_ERR(xd); irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq); + irq_set_chip_data(virq, xd); return 0; } @@ -1344,7 +1365,7 @@ static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d, seq_printf(m, "%*sXIVE:\n", ind, ""); ind++; - xd = irq_data_get_irq_handler_data(irqd); + xd = irq_data_get_irq_chip_data(irqd); if (!xd) { seq_printf(m, "%*snot assigned\n", ind, ""); return; @@ -1366,7 +1387,71 @@ static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d, } #endif +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY +static int xive_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode), + fwspec->param, fwspec->param_count, + hwirq, type); +} + +static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct irq_fwspec *fwspec = arg; + struct xive_irq_data *xd; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + int i, rc; + + rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (rc) + return rc; + + pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + /* TODO: call xive_irq_domain_map() */ + + /* + * Mark interrupts as edge sensitive by default so that resend + * actually works. Will fix that up below if needed. 
+ */ + irq_clear_status_flags(virq, IRQ_LEVEL); + + /* allocates and sets handler data */ + xd = xive_irq_alloc_data(virq + i, hwirq + i); + if (IS_ERR(xd)) + return PTR_ERR(xd); + + irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &xive_irq_chip, xd); + irq_set_handler(virq + i, handle_fasteoi_irq); + } + + return 0; +} + +static void xive_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i; + + pr_debug("%s %d #%d\n", __func__, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) + xive_irq_free_data(virq + i); +} +#endif + static const struct irq_domain_ops xive_irq_domain_ops = { +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + .alloc = xive_irq_domain_alloc, + .free = xive_irq_domain_free, + .translate = xive_irq_domain_translate, +#endif .match = xive_irq_domain_match, .map = xive_irq_domain_map, .unmap = xive_irq_domain_unmap, @@ -1378,11 +1463,10 @@ static const struct irq_domain_ops xive_irq_domain_ops = { static void __init xive_init_host(struct device_node *np) { - xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ, - &xive_irq_domain_ops, NULL); + xive_irq_domain = irq_domain_create_tree(of_fwnode_handle(np), &xive_irq_domain_ops, NULL); if (WARN_ON(xive_irq_domain == NULL)) return; - irq_set_default_host(xive_irq_domain); + irq_set_default_domain(xive_irq_domain); } static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) @@ -1440,7 +1524,7 @@ static void xive_setup_cpu(void) #ifdef CONFIG_SMP void xive_smp_setup_cpu(void) { - pr_devel("SMP setup CPU %d\n", smp_processor_id()); + pr_debug("SMP setup CPU %d\n", smp_processor_id()); /* This will have already been done on the boot CPU */ if (smp_processor_id() != boot_cpuid) @@ -1496,7 +1580,7 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) cpu, irq); #endif raw_spin_lock(&desc->lock); - xd = irq_desc_get_handler_data(desc); + xd = irq_desc_get_chip_data(desc); /* * Clear saved_p to indicate that it's no longer pending @@ -1549,7 +1633,7 @@ void xive_flush_interrupt(void) #endif /* CONFIG_SMP */ -void xive_teardown_cpu(void) +noinstr void xive_teardown_cpu(void) { struct xive_cpu *xc = __this_cpu_read(xive_cpu); unsigned int cpu = smp_processor_id(); @@ -1586,10 +1670,10 @@ bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops, ppc_md.get_irq = xive_get_irq; __xive_enabled = true; - pr_devel("Initializing host..\n"); + pr_debug("Initializing host..\n"); xive_init_host(np); - pr_devel("Initializing boot CPU..\n"); + pr_debug("Initializing boot CPU..\n"); /* Allocate per-CPU data and queues */ xive_prepare_cpu(smp_processor_id()); @@ -1623,40 +1707,40 @@ __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift) static int __init xive_off(char *arg) { xive_cmdline_disabled = true; - return 0; + return 1; } __setup("xive=off", xive_off); -static void xive_debug_show_cpu(struct seq_file *m, int cpu) +static int __init xive_store_eoi_cmdline(char *arg) +{ + if (!arg) + return 1; + + if (strncmp(arg, "off", 3) == 0) { + pr_info("StoreEOI disabled on kernel command line\n"); + xive_store_eoi = false; + } + return 1; +} +__setup("xive.store-eoi=", xive_store_eoi_cmdline); + +#ifdef CONFIG_DEBUG_FS +static void xive_debug_show_ipi(struct seq_file *m, int cpu) { struct xive_cpu *xc = per_cpu(xive_cpu, cpu); - seq_printf(m, "CPU %d:", cpu); + seq_printf(m, "CPU %d: ", cpu); if (xc) { seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); #ifdef CONFIG_SMP { - u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); 
+ char buffer[128]; - seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi, - val & XIVE_ESB_VAL_P ? 'P' : '-', - val & XIVE_ESB_VAL_Q ? 'Q' : '-'); + xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer)); + seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer); } #endif - { - struct xive_q *q = &xc->queue[xive_irq_priority]; - u32 i0, i1, idx; - - if (q->qpage) { - idx = q->idx; - i0 = be32_to_cpup(q->qpage + idx); - idx = (idx + 1) & q->msk; - i1 = be32_to_cpup(q->qpage + idx); - seq_printf(m, "EQ idx=%d T=%d %08x %08x ...", - q->idx, q->toggle, i0, i1); - } - } } seq_puts(m, "\n"); } @@ -1668,8 +1752,7 @@ static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d) u32 target; u8 prio; u32 lirq; - struct xive_irq_data *xd; - u64 val; + char buffer[128]; rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); if (rc) { @@ -1680,43 +1763,101 @@ static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d) seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", hw_irq, target, prio, lirq); - xd = irq_data_get_irq_handler_data(d); - val = xive_esb_read(xd, XIVE_ESB_GET); - seq_printf(m, "flags=%c%c%c PQ=%c%c", - xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', - xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', - xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', - val & XIVE_ESB_VAL_P ? 'P' : '-', - val & XIVE_ESB_VAL_Q ? 'Q' : '-'); + xive_irq_data_dump(irq_data_get_irq_chip_data(d), buffer, sizeof(buffer)); + seq_puts(m, buffer); seq_puts(m, "\n"); } -static int xive_core_debug_show(struct seq_file *m, void *private) +static int xive_irq_debug_show(struct seq_file *m, void *private) { unsigned int i; struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i); + + if (d) + xive_debug_show_irq(m, d); + } + return 0; +} +DEFINE_SHOW_ATTRIBUTE(xive_irq_debug); + +static int xive_ipi_debug_show(struct seq_file *m, void *private) +{ int cpu; if (xive_ops->debug_show) xive_ops->debug_show(m, private); - for_each_possible_cpu(cpu) - xive_debug_show_cpu(m, cpu); + for_each_online_cpu(cpu) + xive_debug_show_ipi(m, cpu); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(xive_ipi_debug); - for_each_irq_desc(i, desc) { - struct irq_data *d = irq_desc_get_irq_data(desc); +static void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio) +{ + int i; - if (d->domain == xive_irq_domain) - xive_debug_show_irq(m, d); + seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle); + if (q->qpage) { + for (i = 0; i < q->msk + 1; i++) { + if (!(i % 8)) + seq_printf(m, "%05d ", i); + seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i), + (i + 1) % 8 ? 
" " : "\n"); + } } + seq_puts(m, "\n"); +} + +static int xive_eq_debug_show(struct seq_file *m, void *private) +{ + int cpu = (long)m->private; + struct xive_cpu *xc = per_cpu(xive_cpu, cpu); + + if (xc) + xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority], + xive_irq_priority); return 0; } -DEFINE_SHOW_ATTRIBUTE(xive_core_debug); +DEFINE_SHOW_ATTRIBUTE(xive_eq_debug); + +static void xive_core_debugfs_create(void) +{ + struct dentry *xive_dir; + struct dentry *xive_eq_dir; + long cpu; + char name[16]; + + xive_dir = debugfs_create_dir("xive", arch_debugfs_dir); + if (IS_ERR(xive_dir)) + return; + + debugfs_create_file("ipis", 0400, xive_dir, + NULL, &xive_ipi_debug_fops); + debugfs_create_file("interrupts", 0400, xive_dir, + NULL, &xive_irq_debug_fops); + xive_eq_dir = debugfs_create_dir("eqs", xive_dir); + for_each_possible_cpu(cpu) { + snprintf(name, sizeof(name), "cpu%ld", cpu); + debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu, + &xive_eq_debug_fops); + } + debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi); + + if (xive_ops->debug_create) + xive_ops->debug_create(xive_dir); +} +#else +static inline void xive_core_debugfs_create(void) { } +#endif /* CONFIG_DEBUG_FS */ int xive_core_debug_init(void) { - if (xive_enabled()) - debugfs_create_file("xive", 0400, powerpc_debugfs_root, - NULL, &xive_core_debug_fops); + if (xive_enabled() && IS_ENABLED(CONFIG_DEBUG_FS)) + xive_core_debugfs_create(); + return 0; } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index 57e3f1540435..a0934b516933 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -13,6 +13,7 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/delay.h> @@ -21,7 +22,6 @@ #include <linux/kmemleak.h> #include <asm/machdep.h> -#include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/irq.h> @@ -41,6 +41,7 @@ static u32 xive_queue_shift; static u32 xive_pool_vps = XIVE_INVALID_VP; static struct kmem_cache *xive_provision_cache; static bool xive_has_single_esc; +bool xive_has_save_restore; int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) { @@ -62,6 +63,8 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) opal_flags = be64_to_cpu(flags); if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI) data->flags |= XIVE_IRQ_FLAG_STORE_EOI; + if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2) + data->flags |= XIVE_IRQ_FLAG_STORE_EOI; if (opal_flags & OPAL_XIVE_IRQ_LSI) data->flags |= XIVE_IRQ_FLAG_LSI; data->eoi_page = be64_to_cpu(eoi_page); @@ -412,7 +415,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) return; } - /* Grab it's CAM value */ + /* Grab its CAM value */ rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL); if (rc) { pr_err("Failed to get pool VP info CPU %d\n", cpu); @@ -458,6 +461,14 @@ void xive_native_sync_queue(u32 hw_irq) } EXPORT_SYMBOL_GPL(xive_native_sync_queue); +#ifdef CONFIG_DEBUG_FS +static int xive_native_debug_create(struct dentry *xive_dir) +{ + debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore); + return 0; +} +#endif + static const struct xive_ops xive_native_ops = { .populate_irq_data = xive_native_populate_irq_data, .configure_irq = xive_native_configure_irq, @@ -475,10 +486,13 @@ static const struct xive_ops xive_native_ops = { .get_ipi = xive_native_get_ipi, .put_ipi = 
xive_native_put_ipi, #endif /* CONFIG_SMP */ +#ifdef CONFIG_DEBUG_FS + .debug_create = xive_native_debug_create, +#endif /* CONFIG_DEBUG_FS */ .name = "native", }; -static bool xive_parse_provisioning(struct device_node *np) +static bool __init xive_parse_provisioning(struct device_node *np) { int rc; @@ -518,16 +532,16 @@ static bool xive_parse_provisioning(struct device_node *np) return true; } -static void xive_native_setup_pools(void) +static void __init xive_native_setup_pools(void) { /* Allocate a pool big enough */ - pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids); + pr_debug("Allocating VP block for pool size %u\n", nr_cpu_ids); xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids); if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP)) - pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n"); + pr_err("Failed to allocate pool VP, KVM might not function\n"); - pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n", + pr_debug("Pool VPs allocated at 0x%x for %u max CPUs\n", xive_pool_vps, nr_cpu_ids); } @@ -545,9 +559,7 @@ bool __init xive_native_init(void) struct device_node *np; struct resource r; void __iomem *tima; - struct property *prop; u8 max_prio = 7; - const __be32 *p; u32 val, cpu; s64 rc; @@ -565,12 +577,12 @@ bool __init xive_native_init(void) /* Resource 1 is HV window */ if (of_address_to_resource(np, 1, &r)) { pr_err("Failed to get thread mgmnt area resource\n"); - return false; + goto err_put; } tima = ioremap(r.start, resource_size(&r)); if (!tima) { pr_err("Failed to map thread mgmnt area\n"); - return false; + goto err_put; } /* Read number of priorities */ @@ -578,15 +590,16 @@ bool __init xive_native_init(void) max_prio = val - 1; /* Iterate the EQ sizes and pick one */ - of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) { + of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) { xive_queue_shift = val; if (val == PAGE_SHIFT) break; } /* Do we support single escalation */ - if (of_get_property(np, "single-escalation-support", NULL) != NULL) - xive_has_single_esc = true; + xive_has_single_esc = of_property_read_bool(np, "single-escalation-support"); + + xive_has_save_restore = of_property_read_bool(np, "vp-save-restore"); /* Configure Thread Management areas for KVM */ for_each_possible_cpu(cpu) @@ -595,19 +608,19 @@ bool __init xive_native_init(void) /* Resource 2 is OS window */ if (of_address_to_resource(np, 2, &r)) { pr_err("Failed to get thread mgmnt area resource\n"); - return false; + goto err_put; } xive_tima_os = r.start; - /* Grab size of provisionning pages */ + /* Grab size of provisioning pages */ xive_parse_provisioning(np); /* Switch the XIVE to exploitation mode */ rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL); if (rc) { pr_err("Switch to exploitation mode failed with error %lld\n", rc); - return false; + goto err_put; } /* Setup some dummy HV pool VPs */ @@ -617,10 +630,15 @@ bool __init xive_native_init(void) if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS, max_prio)) { opal_xive_reset(OPAL_XIVE_MODE_EMU); - return false; + goto err_put; } + of_node_put(np); pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); return true; + +err_put: + of_node_put(np); + return false; } static bool xive_native_provision_pages(void) @@ -752,6 +770,12 @@ bool xive_native_has_single_escalation(void) } EXPORT_SYMBOL_GPL(xive_native_has_single_escalation); +bool xive_native_has_save_restore(void) +{ + return xive_has_save_restore; +} +EXPORT_SYMBOL_GPL(xive_native_has_save_restore); + int 
xive_native_get_queue_info(u32 vp_id, u32 prio, u64 *out_qpage, u64 *out_qsize, @@ -776,7 +800,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio, if (out_qpage) *out_qpage = be64_to_cpu(qpage); if (out_qsize) - *out_qsize = be32_to_cpu(qsize); + *out_qsize = be64_to_cpu(qsize); if (out_qeoi_page) *out_qeoi_page = be64_to_cpu(qeoi_page); if (out_escalate_irq) diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index f143b6f111ac..5aedbe3e8e6a 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c @@ -7,12 +7,16 @@ #include <linux/types.h> #include <linux/irq.h> +#include <linux/seq_file.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> #include <linux/slab.h> #include <linux/spinlock.h> +#include <linux/bitmap.h> #include <linux/cpumask.h> #include <linux/mm.h> #include <linux/delay.h> @@ -44,7 +48,7 @@ struct xive_irq_bitmap { static LIST_HEAD(xive_irq_bitmaps); -static int xive_irq_bitmap_add(int base, int count) +static int __init xive_irq_bitmap_add(int base, int count) { struct xive_irq_bitmap *xibm; @@ -55,7 +59,7 @@ static int xive_irq_bitmap_add(int base, int count) spin_lock_init(&xibm->lock); xibm->base = base; xibm->count = count; - xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL); + xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL); if (!xibm->bitmap) { kfree(xibm); return -ENOMEM; @@ -67,6 +71,17 @@ static int xive_irq_bitmap_add(int base, int count) return 0; } +static void xive_irq_bitmap_remove_all(void) +{ + struct xive_irq_bitmap *xibm, *tmp; + + list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) { + list_del(&xibm->list); + bitmap_free(xibm->bitmap); + kfree(xibm); + } +} + static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm) { int irq; @@ -173,7 +188,7 @@ static long plpar_int_get_source_info(unsigned long flags, } while (plpar_busy_delay(rc)); if (rc) { - pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc); + pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc); return rc; } @@ -182,8 +197,8 @@ static long plpar_int_get_source_info(unsigned long flags, *trig_page = retbuf[2]; *esb_shift = retbuf[3]; - pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n", - retbuf[0], retbuf[1], retbuf[2], retbuf[3]); + pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n", + lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]); return 0; } @@ -200,8 +215,8 @@ static long plpar_int_set_source_config(unsigned long flags, long rc; - pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n", - flags, lisn, target, prio, sw_irq); + pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n", + flags, lisn, target, prio, sw_irq); do { @@ -210,7 +225,7 @@ static long plpar_int_set_source_config(unsigned long flags, } while (plpar_busy_delay(rc)); if (rc) { - pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n", + pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n", lisn, target, prio, rc); return rc; } @@ -227,7 +242,7 @@ static long plpar_int_get_source_config(unsigned long flags, unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; - pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn); + pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn); do { rc = 
plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn, @@ -235,7 +250,7 @@ static long plpar_int_get_source_config(unsigned long flags, } while (plpar_busy_delay(rc)); if (rc) { - pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n", + pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n", lisn, rc); return rc; } @@ -244,8 +259,8 @@ static long plpar_int_get_source_config(unsigned long flags, *prio = retbuf[1]; *sw_irq = retbuf[2]; - pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n", - retbuf[0], retbuf[1], retbuf[2]); + pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n", + retbuf[0], retbuf[1], retbuf[2]); return 0; } @@ -273,8 +288,8 @@ static long plpar_int_get_queue_info(unsigned long flags, *esn_page = retbuf[0]; *esn_size = retbuf[1]; - pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n", - retbuf[0], retbuf[1]); + pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n", + target, priority, retbuf[0], retbuf[1]); return 0; } @@ -289,8 +304,8 @@ static long plpar_int_set_queue_config(unsigned long flags, { long rc; - pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n", - flags, target, priority, qpage, qsize); + pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n", + flags, target, priority, qpage, qsize); do { rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target, @@ -298,7 +313,7 @@ static long plpar_int_set_queue_config(unsigned long flags, } while (plpar_busy_delay(rc)); if (rc) { - pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n", + pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n", target, priority, qpage, rc); return rc; } @@ -315,7 +330,7 @@ static long plpar_int_sync(unsigned long flags, unsigned long lisn) } while (plpar_busy_delay(rc)); if (rc) { - pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc); + pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc); return rc; } @@ -333,8 +348,8 @@ static long plpar_int_esb(unsigned long flags, unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; - pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n", - flags, lisn, offset, in_data); + pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n", + flags, lisn, offset, in_data); do { rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset, @@ -342,7 +357,7 @@ static long plpar_int_esb(unsigned long flags, } while (plpar_busy_delay(rc)); if (rc) { - pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n", + pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n", lisn, offset, rc); return rc; } @@ -425,6 +440,7 @@ static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data) data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); if (!data->trig_mmio) { + iounmap(data->eoi_mmio); pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq); return -ENOMEM; } @@ -653,6 +669,9 @@ static int xive_spapr_debug_show(struct seq_file *m, void *private) struct xive_irq_bitmap *xibm; char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + list_for_each_entry(xibm, &xive_irq_bitmaps, list) { memset(buf, 0, PAGE_SIZE); bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count); @@ -687,7 +706,7 @@ static const struct xive_ops xive_spapr_ops = { /* * get max priority from "/ibm,plat-res-int-priorities" */ -static bool xive_get_max_prio(u8 *max_prio) +static bool __init xive_get_max_prio(u8 *max_prio) { struct device_node *rootdn; 
const __be32 *reg; @@ -701,6 +720,7 @@ static bool xive_get_max_prio(u8 *max_prio) } reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len); + of_node_put(rootdn); if (!reg) { pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n"); return false; @@ -741,7 +761,7 @@ static bool xive_get_max_prio(u8 *max_prio) return true; } -static const u8 *get_vec5_feature(unsigned int index) +static const u8 *__init get_vec5_feature(unsigned int index) { unsigned long root, chosen; int size; @@ -795,12 +815,11 @@ bool __init xive_spapr_init(void) struct device_node *np; struct resource r; void __iomem *tima; - struct property *prop; u8 max_prio; u32 val; u32 len; const __be32 *reg; - int i; + int i, err; if (xive_spapr_disabled()) return false; @@ -816,35 +835,38 @@ bool __init xive_spapr_init(void) /* Resource 1 is the OS ring TIMA */ if (of_address_to_resource(np, 1, &r)) { pr_err("Failed to get thread mgmnt area resource\n"); - return false; + goto err_put; } tima = ioremap(r.start, resource_size(&r)); if (!tima) { pr_err("Failed to map thread mgmnt area\n"); - return false; + goto err_put; } if (!xive_get_max_prio(&max_prio)) - return false; + goto err_unmap; /* Feed the IRQ number allocator with the ranges given in the DT */ reg = of_get_property(np, "ibm,xive-lisn-ranges", &len); if (!reg) { pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n"); - return false; + goto err_unmap; } if (len % (2 * sizeof(u32)) != 0) { pr_err("invalid 'ibm,xive-lisn-ranges' property\n"); - return false; + goto err_unmap; } - for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) - xive_irq_bitmap_add(be32_to_cpu(reg[0]), - be32_to_cpu(reg[1])); + for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) { + err = xive_irq_bitmap_add(be32_to_cpu(reg[0]), + be32_to_cpu(reg[1])); + if (err < 0) + goto err_mem_free; + } /* Iterate the EQ sizes and pick one */ - of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) { + of_property_for_each_u32(np, "ibm,xive-eq-sizes", val) { xive_queue_shift = val; if (val == PAGE_SHIFT) break; @@ -852,10 +874,19 @@ bool __init xive_spapr_init(void) /* Initialize XIVE core with our backend */ if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio)) - return false; + goto err_mem_free; + of_node_put(np); pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); return true; + +err_mem_free: + xive_irq_bitmap_remove_all(); +err_unmap: + iounmap(tima); +err_put: + of_node_put(np); + return false; } machine_arch_initcall(pseries, xive_core_debug_init); diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h index 504e7edce358..fe6d95d54af9 100644 --- a/arch/powerpc/sysdev/xive/xive-internal.h +++ b/arch/powerpc/sysdev/xive/xive-internal.h @@ -58,6 +58,7 @@ struct xive_ops { void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc); #endif int (*debug_show)(struct seq_file *m, void *private); + int (*debug_create)(struct dentry *xive_dir); const char *name; }; @@ -72,5 +73,6 @@ static inline u32 xive_alloc_order(u32 queue_shift) } extern bool xive_cmdline_disabled; +extern bool xive_has_save_restore; #endif /* __XIVE_INTERNAL_H */ |
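
Both the XICS and XIVE halves of the changes above move from flat, map-based IRQ domains to hierarchy-capable ones: a named fwnode, irq_domain_create_tree(), and .alloc/.translate/.free callbacks that bind the chip, chip data and fasteoi flow handler through irq_domain_set_info(). The following is only a minimal, self-contained sketch of that generic kernel pattern, not code from the patches; the foo_* names are invented for illustration.

/* Hypothetical sketch of the hierarchical IRQ domain pattern used above. */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip foo_irq_chip = {
	.name = "FOO",
};

static int foo_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
				unsigned long *hwirq, unsigned int *type)
{
	/* One-cell interrupt specifier: cell 0 is the hardware number */
	if (WARN_ON(fwspec->param_count < 1))
		return -EINVAL;
	*hwirq = fwspec->param[0];
	*type = IRQ_TYPE_EDGE_RISING;
	return 0;
}

static int foo_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type;
	int i, rc;

	rc = foo_domain_translate(domain, fwspec, &hwirq, &type);
	if (rc)
		return rc;

	/* Bind chip, chip data and flow handler for every allocated virq */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, hwirq + i, &foo_irq_chip,
				    domain->host_data, handle_fasteoi_irq,
				    NULL, NULL);
	return 0;
}

static void foo_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	/* Nothing allocated per interrupt in this sketch */
}

static const struct irq_domain_ops foo_domain_ops = {
	.alloc		= foo_domain_alloc,
	.free		= foo_domain_free,
	.translate	= foo_domain_translate,
};

static int __init foo_init_domain(void)
{
	struct fwnode_handle *fn;
	struct irq_domain *domain;

	fn = irq_domain_alloc_named_fwnode("FOO");
	if (!fn)
		return -ENOMEM;

	/* Tree domain keyed by the named fwnode, as in xics_allocate_domain() */
	domain = irq_domain_create_tree(fn, &foo_domain_ops, NULL);
	if (!domain) {
		irq_domain_free_fwnode(fn);
		return -ENOMEM;
	}
	return 0;
}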

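The debugfs rework in xive/common.c replaces the single "xive" file with a directory of per-topic entries under arch_debugfs_dir ("ipis", "interrupts", per-CPU queue dumps, plus a writable "store-eoi" boolean). Below is a minimal, hypothetical illustration of that DEFINE_SHOW_ATTRIBUTE / debugfs_create_* pattern, not code from the patches; the bar_* names are invented for the example.

/* Hypothetical sketch of the debugfs layout style used above. */
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/seq_file.h>

static bool bar_feature_enabled = true;

static int bar_state_show(struct seq_file *m, void *private)
{
	/* One read-only file dumping some driver state */
	seq_printf(m, "feature: %s\n", bar_feature_enabled ? "on" : "off");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(bar_state);

static void __init bar_debugfs_create(void)
{
	struct dentry *dir = debugfs_create_dir("bar", NULL);

	if (IS_ERR(dir))
		return;

	/* Read-only dump, plus a writable boolean knob like "store-eoi" */
	debugfs_create_file("state", 0400, dir, NULL, &bar_state_fops);
	debugfs_create_bool("feature", 0600, dir, &bar_feature_enabled);
}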