author    Linus Torvalds <torvalds@linux-foundation.org>    2016-12-16 09:26:42 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-12-16 09:26:42 -0800
commit    de399813b521ea7e38bbfb5e5b620b5e202e5783 (patch)
tree      ceb8302f9d6a7a4f2e25b64c5dc42c1fb80b435b
parent    57ca04ab440168e101da746ef9edd1ec583b7214 (diff)
parent    c6f6634721c871bfab4235e1cbcad208d3063798 (diff)
Merge tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Highlights include:

   - Support for the kexec_file_load() syscall, which is a prereq for
     secure and trusted boot.

   - Prevent kernel execution of userspace on P9 Radix (similar to
     SMEP/PXN).

   - Sort the exception tables at build time, to save time at boot, and
     store them as relative offsets to save space in the kernel image &
     memory.

   - Allow building the kernel with thin archives, which should allow
     us to build an allyesconfig once some other fixes land.

   - Build fixes to allow us to correctly rebuild when changing the
     kernel endian from big to little or vice versa.

   - Plumbing so that we can avoid doing a full mm TLB flush on P9
     Radix.

   - Initial stack protector support (-fstack-protector).

   - Support for dumping the radix (aka. Linux) and hash page tables
     via debugfs.

   - Fix an oops in cxl coredump generation when cxl_get_fd() is used.

   - Freescale updates from Scott: "Highlights include 8xx hugepage
     support, qbman fixes/cleanup, device tree updates, and some misc
     cleanup."

   - Many and varied fixes and minor enhancements as always.

  Thanks to:
    Alexey Kardashevskiy, Andrew Donnellan, Aneesh Kumar K.V, Anshuman
    Khandual, Anton Blanchard, Balbir Singh, Bartlomiej Zolnierkiewicz,
    Christophe Jaillet, Christophe Leroy, Denis Kirjanov, Elimar
    Riesebieter, Frederic Barrat, Gautham R. Shenoy, Geliang Tang,
    Geoff Levand, Jack Miller, Johan Hovold, Lars-Peter Clausen, Libin,
    Madhavan Srinivasan, Michael Neuling, Nathan Fontenot, Naveen N.
    Rao, Nicholas Piggin, Pan Xinhui, Peter Senna Tschudin, Rashmica
    Gupta, Rui Teng, Russell Currey, Scott Wood, Simon Guo, Suraj
    Jitindar Singh, Thiago Jung Bauermann, Tobias Klauser, Vaibhav
    Jain"

[ And thanks to Michael, who took time off from a new baby to get this
  pull request done.   - Linus ]

* tag 'powerpc-4.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (174 commits)
  powerpc/fsl/dts: add FMan node for t1042d4rdb
  powerpc/fsl/dts: add sg_2500_aqr105_phy4 alias on t1024rdb
  powerpc/fsl/dts: add QMan and BMan nodes on t1024
  powerpc/fsl/dts: add QMan and BMan nodes on t1023
  soc/fsl/qman: test: use DEFINE_SPINLOCK()
  powerpc/fsl-lbc: use DEFINE_SPINLOCK()
  powerpc/8xx: Implement support of hugepages
  powerpc: get hugetlbpage handling more generic
  powerpc: port 64 bits pgtable_cache to 32 bits
  powerpc/boot: Request no dynamic linker for boot wrapper
  soc/fsl/bman: Use resource_size instead of computation
  soc/fsl/qe: use builtin_platform_driver
  powerpc/fsl_pmc: use builtin_platform_driver
  powerpc/83xx/suspend: use builtin_platform_driver
  powerpc/ftrace: Fix the comments for ftrace_modify_code
  powerpc/perf: macros for power9 format encoding
  powerpc/perf: power9 raw event format encoding
  powerpc/perf: update attribute_group data structure
  powerpc/perf: factor out the event format field
  powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown
  ...
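[Editor's note] The kexec_file_load() support called out above (CONFIG_KEXEC_FILE later in this diff) is driven from user space purely through file descriptors rather than pre-built segment lists. A minimal, hypothetical sketch of the call follows; it assumes a libc that already exposes the syscall number, and the image paths are made up for illustration:

  /* Hypothetical user-space sketch, not part of this series. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
      const char *cmdline = "root=/dev/sda2 ro";              /* illustrative */
      int kernel_fd = open("/boot/vmlinux", O_RDONLY);        /* assumed path */
      int initrd_fd = open("/boot/initrd.img", O_RDONLY);     /* assumed path */

      if (kernel_fd < 0 || initrd_fd < 0) {
          perror("open");
          return 1;
      }

      /* kexec_file_load(kernel_fd, initrd_fd, cmdline_len, cmdline, flags);
       * cmdline_len counts the trailing NUL; flags = 0 for a plain load. */
      if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
                  strlen(cmdline) + 1, cmdline, 0UL) < 0) {
          perror("kexec_file_load");
          return 1;
      }
      return 0;
  }

Rebooting into the loaded image still goes through the usual kexec reboot path; only the loading step changes with the new syscall.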
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt1
-rw-r--r--arch/powerpc/Kconfig41
-rw-r--r--arch/powerpc/Kconfig.debug16
-rw-r--r--arch/powerpc/Makefile14
-rw-r--r--arch/powerpc/boot/Makefile30
-rw-r--r--arch/powerpc/boot/dts/fsl/t1023rdb.dts29
-rw-r--r--arch/powerpc/boot/dts/fsl/t1023si-post.dtsi103
-rw-r--r--arch/powerpc/boot/dts/fsl/t1024qds.dts29
-rw-r--r--arch/powerpc/boot/dts/fsl/t1024rdb.dts33
-rw-r--r--arch/powerpc/boot/dts/fsl/t1042d4rdb.dts52
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240rdb.dts4
-rw-r--r--arch/powerpc/boot/ps3-head.S5
-rw-r--r--arch/powerpc/boot/ps3.c8
-rwxr-xr-xarch/powerpc/boot/wrapper24
-rw-r--r--arch/powerpc/configs/amigaone_defconfig10
-rw-r--r--arch/powerpc/configs/cell_defconfig7
-rw-r--r--arch/powerpc/configs/chrp32_defconfig10
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config6
-rw-r--r--arch/powerpc/configs/g5_defconfig5
-rw-r--r--arch/powerpc/configs/maple_defconfig9
-rw-r--r--arch/powerpc/configs/pasemi_defconfig3
-rw-r--r--arch/powerpc/configs/pmac32_defconfig15
-rw-r--r--arch/powerpc/configs/powernv_defconfig9
-rw-r--r--arch/powerpc/configs/ppc64_defconfig14
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig6
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig10
-rw-r--r--arch/powerpc/configs/pseries_defconfig16
-rw-r--r--arch/powerpc/configs/storcenter_defconfig5
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h1
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgalloc.h44
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h43
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h (renamed from arch/powerpc/include/asm/book3s/64/hugetlb-radix.h)28
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h18
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h28
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h2
-rw-r--r--arch/powerpc/include/asm/cache.h3
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h107
-rw-r--r--arch/powerpc/include/asm/debug.h2
-rw-r--r--arch/powerpc/include/asm/futex.h14
-rw-r--r--arch/powerpc/include/asm/head-64.h8
-rw-r--r--arch/powerpc/include/asm/hugetlb.h21
-rw-r--r--arch/powerpc/include/asm/hvcall.h30
-rw-r--r--arch/powerpc/include/asm/io.h19
-rw-r--r--arch/powerpc/include/asm/kexec.h16
-rw-r--r--arch/powerpc/include/asm/kprobes.h7
-rw-r--r--arch/powerpc/include/asm/machdep.h4
-rw-r--r--arch/powerpc/include/asm/mmu-8xx.h35
-rw-r--r--arch/powerpc/include/asm/mmu.h23
-rw-r--r--arch/powerpc/include/asm/mmu_context.h20
-rw-r--r--arch/powerpc/include/asm/module.h4
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgalloc.h44
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h45
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h1
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-4k.h3
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-64k.h3
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h19
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h37
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h2
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h20
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/prom.h6
-rw-r--r--arch/powerpc/include/asm/reg.h5
-rw-r--r--arch/powerpc/include/asm/reg_8xx.h2
-rw-r--r--arch/powerpc/include/asm/smp.h2
-rw-r--r--arch/powerpc/include/asm/stackprotector.h40
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/uaccess.h52
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h6
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/Makefile11
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S2
-rw-r--r--arch/powerpc/kernel/eeh.c84
-rw-r--r--arch/powerpc/kernel/eeh_driver.c12
-rw-r--r--arch/powerpc/kernel/eeh_event.c4
-rw-r--r--arch/powerpc/kernel/eeh_pe.c4
-rw-r--r--arch/powerpc/kernel/entry_32.S6
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S6
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/ftrace.c12
-rw-r--r--arch/powerpc/kernel/head_64.S19
-rw-r--r--arch/powerpc/kernel/head_8xx.S119
-rw-r--r--arch/powerpc/kernel/kexec_elf_64.c663
-rw-r--r--arch/powerpc/kernel/kprobes.c31
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c2
-rw-r--r--arch/powerpc/kernel/machine_kexec_file_64.c338
-rw-r--r--arch/powerpc/kernel/mce.c3
-rw-r--r--arch/powerpc/kernel/misc_32.S2
-rw-r--r--arch/powerpc/kernel/misc_64.S6
-rw-r--r--arch/powerpc/kernel/module_64.c5
-rw-r--r--arch/powerpc/kernel/of_platform.c7
-rw-r--r--arch/powerpc/kernel/process.c24
-rw-r--r--arch/powerpc/kernel/prom.c23
-rw-r--r--arch/powerpc/kernel/prom_init.c295
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kernel/smp.c6
-rw-r--r--arch/powerpc/kernel/traps.c48
-rw-r--r--arch/powerpc/lib/checksum_32.S47
-rw-r--r--arch/powerpc/lib/checksum_64.S20
-rw-r--r--arch/powerpc/lib/copy_32.S55
-rw-r--r--arch/powerpc/lib/copyuser_64.S271
-rw-r--r--arch/powerpc/lib/copyuser_power7.S20
-rw-r--r--arch/powerpc/lib/ldstfp.S24
-rw-r--r--arch/powerpc/lib/sstep.c16
-rw-r--r--arch/powerpc/lib/string.S11
-rw-r--r--arch/powerpc/lib/string_64.S16
-rw-r--r--arch/powerpc/mm/Makefile5
-rw-r--r--arch/powerpc/mm/copro_fault.c3
-rw-r--r--arch/powerpc/mm/dump_hashpagetable.c551
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables.c442
-rw-r--r--arch/powerpc/mm/fault.c17
-rw-r--r--arch/powerpc/mm/hash_native_64.c10
-rw-r--r--arch/powerpc/mm/hugetlbpage.c216
-rw-r--r--arch/powerpc/mm/init-common.c107
-rw-r--r--arch/powerpc/mm/init_64.c77
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c6
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c60
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c3
-rw-r--r--arch/powerpc/mm/pgtable-radix.c40
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/powerpc/mm/pgtable_32.c37
-rw-r--r--arch/powerpc/mm/tlb-radix.c18
-rw-r--r--arch/powerpc/mm/tlb_nohash.c21
-rw-r--r--arch/powerpc/perf/isa207-common.c91
-rw-r--r--arch/powerpc/perf/isa207-common.h30
-rw-r--r--arch/powerpc/perf/power8-pmu.c39
-rw-r--r--arch/powerpc/perf/power9-pmu.c114
-rw-r--r--arch/powerpc/platforms/40x/Kconfig12
-rw-r--r--arch/powerpc/platforms/44x/Kconfig56
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c6
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c2
-rw-r--r--arch/powerpc/platforms/85xx/smp.c8
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/Kconfig11
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype1
-rw-r--r--arch/powerpc/platforms/cell/Kconfig9
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c2
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c2
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c4
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c4
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal.c24
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c40
-rw-r--r--arch/powerpc/platforms/powernv/pci.c6
-rw-r--r--arch/powerpc/platforms/powernv/setup.c6
-rw-r--r--arch/powerpc/platforms/ps3/setup.c4
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig11
-rw-r--r--arch/powerpc/platforms/pseries/Makefile4
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c34
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c142
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c8
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c7
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c (renamed from arch/powerpc/kernel/ibmebus.c)314
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c24
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c1
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h19
-rw-r--r--arch/powerpc/platforms/pseries/setup.c4
-rw-r--r--arch/powerpc/platforms/pseries/vio.c (renamed from arch/powerpc/kernel/vio.c)3
-rw-r--r--arch/powerpc/purgatory/.gitignore2
-rw-r--r--arch/powerpc/purgatory/Makefile15
-rw-r--r--arch/powerpc/purgatory/trampoline.S128
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c2
-rw-r--r--arch/powerpc/sysdev/fsl_pmc.c6
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c8
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c48
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c6
-rw-r--r--arch/powerpc/xmon/xmon.c6
-rw-r--r--arch/x86/kernel/crash.c37
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c48
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/misc/cxl/api.c147
-rw-r--r--drivers/misc/cxl/context.c17
-rw-r--r--drivers/misc/cxl/cxl.h6
-rw-r--r--drivers/misc/cxl/debugfs.c6
-rw-r--r--drivers/misc/cxl/file.c5
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/cxl/irq.c2
-rw-r--r--drivers/misc/cxl/native.c20
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/phb.c2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c10
-rw-r--r--drivers/soc/fsl/qbman/bman.c8
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c3
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c17
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h1
-rw-r--r--drivers/soc/fsl/qbman/qman.c233
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c3
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c41
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h17
-rw-r--r--drivers/soc/fsl/qbman/qman_test_api.c27
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c38
-rw-r--r--drivers/soc/fsl/qe/qe.c6
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c328
-rw-r--r--include/linux/kexec.h36
-rw-r--r--include/soc/fsl/qman.h62
-rw-r--r--kernel/kexec_file.c141
-rw-r--r--kernel/kexec_internal.h16
-rw-r--r--scripts/sortextable.c2
-rw-r--r--tools/testing/selftests/powerpc/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/context_switch.c2
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/null_syscall.c157
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h2
-rw-r--r--tools/testing/selftests/powerpc/copyloops/validate.c2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr.h10
-rw-r--r--tools/testing/selftests/powerpc/include/basic_asm.h (renamed from tools/testing/selftests/powerpc/basic_asm.h)37
-rw-r--r--tools/testing/selftests/powerpc/include/fpu_asm.h (renamed from tools/testing/selftests/powerpc/fpu_asm.h)0
-rw-r--r--tools/testing/selftests/powerpc/include/gpr_asm.h (renamed from tools/testing/selftests/powerpc/gpr_asm.h)0
-rw-r--r--tools/testing/selftests/powerpc/include/instructions.h (renamed from tools/testing/selftests/powerpc/instructions.h)0
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h145
-rw-r--r--tools/testing/selftests/powerpc/include/subunit.h (renamed from tools/testing/selftests/powerpc/subunit.h)0
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h (renamed from tools/testing/selftests/powerpc/utils.h)0
-rw-r--r--tools/testing/selftests/powerpc/include/vmx_asm.h (renamed from tools/testing/selftests/powerpc/vmx_asm.h)0
-rw-r--r--tools/testing/selftests/powerpc/include/vsx_asm.h (renamed from tools/testing/selftests/powerpc/vsx_asm.h)0
-rw-r--r--tools/testing/selftests/powerpc/lib/reg.S397
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_asm.S4
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_asm.S4
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_asm.S4
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c143
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h39
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c37
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.c6
-rw-r--r--tools/testing/selftests/powerpc/primitives/asm/firmware.h0
l---------tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h1
-rw-r--r--tools/testing/selftests/powerpc/primitives/asm/processor.h0
-rw-r--r--tools/testing/selftests/powerpc/primitives/linux/stringify.h0
-rw-r--r--tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c21
-rw-r--r--tools/testing/selftests/powerpc/ptrace/.gitignore10
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile14
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c123
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h74
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tar.c135
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tar.h50
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c158
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c169
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c174
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c185
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c168
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c160
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c168
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c117
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h127
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace.h711
-rw-r--r--tools/testing/selftests/powerpc/reg.h60
-rw-r--r--tools/testing/selftests/powerpc/signal/signal.S2
-rw-r--r--tools/testing/selftests/powerpc/stringloops/memcmp.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal.S10
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h2
259 files changed, 8698 insertions, 2634 deletions
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index ff86fdcbd353..cdd7b48826c3 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -169,4 +169,5 @@ ti,tsc2003 I2C Touch-Screen Controller
ti,tmp102 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
ti,tmp103 Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface
ti,tmp275 Digital Temperature Sensor
+winbond,w83793 Winbond/Nuvoton H/W Monitor
winbond,wpct301 i2c trusted platform module (TPM)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c7f120aaa98f..3da87e198878 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -80,6 +80,7 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
config PPC
bool
default y
+ select BUILDTIME_EXTABLE_SORT
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
@@ -163,6 +164,7 @@ config PPC
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
+ select HAVE_CC_STACKPROTECTOR
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -396,6 +398,14 @@ config MPROFILE_KERNEL
depends on PPC64 && CPU_LITTLE_ENDIAN
def_bool !DISABLE_MPROFILE_KERNEL
+config USE_THIN_ARCHIVES
+ bool "Build the kernel using thin archives"
+ default n
+ select THIN_ARCHIVES
+ help
+ Build the kernel using thin archives.
+ If you're unsure say N.
+
config IOMMU_HELPER
def_bool PPC64
@@ -456,6 +466,19 @@ config KEXEC
interface is strongly in flux, so no good recommendation can be
made.
+config KEXEC_FILE
+ bool "kexec file based system call"
+ select KEXEC_CORE
+ select BUILD_BIN2C
+ depends on PPC64
+ depends on CRYPTO=y
+ depends on CRYPTO_SHA256=y
+ help
+ This is a new version of the kexec system call. This call is
+ file based and takes in file descriptors as system call arguments
+ for kernel and initramfs as opposed to a list of segments as is the
+ case for the older kexec call.
+
config RELOCATABLE
bool "Build a relocatable kernel"
depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
@@ -479,6 +502,15 @@ config RELOCATABLE
setting can still be useful to bootwrappers that need to know the
load address of the kernel (eg. u-boot/mkimage).
+config RELOCATABLE_TEST
+ bool "Test relocatable kernel"
+ depends on (PPC64 && RELOCATABLE)
+ default n
+ help
+ This runs the relocatable kernel at the address it was initially
+ loaded at, which tends to be non-zero and therefore test the
+ relocation code.
+
config CRASH_DUMP
bool "Build a kdump crash kernel"
depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
@@ -490,7 +522,7 @@ config CRASH_DUMP
config FA_DUMP
bool "Firmware-assisted dump"
- depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC
+ depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC_CORE
help
A robust mechanism to get reliable kernel crash dump with
assistance from firmware. This approach does not use kexec,
@@ -549,6 +581,13 @@ config ARCH_SPARSEMEM_DEFAULT
config SYS_SUPPORTS_HUGETLBFS
bool
+config ILLEGAL_POINTER_VALUE
+ hex
+ # This is roughly half way between the top of user space and the bottom
+ # of kernel space, which seems about as good as we can get.
+ default 0x5deadbeef0000000 if PPC64
+ default 0
+
source "mm/Kconfig"
config ARCH_MEMORY_PROBE
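[Editor's note] The ILLEGAL_POINTER_VALUE option added above is consumed elsewhere in the kernel (as POISON_POINTER_DELTA in include/linux/poison.h) to push list-poison values out of any mappable address range, so a dereference of a poisoned pointer faults instead of silently hitting valid memory. A stand-alone illustration using the PPC64 default from the hunk above — this is not kernel code, just a sketch of the arithmetic:

  #include <stdio.h>

  /* PPC64 default from the Kconfig hunk above. */
  #define ILLEGAL_POINTER_VALUE 0x5deadbeef0000000UL
  #define POISON_POINTER_DELTA  ILLEGAL_POINTER_VALUE
  #define LIST_POISON1 ((void *)(0x100 + POISON_POINTER_DELTA))
  #define LIST_POISON2 ((void *)(0x200 + POISON_POINTER_DELTA))

  int main(void)
  {
      /* Both values sit between the top of user space and the bottom of
       * the kernel mapping, as the Kconfig comment above describes. */
      printf("LIST_POISON1 = %p\n", LIST_POISON1);
      printf("LIST_POISON2 = %p\n", LIST_POISON2);
      return 0;
  }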
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 63292f64b25a..949258d412d0 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -354,4 +354,20 @@ config FAIL_IOMMU
If you are unsure, say N.
+config PPC_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
+ help
+ This option exports the state of the kernel pagetables to a
+ debugfs file. This is only useful for kernel developers who are
+ working in architecture specific areas of the kernel - probably
+ not a good idea to enable this feature in a production kernel.
+
+ If you are unsure, say N.
+
+config PPC_HTDUMP
+ def_bool y
+ depends on PPC_PTDUMP && PPC_BOOK3S
+
endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 617dece67924..31286fa7873c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -23,7 +23,7 @@ CROSS32AR := $(CROSS32_COMPILE)ar
ifeq ($(HAS_BIARCH),y)
ifeq ($(CROSS32_COMPILE),)
CROSS32CC := $(CC) -m32
-CROSS32AR := GNUTARGET=elf32-powerpc $(AR)
+KBUILD_ARFLAGS += --target=elf32-powerpc
endif
endif
@@ -85,7 +85,7 @@ ifeq ($(HAS_BIARCH),y)
override AS += -a$(BITS)
override LD += -m elf$(BITS)$(LDEMULATION)
override CC += -m$(BITS)
-override AR := GNUTARGET=elf$(BITS)-$(GNUTARGET) $(AR)
+KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET)
endif
LDFLAGS_vmlinux-y := -Bstatic
@@ -121,6 +121,7 @@ CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
ifeq ($(CONFIG_PPC_BOOK3S_64),y)
CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
+CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
else
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
endif
@@ -249,6 +250,7 @@ core-y += arch/powerpc/kernel/ \
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_KVM) += arch/powerpc/kvm/
core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/
+core-$(CONFIG_KEXEC_FILE) += arch/powerpc/purgatory/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
@@ -275,16 +277,16 @@ zImage: relocs_check
endif
$(BOOT_TARGETS1): vmlinux
- $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
$(BOOT_TARGETS2): vmlinux
- $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
bootwrapper_install:
- $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
%.dtb: scripts
- $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+ $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
# Used to create 'merged defconfigs'
# To use it $(call) it with the first argument as the base defconfig
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9d47f2efa830..e82f333cc84a 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -172,10 +172,6 @@ $(addprefix $(obj)/,$(libfdt) $(libfdtheader)): $(obj)/%: $(srctree)/scripts/dtc
$(obj)/empty.c:
$(Q)touch $@
-$(obj)/zImage.lds: $(obj)/%: $(srctree)/$(src)/%.S
- $(CROSS32CC) $(cpp_flags) -E -Wp,-MD,$(depfile) -P -Upowerpc \
- -D__ASSEMBLY__ -DLINKER_SCRIPT -o $@ $<
-
$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
$(Q)cp $< $@
@@ -357,17 +353,17 @@ $(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz
# Don't put the ramdisk on the pattern rule; when its missing make will try
# the pattern rule with less dependencies that also matches (even with the
# hard dependency listed).
-$(obj)/zImage.initrd.%: vmlinux $(wrapperbits)
+$(obj)/zImage.initrd.%: vmlinux $(wrapperbits) FORCE
$(call if_changed,wrap,$*,,,$(obj)/ramdisk.image.gz)
-$(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperbits)
+$(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperbits) FORCE
$(call if_changed,wrap,$(subst $(obj)/zImage.,,$@))
# dtbImage% - a dtbImage is a zImage with an embedded device tree blob
-$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb
+$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE
$(call if_changed,wrap,$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb
+$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE
$(call if_changed,wrap,$*,,$(obj)/$*.dtb)
# This cannot be in the root of $(src) as the zImage rule always adds a $(obj)
@@ -375,31 +371,31 @@ $(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb
$(obj)/vmlinux.strip: vmlinux
$(STRIP) -s -R .comment $< -o $@
-$(obj)/uImage: vmlinux $(wrapperbits)
+$(obj)/uImage: vmlinux $(wrapperbits) FORCE
$(call if_changed,wrap,uboot)
-$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb)
-$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb)
-$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb)
-$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
+$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
$(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb)
# Rule to build device tree blobs
diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
index 29757623e5ba..5ba6fbfca274 100644
--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
@@ -41,6 +41,27 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+
+ qman_fqd: qman-fqd {
+ size = <0 0x400000>;
+ alignment = <0 0x400000>;
+ };
+
+ qman_pfdr: qman-pfdr {
+ size = <0 0x2000000>;
+ alignment = <0 0x2000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -72,6 +93,14 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
+ qportals: qman-portals@ff6000000 {
+ ranges = <0x0 0xf 0xf6000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index 6e0b4892a740..da2894c59479 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -34,6 +34,21 @@
#include <dt-bindings/thermal/thermal.h>
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_fqd {
+ compatible = "fsl,qman-fqd";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
+&qman_pfdr {
+ compatible = "fsl,qman-pfdr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -180,6 +195,92 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ cell-index = <0x0>;
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ cell-index = <0x1>;
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ cell-index = <2>;
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ cell-index = <0x3>;
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ cell-index = <0x4>;
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ cell-index = <0x5>;
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+};
+
+&qportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ qportal0: qman-portal@0 {
+ compatible = "fsl,qman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <104 0x2 0 0>;
+ cell-index = <0x0>;
+ };
+ qportal1: qman-portal@4000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <106 0x2 0 0>;
+ cell-index = <0x1>;
+ };
+ qportal2: qman-portal@8000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <108 0x2 0 0>;
+ cell-index = <0x2>;
+ };
+ qportal3: qman-portal@c000 {
+ compatible = "fsl,qman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <110 0x2 0 0>;
+ cell-index = <0x3>;
+ };
+ qportal4: qman-portal@10000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <112 0x2 0 0>;
+ cell-index = <0x4>;
+ };
+ qportal5: qman-portal@14000 {
+ compatible = "fsl,qman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <114 0x2 0 0>;
+ cell-index = <0x5>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -413,6 +514,8 @@
};
/include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-qman3.dtsi"
+/include/ "qoriq-bman1.dtsi"
/include/ "qoriq-fman3l-0.dtsi"
/include/ "qoriq-fman3-0-10g-0-best-effort.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1024qds.dts b/arch/powerpc/boot/dts/fsl/t1024qds.dts
index 772143da367f..d6858b7cd93f 100644
--- a/arch/powerpc/boot/dts/fsl/t1024qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024qds.dts
@@ -41,6 +41,27 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+
+ qman_fqd: qman-fqd {
+ size = <0 0x400000>;
+ alignment = <0 0x400000>;
+ };
+
+ qman_pfdr: qman-pfdr {
+ size = <0 0x2000000>;
+ alignment = <0 0x2000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -80,6 +101,14 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
+ qportals: qman-portals@ff6000000 {
+ ranges = <0x0 0xf 0xf6000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
index 302cdd22b4bb..73a645324bc1 100644
--- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
@@ -41,6 +41,31 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ aliases {
+ sg_2500_aqr105_phy4 = &sg_2500_aqr105_phy4;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+
+ qman_fqd: qman-fqd {
+ size = <0 0x400000>;
+ alignment = <0 0x400000>;
+ };
+
+ qman_pfdr: qman-pfdr {
+ size = <0 0x2000000>;
+ alignment = <0 0x2000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -82,6 +107,14 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
+ qportals: qman-portals@ff6000000 {
+ ranges = <0x0 0xf 0xf6000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
index 2a5a90dd272e..fcd2aeb5b8ac 100644
--- a/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
@@ -48,6 +48,58 @@
"fsl,deepsleep-cpld";
};
};
+
+ soc: soc@ffe000000 {
+ fman0: fman@400000 {
+ ethernet@e0000 {
+ phy-handle = <&phy_sgmii_0>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@e2000 {
+ phy-handle = <&phy_sgmii_1>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@e4000 {
+ phy-handle = <&phy_sgmii_2>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@e6000 {
+ phy-handle = <&phy_rgmii_0>;
+ phy-connection-type = "rgmii";
+ };
+
+ ethernet@e8000 {
+ phy-handle = <&phy_rgmii_1>;
+ phy-connection-type = "rgmii";
+ };
+
+ mdio0: mdio@fc000 {
+ phy_sgmii_0: ethernet-phy@02 {
+ reg = <0x02>;
+ };
+
+ phy_sgmii_1: ethernet-phy@03 {
+ reg = <0x03>;
+ };
+
+ phy_sgmii_2: ethernet-phy@01 {
+ reg = <0x01>;
+ };
+
+ phy_rgmii_0: ethernet-phy@04 {
+ reg = <0x04>;
+ };
+
+ phy_rgmii_1: ethernet-phy@05 {
+ reg = <0x05>;
+ };
+ };
+ };
+ };
+
};
#include "t1042si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index cc0a264b8acb..8166c660712a 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -125,6 +125,10 @@
};
i2c@118000 {
+ hwmon@2f {
+ compatible = "winbond,w83793";
+ reg = <0x2f>;
+ };
eeprom@52 {
compatible = "at24,24c256";
reg = <0x52>;
diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
index b6fcbaf5027b..3dc44b05fb97 100644
--- a/arch/powerpc/boot/ps3-head.S
+++ b/arch/powerpc/boot/ps3-head.S
@@ -57,11 +57,6 @@ __system_reset_overlay:
bctr
1:
- /* Save the value at addr zero for a null pointer write check later. */
-
- li r4, 0
- lwz r3, 0(r4)
-
/* Primary delays then goes to _zimage_start in wrapper. */
or 31, 31, 31 /* db16cyc */
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index 4ec2d86d3c50..a05558a7e51a 100644
--- a/arch/powerpc/boot/ps3.c
+++ b/arch/powerpc/boot/ps3.c
@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
flush_cache((void *)0x100, 512);
}
-void platform_init(unsigned long null_check)
+void platform_init(void)
{
const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
void *chosen;
unsigned long ft_addr;
u64 rm_size;
- unsigned long val;
console_ops.write = ps3_console_write;
platform_ops.exit = ps3_exit;
@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
printf(" flat tree at 0x%lx\n\r", ft_addr);
- val = *(unsigned long *)0;
-
- if (val != null_check)
- printf("null check failed: %lx != %lx\n\r", val, null_check);
-
((kernel_entry_t)0)(ft_addr, 0, NULL);
ps3_exit();
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 404b3aabdb4d..76fe3ccfd381 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -181,6 +181,28 @@ case "$elfformat" in
elf32-powerpc) format=elf32ppc ;;
esac
+ld_version()
+{
+ # Poached from scripts/ld-version.sh, but we don't want to call that because
+ # this script (wrapper) is distributed separately from the kernel source.
+ # Extract linker version number from stdin and turn into single number.
+ awk '{
+ gsub(".*\\)", "");
+ gsub(".*version ", "");
+ gsub("-.*", "");
+ split($1,a, ".");
+ print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
+ exit
+ }'
+}
+
+# Do not include PT_INTERP segment when linking pie. Non-pie linking
+# just ignores this option.
+LD_VERSION=$(${CROSS}ld --version | ld_version)
+LD_NO_DL_MIN_VERSION=$(echo 2.26 | ld_version)
+if [ "$LD_VERSION" -ge "$LD_NO_DL_MIN_VERSION" ] ; then
+ nodl="--no-dynamic-linker"
+fi
platformo=$object/"$platform".o
lds=$object/zImage.lds
@@ -446,7 +468,7 @@ if [ "$platform" != "miboot" ]; then
text_start="-Ttext $link_address"
fi
#link everything
- ${CROSS}ld -m $format -T $lds $text_start $pie -o "$ofile" \
+ ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" \
$platformo $tmp $object/wrapper.a
rm $tmp
fi
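[Editor's note] The wrapper change above folds the linker's "major.minor.patch" version into a single integer so the 2.26 cut-off for --no-dynamic-linker can be tested with plain arithmetic. The same encoding, sketched stand-alone in C (the function name is made up for illustration):

  #include <stdio.h>

  /* Mirrors the wrapper's ld_version() awk: M.m.p -> one comparable number. */
  static unsigned long ld_version_num(unsigned long major, unsigned long minor,
                                      unsigned long patch)
  {
      return major * 100000000UL + minor * 1000000UL + patch * 10000UL;
  }

  int main(void)
  {
      unsigned long have = ld_version_num(2, 25, 1);   /* e.g. binutils 2.25.1 */
      unsigned long want = ld_version_num(2, 26, 0);   /* wrapper's cut-off */

      printf("--no-dynamic-linker: %s\n",
             have >= want ? "passed to ld" : "omitted");
      return 0;
  }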
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index 8b83ce8a01e7..8d3e3c41258d 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -45,12 +45,6 @@ CONFIG_PARPORT_PC_FIFO=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-# CONFIG_IDEPCI_PCIBUS_ORDER is not set
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_BLK_DEV_VIA82CXXX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -61,6 +55,10 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
# CONFIG_SCSI_SYM53C8XX_MMIO is not set
+CONFIG_ATA=y
+CONFIG_PATA_SIL680=y
+CONFIG_PATA_VIA=y
+CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_VORTEX=y
CONFIG_8139CP=y
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 7b6f30dece34..2d7fcbe047ac 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -108,16 +108,15 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_IDE=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
-CONFIG_BLK_DEV_SIIMAGE=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_ATA=y
CONFIG_SATA_PROMISE=y
+CONFIG_PATA_ARTOP=y
CONFIG_PATA_PDC2027X=m
+CONFIG_PATA_SIL680=y
+CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index ac9a50da2dc6..1f6f90cd8aff 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -42,12 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_SL82C105=y
-CONFIG_BLK_DEV_VIA82CXXX=y
-CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -56,6 +50,10 @@ CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
+CONFIG_ATA=y
+CONFIG_PATA_VIA=y
+CONFIG_PATA_WINBOND=y
+CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_PCNET32=y
CONFIG_NET_TULIP=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 1a61e81ab0cd..cc49c95494da 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -44,6 +44,7 @@ CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAME_WARN=1024
CONFIG_FTL=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_HIGH_RES_TIMERS=y
@@ -104,8 +105,13 @@ CONFIG_PACKET=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PERF_EVENTS=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_QNX4FS_FS=m
CONFIG_RCU_TRACE=y
+CONFIG_RESET_CONTROLLER=y
CONFIG_ROOT_NFS=y
CONFIG_SYSV_FS=m
CONFIG_SYSVIPC=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 3b2511c090d8..e18f2e06553f 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -60,10 +60,6 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -73,6 +69,7 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_ATA=y
CONFIG_SATA_SVW=y
+CONFIG_PATA_MACIO=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 27abfab31219..c4018179e219 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -39,16 +39,15 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_TASK_IOCTL=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_IPR=y
CONFIG_ATA=y
+CONFIG_PATA_AMD=y
+CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_AMD8111_ETH=y
CONFIG_TIGON3=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 76f4edd441d3..5553c5ce4274 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -58,9 +58,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_EEPROM_LEGACY=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_TASK_IOCTL=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_CHR_DEV_OSST=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index e5a674d4a716..fc1e7a7388b8 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -117,15 +117,6 @@ CONFIG_CONNECTOR=y
CONFIG_MAC_FLOPPY=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=m
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_PDC202XX_NEW=y
-CONFIG_BLK_DEV_SL82C105=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
-CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -140,6 +131,12 @@ CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0
CONFIG_SCSI_MESH=y
CONFIG_SCSI_MAC53C94=y
+CONFIG_ATA=y
+CONFIG_PATA_MACIO=y
+CONFIG_PATA_PDC2027X=y
+CONFIG_PATA_WINBOND=y
+CONFIG_PATA_PCMCIA=m
+CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
CONFIG_MD_LINEAR=m
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index d98b6eb3254f..e4d53fe5976a 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -49,6 +49,7 @@ CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
CONFIG_MEMORY_HOTPLUG=y
@@ -241,10 +242,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
@@ -300,7 +297,10 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPT_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -308,6 +308,7 @@ CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 58a98d40086f..0396126ba6a8 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -46,6 +46,7 @@ CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTREMOVE=y
@@ -85,12 +86,6 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_VIRTIO_BLK=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
@@ -120,6 +115,9 @@ CONFIG_SATA_AHCI=y
CONFIG_SATA_SIL24=y
CONFIG_SATA_MV=y
CONFIG_SATA_SVW=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_MACIO=y
+CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
@@ -335,7 +333,10 @@ CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPT_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -343,6 +344,7 @@ CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index fd2edd650c20..11a3473f9e2e 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -59,10 +59,6 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -79,6 +75,8 @@ CONFIG_SCSI_DEBUG=m
CONFIG_ATA=y
CONFIG_SATA_SIL24=y
CONFIG_SATA_SVW=y
+CONFIG_PATA_AMD=y
+CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 8fbf49801233..3ce91a3df27f 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -378,13 +378,6 @@ CONFIG_EEPROM_AT24=m
CONFIG_EEPROM_LEGACY=m
CONFIG_EEPROM_MAX6875=m
CONFIG_EEPROM_93CX6=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_TASK_IOCTL=y
-# CONFIG_IDEPCI_PCIBUS_ORDER is not set
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_IDE_PMAC=y
-CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
@@ -411,13 +404,14 @@ CONFIG_ATA=y
CONFIG_SATA_FSL=m
CONFIG_PDC_ADMA=m
CONFIG_ATA_PIIX=m
+CONFIG_PATA_MACIO=y
CONFIG_PATA_MPC52xx=m
CONFIG_PATA_OPTIDMA=m
CONFIG_PATA_SCH=m
CONFIG_PATA_VIA=m
CONFIG_PATA_PLATFORM=m
CONFIG_PATA_OF_PLATFORM=m
-CONFIG_ATA_GENERIC=m
+CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 8a3bc016b732..5a06bdde1674 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -52,6 +52,7 @@ CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
@@ -92,10 +93,6 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_VIRTIO_BLK=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AMD74XX=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
@@ -122,7 +119,8 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
-# CONFIG_ATA_SFF is not set
+CONFIG_PATA_AMD=y
+CONFIG_ATA_GENERIC=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
@@ -244,10 +242,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
@@ -302,7 +296,10 @@ CONFIG_XMON=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPT_CRC32C_VPMSUM=m
+CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -310,6 +307,7 @@ CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index e9122b15e5fd..74bca2eccd0f 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -36,12 +36,11 @@ CONFIG_NFTL_RW=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_VIA82CXXX=y
-CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_ATA=y
+CONFIG_PATA_VIA=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=y
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index e0baba1535e6..81592562e0f8 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -13,7 +13,6 @@
*/
#include <linux/threads.h>
-#include <linux/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <asm/uaccess.h>
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 8e21bb492dca..d310546e5d9d 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -2,14 +2,42 @@
#define _ASM_POWERPC_BOOK3S_32_PGALLOC_H
#include <linux/threads.h>
+#include <linux/slab.h>
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE 0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
extern void __bad_pte(pmd_t *pmd);
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
/*
* We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline void pgtable_free(void *table, unsigned index_size)
{
- BUG_ON(index_size); /* 32-bit doesn't use this */
- free_page((unsigned long)table);
+ if (!index_size) {
+ free_page((unsigned long)table);
+ } else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
}
#define check_pgt_cache() do { } while (0)
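[Editor's note] The comment added above sizes every table as 2^index_size pointers, served either by get_free_page() (index_size 0, i.e. PTE pages) or by the kmem_cache returned from PGT_CACHE(index_size). A stand-alone sketch of that arithmetic with illustrative classic 32-bit values (4 KB pages, 4-byte entries), not kernel code:

  #include <stdio.h>

  /* Illustrative 32-bit values: 4 KB pages, 4-byte pte_t/pgd_t entries. */
  #define PAGE_SHIFT      12
  #define PTE_INDEX_SIZE  10                       /* 1024 PTEs per page */
  #define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
  #define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)       /* also 10 */

  int main(void)
  {
      unsigned long pte_table = sizeof(unsigned int) << PTE_INDEX_SIZE;
      unsigned long pgd_table = sizeof(unsigned int) << PGD_INDEX_SIZE;

      /* Both work out to 4096 bytes: one page per table, 1024 entries each,
       * matching the "1-page 1024-entry pgdir" description in pgtable.h. */
      printf("PTE table %lu bytes, PGD table %lu bytes\n", pte_table, pgd_table);
      return 0;
  }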
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 6b8b2d57fdc8..012223638815 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -8,6 +8,23 @@
/* And here we include common definitions */
#include <asm/pte-common.h>
+#define PTE_INDEX_SIZE PTE_SHIFT
+#define PMD_INDEX_SIZE 0
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE 0
+#define PUD_TABLE_SIZE 0
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
/*
* The normal case is that PTEs are 32-bits and we have a 1-page
* 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
@@ -19,14 +36,10 @@
* -Matt
*/
/* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PTE (1 << PTE_SHIFT)
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
-
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -82,12 +95,8 @@
extern unsigned long ioremap_bot;
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
@@ -224,7 +233,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline void __ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry,
+ unsigned long address)
{
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -283,15 +293,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-#endif
-
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1af837c561ba..1c64bc6330bc 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -16,9 +16,6 @@
#define H_PUD_TABLE_SIZE (sizeof(pud_t) << H_PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE)
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PMD_SHIFT
-
/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \
H_PAGE_F_SECOND | H_PAGE_F_GIX)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 5aae4f530c21..f3dd21efa2ea 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -6,9 +6,6 @@
#define H_PUD_INDEX_SIZE 5
#define H_PGD_INDEX_SIZE 12
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
-
#define H_PAGE_COMBO 0x00001000 /* this is a combo 4k page */
#define H_PAGE_4K_PFN 0x00002000 /* PFN is for a single 4k page */
/*
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index c45189aa7476..c62f14d0bec1 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
-#define _ASM_POWERPC_BOOK3S_64_HUGETLB_RADIX_H
+#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
/*
* For radix we want generic code to handle hugetlb. But then if we want
* both hash and radix to be enabled together we need to workaround the
@@ -21,9 +21,33 @@ static inline int hstate_get_psize(struct hstate *hstate)
return MMU_PAGE_2M;
else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
return MMU_PAGE_1G;
+ else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
+ return MMU_PAGE_16M;
+ else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
+ return MMU_PAGE_16G;
else {
WARN(1, "Wrong huge page shift\n");
return mmu_virtual_psize;
}
}
+
+#define arch_make_huge_pte arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ unsigned long page_shift;
+
+ if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return entry;
+
+ page_shift = huge_page_shift(hstate_vma(vma));
+ /*
+ * We don't support 1G hugetlb pages yet.
+ */
+ VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
+ if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
+ return __pte(pte_val(entry) | _PAGE_LARGE);
+ else
+ return entry;
+}
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 0ebfbc8f0449..5905f0ff57d1 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -26,6 +26,11 @@
#define _RPAGE_SW1 0x00800
#define _RPAGE_SW2 0x00400
#define _RPAGE_SW3 0x00200
+#define _RPAGE_RSV1 0x1000000000000000UL
+#define _RPAGE_RSV2 0x0800000000000000UL
+#define _RPAGE_RSV3 0x0400000000000000UL
+#define _RPAGE_RSV4 0x0200000000000000UL
+
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
#else
@@ -33,6 +38,11 @@
#endif
#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
+/*
+ * For P9 DD1 only, we need to track whether the pte's huge.
+ */
+#define _PAGE_LARGE _RPAGE_RSV1
+
#define _PAGE_PTE (1ul << 62) /* distinguishes PTEs from pointers */
#define _PAGE_PRESENT (1ul << 63) /* pte contains a translation */
@@ -568,10 +578,11 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
*/
static inline void __ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry,
+ unsigned long address)
{
if (radix_enabled())
- return radix__ptep_set_access_flags(mm, ptep, entry);
+ return radix__ptep_set_access_flags(mm, ptep, entry, address);
return hash__ptep_set_access_flags(ptep, entry);
}
@@ -789,9 +800,6 @@ extern struct page *pgd_page(pgd_t pgd);
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
-
static inline int map_kernel_page(unsigned long ea, unsigned long pa,
unsigned long flags)
{
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 2a46dea8e1b1..b4d1302387a3 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -140,19 +140,20 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
unsigned long new_pte;
old_pte = __radix_pte_update(ptep, ~0, 0);
- asm volatile("ptesync" : : : "memory");
/*
* new value of pte
*/
new_pte = (old_pte | set) & ~clr;
-
/*
- * For now let's do heavy pid flush
- * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
+ * If we are trying to clear the pte, we can skip
+ * the below sequence and batch the tlb flush. The
+ * tlb flush batching is done by mmu gather code
*/
- radix__flush_tlb_mm(mm);
-
- __radix_pte_update(ptep, 0, new_pte);
+ if (new_pte) {
+ asm volatile("ptesync" : : : "memory");
+ radix__flush_tlb_pte_p9_dd1(old_pte, mm, addr);
+ __radix_pte_update(ptep, 0, new_pte);
+ }
} else
old_pte = __radix_pte_update(ptep, clr, set);
asm volatile("ptesync" : : : "memory");
@@ -167,7 +168,8 @@ static inline unsigned long radix__pte_update(struct mm_struct *mm,
* function doesn't need to invalidate tlb.
*/
static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry,
+ unsigned long address)
{
unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
@@ -183,13 +185,7 @@ static inline void radix__ptep_set_access_flags(struct mm_struct *mm,
* new value of pte
*/
new_pte = old_pte | set;
-
- /*
- * For now let's do heavy pid flush
- * radix__flush_tlb_page_psize(mm, addr, mmu_virtual_psize);
- */
- radix__flush_tlb_mm(mm);
-
+ radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
__radix_pte_update(ptep, 0, new_pte);
} else
__radix_pte_update(ptep, 0, set);
@@ -243,6 +239,8 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return __pmd(pmd_val(pmd) | _PAGE_PTE | _PAGE_LARGE);
return __pmd(pmd_val(pmd) | _PAGE_PTE);
}
static inline void radix__pmdp_huge_split_prepare(struct vm_area_struct *vma,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index a9e19cb2f7c5..cc7fbde4f53c 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -42,4 +42,6 @@ extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
unsigned long page_size);
extern void radix__flush_tlb_lpid(unsigned long lpid);
extern void radix__flush_tlb_all(void);
+extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
+ unsigned long address);
#endif
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ffbafbf76b19..7657aa897a38 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -20,12 +20,15 @@
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT 7
+#define IFETCH_ALIGN_SHIFT 4 /* POWER8,9 */
#endif
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#define IFETCH_ALIGN_BYTES (1 << IFETCH_ALIGN_SHIFT)
+
#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
struct ppc64_caches {
u32 dsize; /* L1 d-cache size */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 44efe739b6b9..fc46b664c49e 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -7,6 +7,71 @@
#include <asm/asm-compat.h>
#include <linux/bug.h>
+#ifdef __BIG_ENDIAN
+#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
+#else
+#define BITOFF_CAL(size, off) (off * BITS_PER_BYTE)
+#endif
+
+#define XCHG_GEN(type, sfx, cl) \
+static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
+{ \
+ unsigned int prev, prev_mask, tmp, bitoff, off; \
+ \
+ off = (unsigned long)p % sizeof(u32); \
+ bitoff = BITOFF_CAL(sizeof(type), off); \
+ p -= off; \
+ val <<= bitoff; \
+ prev_mask = (u32)(type)-1 << bitoff; \
+ \
+ __asm__ __volatile__( \
+"1: lwarx %0,0,%3\n" \
+" andc %1,%0,%5\n" \
+" or %1,%1,%4\n" \
+ PPC405_ERR77(0,%3) \
+" stwcx. %1,0,%3\n" \
+" bne- 1b\n" \
+ : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
+ : "r" (p), "r" (val), "r" (prev_mask) \
+ : "cc", cl); \
+ \
+ return prev >> bitoff; \
+}
+
+#define CMPXCHG_GEN(type, sfx, br, br2, cl) \
+static inline \
+u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
+{ \
+ unsigned int prev, prev_mask, tmp, bitoff, off; \
+ \
+ off = (unsigned long)p % sizeof(u32); \
+ bitoff = BITOFF_CAL(sizeof(type), off); \
+ p -= off; \
+ old <<= bitoff; \
+ new <<= bitoff; \
+ prev_mask = (u32)(type)-1 << bitoff; \
+ \
+ __asm__ __volatile__( \
+ br \
+"1: lwarx %0,0,%3\n" \
+" and %1,%0,%6\n" \
+" cmpw 0,%1,%4\n" \
+" bne- 2f\n" \
+" andc %1,%0,%6\n" \
+" or %1,%1,%5\n" \
+ PPC405_ERR77(0,%3) \
+" stwcx. %1,0,%3\n" \
+" bne- 1b\n" \
+ br2 \
+ "\n" \
+"2:" \
+ : "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
+ : "r" (p), "r" (old), "r" (new), "r" (prev_mask) \
+ : "cc", cl); \
+ \
+ return prev >> bitoff; \
+}
+
/*
* Atomic exchange
*
@@ -14,6 +79,11 @@
* the previous value stored there.
*/
+XCHG_GEN(u8, _local, "memory");
+XCHG_GEN(u8, _relaxed, "cc");
+XCHG_GEN(u16, _local, "memory");
+XCHG_GEN(u16, _relaxed, "cc");
+
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
@@ -85,9 +155,13 @@ __xchg_u64_relaxed(u64 *p, unsigned long val)
#endif
static __always_inline unsigned long
-__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+__xchg_local(void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
+ case 1:
+ return __xchg_u8_local(ptr, x);
+ case 2:
+ return __xchg_u16_local(ptr, x);
case 4:
return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
@@ -103,6 +177,10 @@ static __always_inline unsigned long
__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
{
switch (size) {
+ case 1:
+ return __xchg_u8_relaxed(ptr, x);
+ case 2:
+ return __xchg_u16_relaxed(ptr, x);
case 4:
return __xchg_u32_relaxed(ptr, x);
#ifdef CONFIG_PPC64
@@ -131,6 +209,15 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
* and return the old value of *p.
*/
+CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u8, _local, , , "memory");
+CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u8, _relaxed, , , "cc");
+CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
+CMPXCHG_GEN(u16, _local, , , "memory");
+CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
+CMPXCHG_GEN(u16, _relaxed, , , "cc");
+
static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
@@ -316,6 +403,10 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16(ptr, old, new);
case 4:
return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
@@ -328,10 +419,14 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
}
static __always_inline unsigned long
-__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8_local(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_local(ptr, old, new);
case 4:
return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
@@ -348,6 +443,10 @@ __cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8_relaxed(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_relaxed(ptr, old, new);
case 4:
return __cmpxchg_u32_relaxed(ptr, old, new);
#ifdef CONFIG_PPC64
@@ -364,6 +463,10 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
unsigned int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8_acquire(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_acquire(ptr, old, new);
case 4:
return __cmpxchg_u32_acquire(ptr, old, new);
#ifdef CONFIG_PPC64
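
The XCHG_GEN/CMPXCHG_GEN macros above emulate 1- and 2-byte atomics by operating on the aligned 32-bit word that contains the value and masking in only the requested bytes. A minimal standalone C sketch of the same idea follows (little-endian BITOFF_CAL variant, with the GCC __atomic builtin standing in for the lwarx/stwcx. retry loop); the names are illustrative and the snippet is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE	8
/* little-endian form of BITOFF_CAL for this sketch */
#define BITOFF_CAL(size, off)	((off) * BITS_PER_BYTE)

static uint8_t xchg_u8_sketch(volatile uint8_t *p, uint8_t val)
{
	uintptr_t off = (uintptr_t)p % sizeof(uint32_t);
	unsigned int bitoff = BITOFF_CAL(sizeof(uint8_t), off);
	volatile uint32_t *word = (volatile uint32_t *)((uintptr_t)p - off);
	uint32_t prev_mask = (uint32_t)0xff << bitoff;
	uint32_t old = *word, new;

	/* the retry loop plays the role of the lwarx/stwcx. sequence */
	do {
		new = (old & ~prev_mask) | ((uint32_t)val << bitoff);
	} while (!__atomic_compare_exchange_n(word, &old, new, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
	return (uint8_t)(old >> bitoff);
}

int main(void)
{
	union { uint32_t word; uint8_t b[4]; } buf = { .b = { 1, 2, 3, 4 } };
	uint8_t prev = xchg_u8_sketch(&buf.b[2], 0x7f);

	printf("prev=%d new=%d\n", prev, buf.b[2]);	/* prev=3 new=127 */
	return 0;
}
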
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index a954e4975049..86308f177f2d 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -10,7 +10,7 @@ struct pt_regs;
extern struct dentry *powerpc_debugfs_root;
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf845473b..eaada6c92344 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -23,10 +23,8 @@
"4: li %1,%3\n" \
"b 3b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- ".align 3\n" \
- PPC_LONG "1b,4b,2b,4b\n" \
- ".previous" \
+ EX_TABLE(1b, 4b) \
+ EX_TABLE(2b, 4b) \
: "=&r" (oldval), "=&r" (ret) \
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory")
@@ -104,11 +102,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"3: .section .fixup,\"ax\"\n\
4: li %0,%6\n\
b 3b\n\
- .previous\n\
- .section __ex_table,\"a\"\n\
- .align 3\n\
- " PPC_LONG "1b,4b,2b,4b\n\
- .previous" \
+ .previous\n"
+ EX_TABLE(1b, 4b)
+ EX_TABLE(2b, 4b)
: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory");
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index ab90c2fa1ea6..fca7033839a9 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -95,12 +95,12 @@ end_##sname:
#define __FIXED_SECTION_ENTRY_BEGIN(sname, name, __align) \
USE_FIXED_SECTION(sname); \
- .align __align; \
+ .balign __align; \
.global name; \
name:
#define FIXED_SECTION_ENTRY_BEGIN(sname, name) \
- __FIXED_SECTION_ENTRY_BEGIN(sname, name, 0)
+ __FIXED_SECTION_ENTRY_BEGIN(sname, name, IFETCH_ALIGN_BYTES)
#define FIXED_SECTION_ENTRY_BEGIN_LOCATION(sname, name, start) \
USE_FIXED_SECTION(sname); \
@@ -203,9 +203,9 @@ name:
#define EXC_VIRT_END(name, start, end) \
FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, end)
-#define EXC_COMMON_BEGIN(name) \
+#define EXC_COMMON_BEGIN(name) \
USE_TEXT_SECTION(); \
- .align 7; \
+ .balign IFETCH_ALIGN_BYTES; \
.global name; \
DEFINE_FIXED_SYMBOL(name); \
name:
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index c5517f463ec7..ede215167d1a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -9,7 +9,7 @@ extern struct kmem_cache *hugepte_cache;
#ifdef CONFIG_PPC_BOOK3S_64
-#include <asm/book3s/64/hugetlb-radix.h>
+#include <asm/book3s/64/hugetlb.h>
/*
* This should work for other subarchs too. But right now we use the
* new format only for 64bit book3s
@@ -51,12 +51,20 @@ static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
+#ifdef CONFIG_PPC_8xx
+ return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+#else
return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+#endif
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
+#ifdef CONFIG_PPC_8xx
+ return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+#else
return hpd.pd & HUGEPD_SHIFT_MASK;
+#endif
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -99,7 +107,15 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
pte_t pte);
+#ifdef CONFIG_PPC_8xx
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#endif
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
@@ -205,7 +221,8 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
* are reserved early in the boot process by memblock instead of via
* the .dts as on IBM platforms.
*/
-#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
+ defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 708edebcf147..77ff1ba99d1f 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -275,7 +275,9 @@
#define H_COP 0x304
#define H_GET_MPP_X 0x314
#define H_SET_MODE 0x31C
-#define MAX_HCALL_OPCODE H_SET_MODE
+#define H_CLEAR_HPT 0x358
+#define H_SIGNAL_SYS_RESET 0x380
+#define MAX_HCALL_OPCODE H_SIGNAL_SYS_RESET
/* H_VIOCTL functions */
#define H_GET_VIOA_DUMP_SIZE 0x01
@@ -306,6 +308,11 @@
#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
#define H_SET_MODE_RESOURCE_LE 4
+/* Values for argument to H_SIGNAL_SYS_RESET */
+#define H_SIGNAL_SYS_RESET_ALL -1
+#define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
+/* >= 0 values are CPU number */
+
#ifndef __ASSEMBLY__
/**
@@ -412,27 +419,6 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc)
}
}
-#ifdef CONFIG_PPC_PSERIES
-extern int CMO_PrPSP;
-extern int CMO_SecPSP;
-extern unsigned long CMO_PageSize;
-
-static inline int cmo_get_primary_psp(void)
-{
- return CMO_PrPSP;
-}
-
-static inline int cmo_get_secondary_psp(void)
-{
- return CMO_SecPSP;
-}
-
-static inline unsigned long cmo_get_page_size(void)
-{
- return CMO_PageSize;
-}
-#endif /* CONFIG_PPC_PSERIES */
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index f6fda8482f60..5ed292431b5b 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -33,6 +33,7 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <asm/synch.h>
#include <asm/delay.h>
#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
#include <asm-generic/iomap.h>
@@ -458,13 +459,10 @@ static inline unsigned int name(unsigned int port) \
"5: li %0,-1\n" \
" b 4b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 0b,5b\n" \
- " .long 1b,5b\n" \
- " .long 2b,5b\n" \
- " .long 3b,5b\n" \
- ".previous" \
+ EX_TABLE(0b, 5b) \
+ EX_TABLE(1b, 5b) \
+ EX_TABLE(2b, 5b) \
+ EX_TABLE(3b, 5b) \
: "=&r" (x) \
: "r" (port + _IO_BASE) \
: "memory"); \
@@ -479,11 +477,8 @@ static inline void name(unsigned int val, unsigned int port) \
"0:" op " %0,0,%1\n" \
"1: sync\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 0b,2b\n" \
- " .long 1b,2b\n" \
- ".previous" \
+ EX_TABLE(0b, 2b) \
+ EX_TABLE(1b, 2b) \
: : "r" (val), "r" (port + _IO_BASE) \
: "memory"); \
}
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index a46f5f45570c..6c3b71502fbc 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -53,7 +53,7 @@
typedef void (*crash_shutdown_t)(void);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/*
* This function is responsible for capturing register states if coming
@@ -91,7 +91,17 @@ static inline bool kdump_in_progress(void)
return crashing_cpu >= 0;
}
-#else /* !CONFIG_KEXEC */
+#ifdef CONFIG_KEXEC_FILE
+extern struct kexec_file_ops kexec_elf64_ops;
+
+int setup_purgatory(struct kimage *image, const void *slave_code,
+ const void *fdt, unsigned long kernel_load_addr,
+ unsigned long fdt_load_addr);
+int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
+ unsigned long initrd_len, const char *cmdline);
+#endif /* CONFIG_KEXEC_FILE */
+
+#else /* !CONFIG_KEXEC_CORE */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
static inline int overlaps_crashkernel(unsigned long start, unsigned long size)
@@ -116,7 +126,7 @@ static inline bool kdump_in_progress(void)
return false;
}
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KEXEC_H */
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 2c9759bdb63b..97b8c1f83453 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -32,6 +32,7 @@
#include <asm/probes.h>
#include <asm/code-patching.h>
+#ifdef CONFIG_KPROBES
#define __ARCH_WANT_KPROBES_INSN_SLOT
struct pt_regs;
@@ -127,5 +128,11 @@ struct kprobe_ctlblk {
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+extern int kprobe_handler(struct pt_regs *regs);
+extern int kprobe_post_handler(struct pt_regs *regs);
+#else
+static inline int kprobe_handler(struct pt_regs *regs) { return 0; }
+static inline int kprobe_post_handler(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_KPROBES */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KPROBES_H */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index e02cbc6a6c70..5011b69107a7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -183,7 +183,7 @@ struct machdep_calls {
*/
void (*machine_shutdown)(void);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
void (*kexec_cpu_down)(int crash_shutdown, int secondary);
	/* Called to do whatever setup is needed on image and the
@@ -198,7 +198,7 @@ struct machdep_calls {
* no return.
*/
void (*machine_kexec)(struct kimage *image);
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_SUSPEND
/* These are called to disable and enable, respectively, IRQs when
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 3e0e4927811c..798b5bf91427 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -172,6 +172,41 @@ typedef struct {
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * penc : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def {
+ unsigned int shift; /* number of bits */
+ unsigned int enc; /* PTE encoding */
+ unsigned int ind; /* Corresponding indirect page size shift */
+ unsigned int flags;
+#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */
+#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */
+};
+
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+static inline int shift_to_mmu_psize(unsigned int shift)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+ if (mmu_psize_defs[psize].shift == shift)
+ return psize;
+ return -1;
+}
+
+static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
+{
+ if (mmu_psize_defs[mmu_psize].shift)
+ return mmu_psize_defs[mmu_psize].shift;
+ BUG();
+}
+
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC_4K_PAGES)
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 8d1499334257..a34c764ca8dd 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -275,19 +275,20 @@ static inline bool early_radix_enabled(void)
#define MMU_PAGE_64K 2
#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K 4
-#define MMU_PAGE_1M 5
-#define MMU_PAGE_2M 6
-#define MMU_PAGE_4M 7
-#define MMU_PAGE_8M 8
-#define MMU_PAGE_16M 9
-#define MMU_PAGE_64M 10
-#define MMU_PAGE_256M 11
-#define MMU_PAGE_1G 12
-#define MMU_PAGE_16G 13
-#define MMU_PAGE_64G 14
+#define MMU_PAGE_512K 5
+#define MMU_PAGE_1M 6
+#define MMU_PAGE_2M 7
+#define MMU_PAGE_4M 8
+#define MMU_PAGE_8M 9
+#define MMU_PAGE_16M 10
+#define MMU_PAGE_64M 11
+#define MMU_PAGE_256M 12
+#define MMU_PAGE_1G 13
+#define MMU_PAGE_16G 14
+#define MMU_PAGE_64G 15
/* N.B. we need to change the type of hpte_page_sizes if this gets to be > 16 */
-#define MMU_PAGE_COUNT 15
+#define MMU_PAGE_COUNT 16
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 5c451140660a..b9e3f0aca261 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -19,16 +19,18 @@ extern void destroy_context(struct mm_struct *mm);
struct mm_iommu_table_group_mem_t;
extern int isolate_lru_page(struct page *page); /* from internal.h */
-extern bool mm_iommu_preregistered(void);
-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_get(struct mm_struct *mm,
+ unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem);
-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
-extern void mm_iommu_init(mm_context_t *ctx);
-extern void mm_iommu_cleanup(mm_context_t *ctx);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
- unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
- unsigned long entries);
+extern long mm_iommu_put(struct mm_struct *mm,
+ struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(struct mm_struct *mm);
+extern void mm_iommu_cleanup(struct mm_struct *mm);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+ unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+ unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index cd4ffd86765f..cc12c61ef315 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -90,10 +90,6 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
}
#endif
-struct exception_table_entry;
-void sort_ex_table(struct exception_table_entry *start,
- struct exception_table_entry *finish);
-
#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
#define reloc_start PHYSICAL_START
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 76d6b9e0c8a9..633139291a48 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -2,14 +2,42 @@
#define _ASM_POWERPC_PGALLOC_32_H
#include <linux/threads.h>
+#include <linux/slab.h>
-/* For 32-bit, all levels of page tables are just drawn from get_free_page() */
-#define MAX_PGTABLE_INDEX_SIZE 0
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages (which are linked to a struct
+ * page for now, and drawn from the main get_free_pages() pool), the
+ * allocation size will be (2^index_size * sizeof(pointer)) and
+ * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
extern void __bad_pte(pmd_t *pmd);
-extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) ({ \
+ BUG_ON(!(shift)); \
+ pgtable_cache[(shift) - 1]; \
+ })
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
/*
* We don't have any real pmd's, and this code never triggers because
@@ -68,8 +96,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
static inline void pgtable_free(void *table, unsigned index_size)
{
- BUG_ON(index_size); /* 32-bit doesn't use this */
- free_page((unsigned long)table);
+ if (!index_size) {
+ free_page((unsigned long)table);
+ } else {
+ BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(index_size), table);
+ }
}
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index c219ef7be53b..ba9921bf202e 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -16,6 +16,23 @@ extern int icache_44x_need_flush;
#endif /* __ASSEMBLY__ */
+#define PTE_INDEX_SIZE PTE_SHIFT
+#define PMD_INDEX_SIZE 0
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
+
+#define PMD_CACHE_INDEX PMD_INDEX_SIZE
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE 0
+#define PUD_TABLE_SIZE 0
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+
/*
* The normal case is that PTEs are 32-bits and we have a 1-page
* 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
@@ -27,22 +44,12 @@ extern int icache_44x_need_flush;
* -Matt
*/
/* PGDIR_SHIFT determines what a top-level page table entry can map */
-#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-/*
- * entries per page directory level: our page-table tree is two-level, so
- * we don't really have any PMD directory.
- */
-#ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
-#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
-#endif /* __ASSEMBLY__ */
-
-#define PTRS_PER_PTE (1 << PTE_SHIFT)
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS 0
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
@@ -268,7 +275,8 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline void __ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry,
+ unsigned long address)
{
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -328,15 +336,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-#ifndef CONFIG_PPC_4K_PAGES
-void pgtable_cache_init(void);
-#else
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-#endif
-
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
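
A worked example of the size relation described in the new pgalloc comment and the index-size defines above, assuming a 4K base page size so that PTE_INDEX_SIZE is 10 and PGDIR_SHIFT comes out to 22; the concrete numbers are illustrative assumptions, not taken from the patch.

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;		/* 4K pages (assumption) */
	unsigned int pte_index_size = 10;	/* 1024 PTEs per table    */
	unsigned int pgdir_shift = page_shift + pte_index_size;	/* 22 */
	unsigned int pgd_index_size = 32 - pgdir_shift;			/* 10 */

	/* PGT_CACHE(n) hands out objects of 2^n * sizeof(pointer) bytes */
	printf("PTE table size: %lu bytes\n",
	       (1UL << pte_index_size) * sizeof(void *));
	printf("PGD table size: %lu bytes, maps %lu MB per entry\n",
	       (1UL << pgd_index_size) * sizeof(void *),
	       (1UL << pgdir_shift) >> 20);
	return 0;
}
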
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 3742b1919661..b4df2734c078 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -49,6 +49,7 @@
#define _PMD_BAD 0x0ff0
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
+#define _PMD_PAGE_512K 0x0004
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index fc7d51753f81..d0db98793dd8 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -27,9 +27,6 @@
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PMD_SHIFT
-
/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE (1UL << PUD_SHIFT)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 908324574f77..55b28ef3409a 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -31,9 +31,6 @@
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
-/* With 4k base page size, hugepage PTEs go at the PMD level */
-#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
-
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 653a1838469d..c7f927e67d14 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -26,15 +26,11 @@
#else
#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#endif
+
/*
* Define the address range of the kernel non-linear virtual area
*/
-
-#ifdef CONFIG_PPC_BOOK3E
#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
-#else
-#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
-#endif
#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
/*
@@ -43,11 +39,7 @@
* (we keep a quarter for the virtual memmap)
*/
#define VMALLOC_START KERN_VIRT_START
-#ifdef CONFIG_PPC_BOOK3E
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 2)
-#else
-#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
-#endif
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
@@ -85,12 +77,8 @@
* Defines the address of the vmemap area, in its own region on
* hash table CPUs and after the vmalloc space on Book3E
*/
-#ifdef CONFIG_PPC_BOOK3E
#define VMEMMAP_BASE VMALLOC_END
#define VMEMMAP_END KERN_IO_START
-#else
-#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
-#endif
#define vmemmap ((struct page *)VMEMMAP_BASE)
@@ -301,7 +289,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
* function doesn't need to flush the hash entry
*/
static inline void __ptep_set_access_flags(struct mm_struct *mm,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry,
+ unsigned long address)
{
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -358,8 +347,6 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) __pte((x).val)
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
-void pgtable_cache_init(void);
extern int map_kernel_page(unsigned long ea, unsigned long pa,
unsigned long flags);
extern int __meminit vmemmap_create_mapping(unsigned long start,
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1263c22d60d8..172849727054 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -226,7 +226,11 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
+#ifdef CONFIG_PPC_8xx
+ return ((hpd.pd & 0x4) != 0);
+#else
return (hpd.pd > 0);
+#endif
}
static inline int pmd_huge(pmd_t pmd)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9bd87f269d6d..dd01212935ac 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -78,6 +78,8 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
unsigned long vmalloc_to_phys(void *vmalloc_addr);
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 1b394247afc2..0bcc75e295e3 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -93,38 +93,6 @@ static inline long register_dtl(unsigned long cpu, unsigned long vpa)
return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
-static inline long plpar_page_set_loaned(unsigned long vpa)
-{
- unsigned long cmo_page_sz = cmo_get_page_size();
- long rc = 0;
- int i;
-
- for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
- rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
-
- for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
- plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
- vpa + i - cmo_page_sz, 0);
-
- return rc;
-}
-
-static inline long plpar_page_set_active(unsigned long vpa)
-{
- unsigned long cmo_page_sz = cmo_get_page_size();
- long rc = 0;
- int i;
-
- for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
- rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
-
- for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
- plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
- vpa + i - cmo_page_sz, 0);
-
- return rc;
-}
-
extern void vpa_init(int cpu);
static inline long plpar_pte_enter(unsigned long flags,
@@ -340,4 +308,9 @@ static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawr
return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
}
+static inline long plapr_signal_sys_reset(long cpu)
+{
+ return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
+}
+
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
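
A hypothetical in-kernel caller of the new wrapper, sketched only to show how the H_SIGNAL_SYS_RESET arguments defined earlier in this patch would be used; the helper name is made up, and the snippet assumes existing kernel facilities (H_SUCCESS, pr_warn) rather than anything added here.

/* Hypothetical helper: system-reset every CPU except the caller, as a
 * debugger or crash path might. plapr_signal_sys_reset() and
 * H_SIGNAL_SYS_RESET_ALL_OTHERS come from this patch. */
static void sreset_other_cpus(void)
{
	long rc = plapr_signal_sys_reset(H_SIGNAL_SYS_RESET_ALL_OTHERS);

	if (rc != H_SUCCESS)
		pr_warn("H_SIGNAL_SYS_RESET failed, rc = %ld\n", rc);
}
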
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 0f73de069f19..726288048652 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -53,7 +53,7 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev);
struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr);
void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
int eeh_pci_enable(struct eeh_pe *pe, int function);
-int eeh_reset_pe(struct eeh_pe *);
+int eeh_pe_reset_full(struct eeh_pe *pe);
void eeh_save_bars(struct eeh_dev *edev);
int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c73750b0d9fa..025833b8df9f 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -10,9 +10,7 @@
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
-#ifndef __ASSEMBLY__
-#error __FILE__ should only be used in assembler files
-#else
+#ifdef __ASSEMBLY__
#define SZL (BITS_PER_LONG/8)
@@ -265,10 +263,14 @@ n:
 * latter is for those that incidentally must be excluded from probing
* and allows them to be linked at more optimal location within text.
*/
+#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE_SYMBOL(entry) \
.pushsection "_kprobe_blacklist","aw"; \
PPC_LONG (entry) ; \
.popsection
+#else
+#define _ASM_NOKPROBE_SYMBOL(entry)
+#endif
#define FUNC_START(name) _GLOBAL(name)
#define FUNC_END(name)
@@ -779,5 +781,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
.long 0xa6037b7d; /* mtsrr1 r11 */ \
.long 0x2400004c /* rfid */
#endif /* !CONFIG_PPC_BOOK3E */
+
#endif /* __ASSEMBLY__ */
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target) \
+ stringify_in_c(.section __ex_table,"a";)\
+ stringify_in_c(.balign 4;) \
+ stringify_in_c(.long (_fault) - . ;) \
+ stringify_in_c(.long (_target) - . ;) \
+ stringify_in_c(.previous)
+
#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index dac83fcb9445..1ba814436c73 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -312,8 +312,6 @@ struct thread_struct {
unsigned long mmcr2;
unsigned mmcr0;
unsigned used_ebb;
- unsigned long lmrr;
- unsigned long lmser;
#endif
};
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 7f436ba1b56f..5e57705b4759 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -159,11 +159,5 @@ struct of_drconf_cell {
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
-/*
- * The architecture vector has an array of PVR mask/value pairs,
- * followed by # option vectors - 1, followed by the option vectors.
- */
-extern unsigned char ibm_architecture_vec[];
-
#endif /* __KERNEL__ */
#endif /* _POWERPC_PROM_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 04aa1ee8cdb6..0d4531aa2052 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -295,8 +295,6 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
-#define SPRN_LMRR 0x32D /* Load Monitor Region Register */
-#define SPRN_LMSER 0x32E /* Load Monitor Section Enable Register */
#define SPRN_ASDR 0x330 /* Access segment descriptor register */
#define SPRN_IC 0x350 /* Virtual Instruction Count */
#define SPRN_VTB 0x351 /* Virtual Time Base */
@@ -308,7 +306,6 @@
#define SPRN_PMCR 0x374 /* Power Management Control Register */
/* HFSCR and FSCR bit numbers are the same */
-#define FSCR_LM_LG 11 /* Enable Load Monitor Registers */
#define FSCR_MSGP_LG 10 /* Enable MSGP */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
@@ -319,12 +316,10 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_LM __MASK(FSCR_LM_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_LM __MASK(FSCR_LM_LG)
#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
#define HFSCR_EBB __MASK(FSCR_EBB_LG)
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 0197e12f7d48..1f1636124a04 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -4,7 +4,7 @@
#ifndef _ASM_POWERPC_REG_8xx_H
#define _ASM_POWERPC_REG_8xx_H
-#include <asm/mmu-8xx.h>
+#include <asm/mmu.h>
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 0d02c11dc331..32db16d2e7ad 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -176,7 +176,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
#endif /* !CONFIG_SMP */
#endif /* !CONFIG_PPC64 */
-#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC))
+#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
extern void smp_release_cpus(void);
#else
static inline void smp_release_cpus(void) { };
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
new file mode 100644
index 000000000000..6720190eabec
--- /dev/null
+++ b/arch/powerpc/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+#include <asm/reg.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= mftb();
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
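
The header above only supplies the canary; the check itself is emitted by gcc around each protected function, and the entry_32.S hunk later in this patch reloads __stack_chk_guard from the incoming task on context switch (non-SMP only). A standalone C sketch of the prologue/epilogue pattern the compiler generates, hand-written here for illustration with a fixed stand-in guard value:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the real __stack_chk_guard global. */
static unsigned long stack_chk_guard_sketch = 0xdeadbeefUL;

static void protected_fn(const char *src)
{
	unsigned long canary = stack_chk_guard_sketch;	/* prologue: stash guard */
	char buf[16];

	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	if (canary != stack_chk_guard_sketch) {		/* epilogue: verify guard */
		fprintf(stderr, "stack smashing detected\n");
		abort();
	}
}

int main(void)
{
	protected_fn("hello");
	puts("ok");
	return 0;
}
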
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 2fc5d4db503c..4b369d83fe9c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -386,3 +386,4 @@ SYSCALL(mlock2)
SYSCALL(copy_file_range)
COMPAT_SYS_SPU(preadv2)
COMPAT_SYS_SPU(pwritev2)
+SYSCALL(kexec_file_load)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index c266227fdd5b..a15d84d59356 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/asm-compat.h>
+#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -63,23 +64,30 @@
__access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
* the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
*
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path. This means when everything is well, we don't even
+ * have to jump over them. Further, they do not intrude on our cache or tlb
+ * entries.
*/
+#define ARCH_HAS_RELATIVE_EXTABLE
+
struct exception_table_entry {
- unsigned long insn;
- unsigned long fixup;
+ int insn;
+ int fixup;
};
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->fixup + x->fixup;
+}
+
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
@@ -132,10 +140,7 @@ extern long __put_user_bad(void);
"3: li %0,%3\n" \
" b 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,3b\n" \
- ".previous" \
+ EX_TABLE(1b, 3b) \
: "=r" (err) \
: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
@@ -152,11 +157,8 @@ extern long __put_user_bad(void);
"4: li %0,%3\n" \
" b 3b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,4b\n" \
- PPC_LONG "2b,4b\n" \
- ".previous" \
+ EX_TABLE(1b, 4b) \
+ EX_TABLE(2b, 4b) \
: "=r" (err) \
: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
@@ -215,10 +217,7 @@ extern long __get_user_bad(void);
" li %1,0\n" \
" b 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,3b\n" \
- ".previous" \
+ EX_TABLE(1b, 3b) \
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
@@ -237,11 +236,8 @@ extern long __get_user_bad(void);
" li %1+1,0\n" \
" b 3b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,4b\n" \
- PPC_LONG "2b,4b\n" \
- ".previous" \
+ EX_TABLE(1b, 4b) \
+ EX_TABLE(2b, 4b) \
: "=r" (err), "=&r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
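
The switch to ARCH_HAS_RELATIVE_EXTABLE above stores each exception-table entry as two 32-bit self-relative offsets, halving the table size on 64-bit. A standalone sketch of the encoding and of the recovery done by extable_fixup(); ordinary stack objects stand in for the code labels used in the real tables.

#include <stdint.h>
#include <stdio.h>

struct exception_table_entry {
	int32_t insn;
	int32_t fixup;
};

static uintptr_t extable_insn(const struct exception_table_entry *x)
{
	return (uintptr_t)&x->insn + x->insn;
}

static uintptr_t extable_fixup_sketch(const struct exception_table_entry *x)
{
	return (uintptr_t)&x->fixup + x->fixup;
}

int main(void)
{
	/* stand-ins for the faulting instruction and its fixup label */
	char fault[4], target[4];
	struct exception_table_entry e;

	e.insn  = (int32_t)((uintptr_t)fault  - (uintptr_t)&e.insn);
	e.fixup = (int32_t)((uintptr_t)target - (uintptr_t)&e.fixup);

	printf("insn recovered:  %d\n", extable_insn(&e) == (uintptr_t)fault);
	printf("fixup recovered: %d\n", extable_fixup_sketch(&e) == (uintptr_t)target);
	return 0;
}
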
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index e8cdfec8d512..eb1acee91a20 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 382
+#define NR_syscalls 383
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 4afe66aa1400..f3f4710d4ff5 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <asm/asm-compat.h>
+#include <asm/ppc_asm.h>
#ifdef __BIG_ENDIAN__
@@ -193,10 +194,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
#endif
"b 2b\n"
".previous\n"
- ".section __ex_table,\"a\"\n\t"
- PPC_LONG_ALIGN "\n\t"
- PPC_LONG "1b,3b\n"
- ".previous"
+ EX_TABLE(1b, 3b)
: [tmp] "=&b" (tmp), [offset] "=&r" (offset), [ret] "=&r" (ret)
: [addr] "b" (addr), "m" (*(unsigned long *)addr));
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index e9f5f41aa55a..2f26335a3c42 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -392,5 +392,6 @@
#define __NR_copy_file_range 379
#define __NR_preadv2 380
#define __NR_pwritev2 381
+#define __NR_kexec_file_load 382
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
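
With the syscall wired up (systbl.h above and the new __NR_kexec_file_load), userspace can load a kexec image by file descriptor. A minimal userspace sketch follows, assuming the generic kexec_file_load(2) argument order; the file paths are illustrative and error handling is kept to a minimum.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_kexec_file_load
#define __NR_kexec_file_load 382	/* powerpc number added by this patch */
#endif

int main(void)
{
	const char cmdline[] = "root=/dev/sda2 ro";
	int kfd = open("/boot/vmlinux", O_RDONLY);	/* illustrative path */
	int ifd = open("/boot/initrd.img", O_RDONLY);	/* illustrative path */

	if (kfd < 0 || ifd < 0) {
		perror("open");
		return 1;
	}
	/* cmdline_len must count the trailing NUL */
	if (syscall(__NR_kexec_file_load, kfd, ifd,
		    (unsigned long)sizeof(cmdline), cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}
	puts("kexec image loaded");
	return 0;
}
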
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1925341dbb9c..a3a6047fd395 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -19,6 +19,10 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+# -fstack-protector triggers protection checks in this code,
+# but it is being used too early to link to meaningful stack_chk logic.
+CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
@@ -58,8 +62,6 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
-obj-$(CONFIG_IBMVIO) += vio.o
-obj-$(CONFIG_IBMEBUS) += ibmebus.o
obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
@@ -107,8 +109,9 @@ pci64-$(CONFIG_PPC64) += pci_dn.o pci-hotplug.o isa-bridge.o
obj-$(CONFIG_PCI) += pci_$(BITS).o $(pci64-y) \
pci-common.o pci_of_scan.o
obj-$(CONFIG_PCI_MSI) += msi.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
+obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \
machine_kexec_$(BITS).o
+obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
@@ -128,7 +131,7 @@ obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o
obj-$(CONFIG_PPC64) += $(obj64-y)
obj-$(CONFIG_PPC32) += $(obj32-y)
-ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
+ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE),)
obj-y += ppc_save_regs.o
endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 195a9fc8f81c..0601e6a7297c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,6 +91,9 @@ int main(void)
DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
#endif
+#ifdef CONFIG_CC_STACKPROTECTOR
+ DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index f3e1f5d29dce..917188615bf5 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -96,6 +96,7 @@ _GLOBAL(__setup_cpu_power9)
mtlr r11
beqlr
li r0,0
+ mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
@@ -116,6 +117,7 @@ _GLOBAL(__restore_cpu_power9)
mtlr r11
beqlr
li r0,0
+ mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index f25731627d7f..8180bfd7ab93 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -372,7 +372,7 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
/* Find the PHB PE */
phb_pe = eeh_phb_pe_get(pe->phb);
if (!phb_pe) {
- pr_warn("%s Can't find PE for PHB#%d\n",
+ pr_warn("%s Can't find PE for PHB#%x\n",
__func__, pe->phb->global_number);
return -EEXIST;
}
@@ -664,7 +664,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
rc = eeh_ops->set_option(pe, function);
if (rc)
pr_warn("%s: Unexpected state change %d on "
- "PHB#%d-PE#%x, err=%d\n",
+ "PHB#%x-PE#%x, err=%d\n",
__func__, function, pe->phb->global_number,
pe->addr, rc);
@@ -808,76 +808,67 @@ static void *eeh_set_dev_freset(void *data, void *flag)
}
/**
- * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
+ * eeh_pe_reset_full - Complete a full reset process on the indicated PE
* @pe: EEH PE
*
- * Assert the PCI #RST line for 1/4 second.
+ * This function executes a full reset procedure on a PE, including setting
+ * the appropriate flags, performing a fundamental or hot reset, and then
+ * deactivating the reset status. It is designed to be used within the EEH
+ * subsystem, as opposed to eeh_pe_reset which is exported to drivers and
+ * only performs a single operation at a time.
+ *
+ * This function will attempt to reset a PE three times before failing.
*/
-static void eeh_reset_pe_once(struct eeh_pe *pe)
+int eeh_pe_reset_full(struct eeh_pe *pe)
{
+ int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+ int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+ int type = EEH_RESET_HOT;
unsigned int freset = 0;
+ int i, state, ret;
- /* Determine type of EEH reset required for
- * Partitionable Endpoint, a hot-reset (1)
- * or a fundamental reset (3).
- * A fundamental reset required by any device under
- * Partitionable Endpoint trumps hot-reset.
+ /*
+ * Determine the type of reset to perform - hot or fundamental.
+ * Hot reset is the default operation, unless any device under the
+ * PE requires a fundamental reset.
*/
eeh_pe_dev_traverse(pe, eeh_set_dev_freset, &freset);
if (freset)
- eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
- else
- eeh_ops->reset(pe, EEH_RESET_HOT);
-
- eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
-}
-
-/**
- * eeh_reset_pe - Reset the indicated PE
- * @pe: EEH PE
- *
- * This routine should be called to reset indicated device, including
- * PE. A PE might include multiple PCI devices and sometimes PCI bridges
- * might be involved as well.
- */
-int eeh_reset_pe(struct eeh_pe *pe)
-{
- int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
- int i, state, ret;
+ type = EEH_RESET_FUNDAMENTAL;
- /* Mark as reset and block config space */
- eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+ /* Mark the PE as in reset state and block config space accesses */
+ eeh_pe_state_mark(pe, reset_state);
- /* Take three shots at resetting the bus */
+ /* Make three attempts at resetting the bus */
for (i = 0; i < 3; i++) {
- eeh_reset_pe_once(pe);
+ ret = eeh_pe_reset(pe, type);
+ if (ret)
+ break;
- /*
- * EEH_PE_ISOLATED is expected to be removed after
- * BAR restore.
- */
+ ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE);
+ if (ret)
+ break;
+
+ /* Wait until the PE is in a functioning state */
state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
- if ((state & flags) == flags) {
- ret = 0;
- goto out;
- }
+ if ((state & active_flags) == active_flags)
+ break;
if (state < 0) {
- pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x",
+ pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x",
__func__, pe->phb->global_number, pe->addr);
ret = -ENOTRECOVERABLE;
- goto out;
+ break;
}
- /* We might run out of credits */
+ /* Set error in case this is our last attempt */
ret = -EIO;
pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n",
__func__, state, pe->phb->global_number, pe->addr, (i + 1));
}
-out:
- eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
+ eeh_pe_state_clear(pe, reset_state);
return ret;
}
@@ -1601,6 +1592,7 @@ static int eeh_pe_reenable_devices(struct eeh_pe *pe)
return eeh_unfreeze_pe(pe, true);
}
+
/**
* eeh_pe_reset - Issue PE reset according to specified type
* @pe: EEH PE
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 5c31369435f2..d88573bdd090 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -588,7 +588,7 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
/* Issue reset */
- ret = eeh_reset_pe(pe);
+ ret = eeh_pe_reset_full(pe);
if (ret) {
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
return ret;
@@ -659,7 +659,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
* config accesses. So we prefer to block them. However, controlled
* PCI config accesses initiated from EEH itself are allowed.
*/
- rc = eeh_reset_pe(pe);
+ rc = eeh_pe_reset_full(pe);
if (rc)
return rc;
@@ -734,7 +734,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
frozen_bus = eeh_pe_bus_get(pe);
if (!frozen_bus) {
- pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+ pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
return;
}
@@ -878,7 +878,7 @@ excess_failures:
* are due to poorly seated PCI cards. Only 10% or so are
* due to actual, failed cards.
*/
- pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
+ pr_err("EEH: PHB#%x-PE#%x has failed %d times in the\n"
"last hour and has been permanently disabled.\n"
"Please try reseating or replacing it.\n",
pe->phb->global_number, pe->addr,
@@ -886,7 +886,7 @@ excess_failures:
goto perm_error;
hard_fail:
- pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
+ pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
"Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
@@ -1000,7 +1000,7 @@ static void eeh_handle_special_event(void)
bus = eeh_pe_bus_get(phb_pe);
if (!bus) {
pr_err("%s: Cannot find PCI bus for "
- "PHB#%d-PE#%x\n",
+ "PHB#%x-PE#%x\n",
__func__,
pe->phb->global_number,
pe->addr);
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index 82e7327e3cd0..accbf8b5fd46 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -75,11 +75,11 @@ static int eeh_event_handler(void * dummy)
if (pe) {
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
if (pe->type & EEH_PE_PHB)
- pr_info("EEH: Detected error on PHB#%d\n",
+ pr_info("EEH: Detected error on PHB#%x\n",
pe->phb->global_number);
else
pr_info("EEH: Detected PCI bus error on "
- "PHB#%d-PE#%x\n",
+ "PHB#%x-PE#%x\n",
pe->phb->global_number, pe->addr);
eeh_handle_event(pe);
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index de7d091c4c31..cc4b206f77e4 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -104,7 +104,7 @@ int eeh_phb_pe_create(struct pci_controller *phb)
/* Put it into the list */
list_add_tail(&pe->child, &eeh_phb_pe);
- pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);
+ pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);
return 0;
}
@@ -333,7 +333,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
/* Check if the PE number is valid */
if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
- pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n",
+ pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%x\n",
__func__, edev->config_addr, edev->phb->global_number);
return -EINVAL;
}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3841d749a430..5742dbdbee46 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -674,7 +674,11 @@ BEGIN_FTR_SECTION
mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
-
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ lwz r0,TSK_STACK_CANARY(r2)
+ lis r4,__stack_chk_guard@ha
+ stw r0,__stack_chk_guard@l(r4)
+#endif
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
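
/*
 * Illustrative sketch (not part of the patch): the -fstack-protector
 * contract behind the entry_32.S hunk above and the __stack_chk_guard
 * definition added to process.c later in this series.  For every
 * protected function the compiler emits, roughly, the hand-written
 * equivalent below; the kernel only has to provide the two symbols,
 * and on 32-bit non-SMP powerpc it refreshes the single global canary
 * from the incoming task at context switch, which is what the new
 * lwz/lis/stw sequence does.  The function and names below are
 * hypothetical examples, not kernel code.
 */
unsigned long __stack_chk_guard;	/* one global canary (cf. the process.c hunk) */

void __stack_chk_fail(void);		/* called when the canary was clobbered */

int name_length(const char *src)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: stash the canary */
	char buf[16];					/* in compiler-generated code the
							 * canary slot sits between locals
							 * like buf and the saved link reg */
	int i;

	for (i = 0; src[i] && i < 15; i++)
		buf[i] = src[i];
	buf[i] = '\0';

	if (canary != __stack_chk_guard)	/* epilogue: verify before returning */
		__stack_chk_fail();
	return buf[0] ? i : 0;
}
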
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 38a1f96430e1..45b453e4d0c8 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -923,10 +923,10 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x340)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl .save_nvgprs
+ bl save_nvgprs
INTS_RESTORE_HARD
- bl .unknown_exception
- b .ret_from_except
+ bl unknown_exception
+ b ret_from_except
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1ba82ea90230..d39d6118c6e9 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1408,7 +1408,7 @@ USE_TEXT_SECTION()
/*
* Hash table stuff
*/
- .align 7
+ .balign IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
andis. r0,r4,0xa410 /* weird error? */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index a95639b8d4ac..5c9f50c1aa99 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -47,13 +47,11 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
unsigned int replaced;
/*
- * Note: Due to modules and __init, code can
- * disappear and change, we need to protect against faulting
- * as well as code changing. We do this by using the
- * probe_kernel_* functions.
- *
- * No real locking needed, this code is run through
- * kstop_machine, or before SMP starts.
+ * Note:
+ * We are paranoid about modifying text, as if a bug was to happen, it
+ * could cause us to read or write to someplace that could cause harm.
+ * Carefully read and modify the code with probe_kernel_*(), and make
+ * sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 04c546e20cc0..1dc5eae2ced3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -107,12 +107,19 @@ __secondary_hold_acknowledge:
* crash_kernel region. The loader is responsible for
* observing the alignment requirement.
*/
+
+#ifdef CONFIG_RELOCATABLE_TEST
+#define RUN_AT_LOAD_DEFAULT 1 /* Test relocation, do not copy to 0 */
+#else
+#define RUN_AT_LOAD_DEFAULT 0x72756e30 /* "run0" -- relocate to 0 by default */
+#endif
+
/* Do not move this variable as kexec-tools knows about it. */
. = 0x5c
.globl __run_at_load
__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load)
- .long 0x72756e30 /* "run0" -- relocate to 0 by default */
+ .long RUN_AT_LOAD_DEFAULT
#endif
. = 0x60
@@ -153,7 +160,7 @@ __secondary_hold:
cmpdi 0,r12,0
beq 100b
-#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
#ifdef CONFIG_PPC_BOOK3E
tovirt(r12,r12)
#endif
@@ -214,9 +221,9 @@ booting_thread_hwid:
*/
_GLOBAL(book3e_start_thread)
LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
- cmpi 0, r3, 0
+ cmpwi r3, 0
beq 10f
- cmpi 0, r3, 1
+ cmpwi r3, 1
beq 11f
/* If the thread id is invalid, just exit. */
b 13f
@@ -241,9 +248,9 @@ _GLOBAL(book3e_start_thread)
* r3 = the thread physical id
*/
_GLOBAL(book3e_stop_thread)
- cmpi 0, r3, 0
+ cmpwi r3, 0
beq 10f
- cmpi 0, r3, 1
+ cmpwi r3, 1
beq 10f
/* If the thread id is invalid, just exit. */
b 13f
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index fb133a163263..1a9c99d3e5d8 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -73,6 +73,9 @@
#define RPN_PATTERN 0x00f0
#endif
+#define PAGE_SHIFT_512K 19
+#define PAGE_SHIFT_8M 23
+
__HEAD
_ENTRY(_stext);
_ENTRY(_start);
@@ -322,7 +325,7 @@ SystemCall:
#endif
InstructionTLBMiss:
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
mtspr SPRN_SPRG_SCRATCH2, r3
#endif
EXCEPTION_PROLOG_0
@@ -332,10 +335,12 @@ InstructionTLBMiss:
*/
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
-#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
/* Only modules will cause ITLB Misses as we always
* pin the first 8MB of kernel memory */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
mfcr r3
+#endif
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
IS_KERNEL(r11, r10)
#endif
mfspr r11, SPRN_M_TW /* Get level 1 table */
@@ -343,7 +348,6 @@ InstructionTLBMiss:
BRANCH_UNLESS_KERNEL(3f)
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
- mtcr r3
#endif
/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -351,14 +355,25 @@ InstructionTLBMiss:
/* Extract level 2 index */
rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+ mtcr r11
+ bt- 28, 10f /* bit 28 = Large page (8M) */
+ bt- 29, 20f /* bit 29 = Large page (8M or 512k) */
+#endif
rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
lwz r10, 0(r10) /* Get the pte */
-
+4:
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
+ mtcr r3
+#endif
/* Insert the APG into the TWC from the Linux PTE. */
rlwimi r11, r10, 0, 25, 26
/* Load the MI_TWC with the attributes for this "segment." */
MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 1, MI_SPS16K
+#endif
#ifdef CONFIG_SWAP
rlwinm r11, r10, 32-5, _PAGE_PRESENT
and r11, r11, r10
@@ -371,16 +386,45 @@ InstructionTLBMiss:
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 0, 0x0ff0 /* Set 24-27, clear 20-23 */
+#else
rlwimi r10, r11, 0, 0x0ff8 /* Set 24-27, clear 20-23,28 */
+#endif
MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
-#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC) || defined (CONFIG_HUGETLB_PAGE)
mfspr r3, SPRN_SPRG_SCRATCH2
#endif
EXCEPTION_EPILOG_0
rfi
+#ifdef CONFIG_HUGETLB_PAGE
+10: /* 8M pages */
+#ifdef CONFIG_PPC_16K_PAGES
+ /* Extract level 2 index */
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+ /* Level 2 base */
+ rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+
+20: /* 512k pages */
+ /* Extract level 2 index */
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+#endif
+
. = 0x1200
DataStoreTLBMiss:
mtspr SPRN_SPRG_SCRATCH2, r3
@@ -407,7 +451,6 @@ _ENTRY(DTLBMiss_jmp)
#endif
blt cr7, DTLBMissLinear
3:
- mtcr r3
mfspr r10, SPRN_MD_EPN
/* Insert level 1 index */
@@ -418,8 +461,15 @@ _ENTRY(DTLBMiss_jmp)
*/
/* Extract level 2 index */
rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
+#ifdef CONFIG_HUGETLB_PAGE
+ mtcr r11
+ bt- 28, 10f /* bit 28 = Large page (8M) */
+ bt- 29, 20f /* bit 29 = Large page (8M or 512k) */
+#endif
rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
lwz r10, 0(r10) /* Get the pte */
+4:
+ mtcr r3
/* Insert the Guarded flag and APG into the TWC from the Linux PTE.
* It is bit 26-27 of both the Linux PTE and the TWC (at least
@@ -434,6 +484,11 @@ _ENTRY(DTLBMiss_jmp)
rlwimi r11, r10, 32-5, 30, 30
MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+ /* In 4k pages mode, SPS (bit 28) in RPN must match PS[1] (bit 29)
+ * In 16k pages mode, SPS is always 1 */
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 1, MD_SPS16K
+#endif
/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
* We also need to know if the insn is a load/store, so:
* Clear _PAGE_PRESENT and load that which will
@@ -455,7 +510,11 @@ _ENTRY(DTLBMiss_jmp)
* of the MMU.
*/
li r11, RPN_PATTERN
+#if defined (CONFIG_HUGETLB_PAGE) && defined (CONFIG_PPC_4K_PAGES)
+ rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */
+#else
rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
+#endif
rlwimi r10, r11, 0, 20, 20 /* clear 20 */
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
@@ -465,6 +524,30 @@ _ENTRY(DTLBMiss_jmp)
EXCEPTION_EPILOG_0
rfi
+#ifdef CONFIG_HUGETLB_PAGE
+10: /* 8M pages */
+ /* Extract level 2 index */
+#ifdef CONFIG_PPC_16K_PAGES
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+#else
+ /* Level 2 base */
+ rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
+#endif
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+
+20: /* 512k pages */
+ /* Extract level 2 index */
+ rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+ /* Add level 2 base */
+ rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+ lwz r10, 0(r10) /* Get the pte */
+ rlwinm r11, r11, 0, 0xf
+ b 4b
+#endif
/* This is an instruction TLB error on the MPC8xx. This could be due
* to many reasons, such as executing guarded memory or illegal instruction
@@ -586,6 +669,9 @@ _ENTRY(FixupDAR_cmp)
/* Insert level 1 index */
3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
+ mtcr r11
+ bt 28,200f /* bit 28 = Large page (8M) */
+ bt 29,202f /* bit 29 = Large page (8M or 512K) */
rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
/* Insert level 2 index */
rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
@@ -611,6 +697,27 @@ _ENTRY(FixupDAR_cmp)
141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */
+ /* concat physical page address(r11) and page offset(r10) */
+200:
+#ifdef CONFIG_PPC_16K_PAGES
+ rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
+ rlwimi r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
+#else
+ rlwinm r11, r10, 0, ~HUGEPD_SHIFT_MASK
+#endif
+ lwz r11, 0(r11) /* Get the pte */
+ /* concat physical page address(r11) and page offset(r10) */
+ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
+ b 201b
+
+202:
+ rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
+ rlwimi r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
+ lwz r11, 0(r11) /* Get the pte */
+ /* concat physical page address(r11) and page offset(r10) */
+ rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
+ b 201b
+
144: mfspr r10, SPRN_DSISR
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
mtspr SPRN_DSISR, r10
diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c
new file mode 100644
index 000000000000..6acffd34a70f
--- /dev/null
+++ b/arch/powerpc/kernel/kexec_elf_64.c
@@ -0,0 +1,663 @@
+/*
+ * Load ELF vmlinux file for the kexec_file_load syscall.
+ *
+ * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
+ * Copyright (C) 2004 IBM Corp.
+ * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
+ * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Based on kexec-tools' kexec-elf-exec.c and kexec-elf-ppc64.c.
+ * Heavily modified for the kernel by
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "kexec_elf: " fmt
+
+#include <linux/elf.h>
+#include <linux/kexec.h>
+#include <linux/libfdt.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define PURGATORY_STACK_SIZE (16 * 1024)
+
+#define elf_addr_to_cpu elf64_to_cpu
+
+#ifndef Elf_Rel
+#define Elf_Rel Elf64_Rel
+#endif /* Elf_Rel */
+
+struct elf_info {
+ /*
+ * Where the ELF binary contents are kept.
+ * Memory managed by the user of the struct.
+ */
+ const char *buffer;
+
+ const struct elfhdr *ehdr;
+ const struct elf_phdr *proghdrs;
+ struct elf_shdr *sechdrs;
+};
+
+static inline bool elf_is_elf_file(const struct elfhdr *ehdr)
+{
+ return memcmp(ehdr->e_ident, ELFMAG, SELFMAG) == 0;
+}
+
+static uint64_t elf64_to_cpu(const struct elfhdr *ehdr, uint64_t value)
+{
+ if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
+ value = le64_to_cpu(value);
+ else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
+ value = be64_to_cpu(value);
+
+ return value;
+}
+
+static uint16_t elf16_to_cpu(const struct elfhdr *ehdr, uint16_t value)
+{
+ if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
+ value = le16_to_cpu(value);
+ else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
+ value = be16_to_cpu(value);
+
+ return value;
+}
+
+static uint32_t elf32_to_cpu(const struct elfhdr *ehdr, uint32_t value)
+{
+ if (ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
+ value = le32_to_cpu(value);
+ else if (ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
+ value = be32_to_cpu(value);
+
+ return value;
+}
+
+/**
+ * elf_is_ehdr_sane - check that it is safe to use the ELF header
+ * @buf_len: size of the buffer in which the ELF file is loaded.
+ */
+static bool elf_is_ehdr_sane(const struct elfhdr *ehdr, size_t buf_len)
+{
+ if (ehdr->e_phnum > 0 && ehdr->e_phentsize != sizeof(struct elf_phdr)) {
+ pr_debug("Bad program header size.\n");
+ return false;
+ } else if (ehdr->e_shnum > 0 &&
+ ehdr->e_shentsize != sizeof(struct elf_shdr)) {
+ pr_debug("Bad section header size.\n");
+ return false;
+ } else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT ||
+ ehdr->e_version != EV_CURRENT) {
+ pr_debug("Unknown ELF version.\n");
+ return false;
+ }
+
+ if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) {
+ size_t phdr_size;
+
+ /*
+ * e_phnum is at most 65535 so calculating the size of the
+ * program header cannot overflow.
+ */
+ phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum;
+
+ /* Sanity check the program header table location. */
+ if (ehdr->e_phoff + phdr_size < ehdr->e_phoff) {
+ pr_debug("Program headers at invalid location.\n");
+ return false;
+ } else if (ehdr->e_phoff + phdr_size > buf_len) {
+ pr_debug("Program headers truncated.\n");
+ return false;
+ }
+ }
+
+ if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) {
+ size_t shdr_size;
+
+ /*
+ * e_shnum is at most 65536 so calculating
+ * the size of the section header cannot overflow.
+ */
+ shdr_size = sizeof(struct elf_shdr) * ehdr->e_shnum;
+
+ /* Sanity check the section header table location. */
+ if (ehdr->e_shoff + shdr_size < ehdr->e_shoff) {
+ pr_debug("Section headers at invalid location.\n");
+ return false;
+ } else if (ehdr->e_shoff + shdr_size > buf_len) {
+ pr_debug("Section headers truncated.\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int elf_read_ehdr(const char *buf, size_t len, struct elfhdr *ehdr)
+{
+ struct elfhdr *buf_ehdr;
+
+ if (len < sizeof(*buf_ehdr)) {
+ pr_debug("Buffer is too small to hold ELF header.\n");
+ return -ENOEXEC;
+ }
+
+ memset(ehdr, 0, sizeof(*ehdr));
+ memcpy(ehdr->e_ident, buf, sizeof(ehdr->e_ident));
+ if (!elf_is_elf_file(ehdr)) {
+ pr_debug("No ELF header magic.\n");
+ return -ENOEXEC;
+ }
+
+ if (ehdr->e_ident[EI_CLASS] != ELF_CLASS) {
+ pr_debug("Not a supported ELF class.\n");
+ return -ENOEXEC;
+ } else if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB &&
+ ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+ pr_debug("Not a supported ELF data format.\n");
+ return -ENOEXEC;
+ }
+
+ buf_ehdr = (struct elfhdr *) buf;
+ if (elf16_to_cpu(ehdr, buf_ehdr->e_ehsize) != sizeof(*buf_ehdr)) {
+ pr_debug("Bad ELF header size.\n");
+ return -ENOEXEC;
+ }
+
+ ehdr->e_type = elf16_to_cpu(ehdr, buf_ehdr->e_type);
+ ehdr->e_machine = elf16_to_cpu(ehdr, buf_ehdr->e_machine);
+ ehdr->e_version = elf32_to_cpu(ehdr, buf_ehdr->e_version);
+ ehdr->e_entry = elf_addr_to_cpu(ehdr, buf_ehdr->e_entry);
+ ehdr->e_phoff = elf_addr_to_cpu(ehdr, buf_ehdr->e_phoff);
+ ehdr->e_shoff = elf_addr_to_cpu(ehdr, buf_ehdr->e_shoff);
+ ehdr->e_flags = elf32_to_cpu(ehdr, buf_ehdr->e_flags);
+ ehdr->e_phentsize = elf16_to_cpu(ehdr, buf_ehdr->e_phentsize);
+ ehdr->e_phnum = elf16_to_cpu(ehdr, buf_ehdr->e_phnum);
+ ehdr->e_shentsize = elf16_to_cpu(ehdr, buf_ehdr->e_shentsize);
+ ehdr->e_shnum = elf16_to_cpu(ehdr, buf_ehdr->e_shnum);
+ ehdr->e_shstrndx = elf16_to_cpu(ehdr, buf_ehdr->e_shstrndx);
+
+ return elf_is_ehdr_sane(ehdr, len) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_is_phdr_sane - check that it is safe to use the program header
+ * @buf_len: size of the buffer in which the ELF file is loaded.
+ */
+static bool elf_is_phdr_sane(const struct elf_phdr *phdr, size_t buf_len)
+{
+
+ if (phdr->p_offset + phdr->p_filesz < phdr->p_offset) {
+ pr_debug("ELF segment location wraps around.\n");
+ return false;
+ } else if (phdr->p_offset + phdr->p_filesz > buf_len) {
+ pr_debug("ELF segment not in file.\n");
+ return false;
+ } else if (phdr->p_paddr + phdr->p_memsz < phdr->p_paddr) {
+ pr_debug("ELF segment address wraps around.\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int elf_read_phdr(const char *buf, size_t len, struct elf_info *elf_info,
+ int idx)
+{
+ /* Override the const in proghdrs, we are the ones doing the loading. */
+ struct elf_phdr *phdr = (struct elf_phdr *) &elf_info->proghdrs[idx];
+ const char *pbuf;
+ struct elf_phdr *buf_phdr;
+
+ pbuf = buf + elf_info->ehdr->e_phoff + (idx * sizeof(*buf_phdr));
+ buf_phdr = (struct elf_phdr *) pbuf;
+
+ phdr->p_type = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_type);
+ phdr->p_offset = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_offset);
+ phdr->p_paddr = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_paddr);
+ phdr->p_vaddr = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_vaddr);
+ phdr->p_flags = elf32_to_cpu(elf_info->ehdr, buf_phdr->p_flags);
+
+ /*
+ * The following fields have a type equivalent to Elf_Addr
+ * both in 32 bit and 64 bit ELF.
+ */
+ phdr->p_filesz = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_filesz);
+ phdr->p_memsz = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_memsz);
+ phdr->p_align = elf_addr_to_cpu(elf_info->ehdr, buf_phdr->p_align);
+
+ return elf_is_phdr_sane(phdr, len) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_read_phdrs - read the program headers from the buffer
+ *
+ * This function assumes that the program header table was checked for sanity.
+ * Use elf_is_ehdr_sane() if it wasn't.
+ */
+static int elf_read_phdrs(const char *buf, size_t len,
+ struct elf_info *elf_info)
+{
+ size_t phdr_size, i;
+ const struct elfhdr *ehdr = elf_info->ehdr;
+
+ /*
+ * e_phnum is at most 65535 so calculating the size of the
+ * program header cannot overflow.
+ */
+ phdr_size = sizeof(struct elf_phdr) * ehdr->e_phnum;
+
+ elf_info->proghdrs = kzalloc(phdr_size, GFP_KERNEL);
+ if (!elf_info->proghdrs)
+ return -ENOMEM;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ int ret;
+
+ ret = elf_read_phdr(buf, len, elf_info, i);
+ if (ret) {
+ kfree(elf_info->proghdrs);
+ elf_info->proghdrs = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * elf_is_shdr_sane - check that it is safe to use the section header
+ * @buf_len: size of the buffer in which the ELF file is loaded.
+ */
+static bool elf_is_shdr_sane(const struct elf_shdr *shdr, size_t buf_len)
+{
+ bool size_ok;
+
+ /* SHT_NULL headers have undefined values, so we can't check them. */
+ if (shdr->sh_type == SHT_NULL)
+ return true;
+
+ /* Now verify sh_entsize */
+ switch (shdr->sh_type) {
+ case SHT_SYMTAB:
+ size_ok = shdr->sh_entsize == sizeof(Elf_Sym);
+ break;
+ case SHT_RELA:
+ size_ok = shdr->sh_entsize == sizeof(Elf_Rela);
+ break;
+ case SHT_DYNAMIC:
+ size_ok = shdr->sh_entsize == sizeof(Elf_Dyn);
+ break;
+ case SHT_REL:
+ size_ok = shdr->sh_entsize == sizeof(Elf_Rel);
+ break;
+ case SHT_NOTE:
+ case SHT_PROGBITS:
+ case SHT_HASH:
+ case SHT_NOBITS:
+ default:
+ /*
+ * This is a section whose entsize requirements
+ * I don't care about. If I don't know about
+ * the section I can't care about its entsize
+ * requirements.
+ */
+ size_ok = true;
+ break;
+ }
+
+ if (!size_ok) {
+ pr_debug("ELF section with wrong entry size.\n");
+ return false;
+ } else if (shdr->sh_addr + shdr->sh_size < shdr->sh_addr) {
+ pr_debug("ELF section address wraps around.\n");
+ return false;
+ }
+
+ if (shdr->sh_type != SHT_NOBITS) {
+ if (shdr->sh_offset + shdr->sh_size < shdr->sh_offset) {
+ pr_debug("ELF section location wraps around.\n");
+ return false;
+ } else if (shdr->sh_offset + shdr->sh_size > buf_len) {
+ pr_debug("ELF section not in file.\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int elf_read_shdr(const char *buf, size_t len, struct elf_info *elf_info,
+ int idx)
+{
+ struct elf_shdr *shdr = &elf_info->sechdrs[idx];
+ const struct elfhdr *ehdr = elf_info->ehdr;
+ const char *sbuf;
+ struct elf_shdr *buf_shdr;
+
+ sbuf = buf + ehdr->e_shoff + idx * sizeof(*buf_shdr);
+ buf_shdr = (struct elf_shdr *) sbuf;
+
+ shdr->sh_name = elf32_to_cpu(ehdr, buf_shdr->sh_name);
+ shdr->sh_type = elf32_to_cpu(ehdr, buf_shdr->sh_type);
+ shdr->sh_addr = elf_addr_to_cpu(ehdr, buf_shdr->sh_addr);
+ shdr->sh_offset = elf_addr_to_cpu(ehdr, buf_shdr->sh_offset);
+ shdr->sh_link = elf32_to_cpu(ehdr, buf_shdr->sh_link);
+ shdr->sh_info = elf32_to_cpu(ehdr, buf_shdr->sh_info);
+
+ /*
+ * The following fields have a type equivalent to Elf_Addr
+ * both in 32 bit and 64 bit ELF.
+ */
+ shdr->sh_flags = elf_addr_to_cpu(ehdr, buf_shdr->sh_flags);
+ shdr->sh_size = elf_addr_to_cpu(ehdr, buf_shdr->sh_size);
+ shdr->sh_addralign = elf_addr_to_cpu(ehdr, buf_shdr->sh_addralign);
+ shdr->sh_entsize = elf_addr_to_cpu(ehdr, buf_shdr->sh_entsize);
+
+ return elf_is_shdr_sane(shdr, len) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_read_shdrs - read the section headers from the buffer
+ *
+ * This function assumes that the section header table was checked for sanity.
+ * Use elf_is_ehdr_sane() if it wasn't.
+ */
+static int elf_read_shdrs(const char *buf, size_t len,
+ struct elf_info *elf_info)
+{
+ size_t shdr_size, i;
+
+ /*
+ * e_shnum is at most 65536 so calculating
+ * the size of the section header cannot overflow.
+ */
+ shdr_size = sizeof(struct elf_shdr) * elf_info->ehdr->e_shnum;
+
+ elf_info->sechdrs = kzalloc(shdr_size, GFP_KERNEL);
+ if (!elf_info->sechdrs)
+ return -ENOMEM;
+
+ for (i = 0; i < elf_info->ehdr->e_shnum; i++) {
+ int ret;
+
+ ret = elf_read_shdr(buf, len, elf_info, i);
+ if (ret) {
+ kfree(elf_info->sechdrs);
+ elf_info->sechdrs = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * elf_read_from_buffer - read ELF file and sets up ELF header and ELF info
+ * @buf: Buffer to read ELF file from.
+ * @len: Size of @buf.
+ * @ehdr: Pointer to existing struct which will be populated.
+ * @elf_info: Pointer to existing struct which will be populated.
+ *
+ * This function allows reading ELF files with different byte order than
+ * the kernel, byte-swapping the fields as needed.
+ *
+ * Return:
+ * On success returns 0, and the caller should call elf_free_info(elf_info) to
+ * free the memory allocated for the section and program headers.
+ */
+int elf_read_from_buffer(const char *buf, size_t len, struct elfhdr *ehdr,
+ struct elf_info *elf_info)
+{
+ int ret;
+
+ ret = elf_read_ehdr(buf, len, ehdr);
+ if (ret)
+ return ret;
+
+ elf_info->buffer = buf;
+ elf_info->ehdr = ehdr;
+ if (ehdr->e_phoff > 0 && ehdr->e_phnum > 0) {
+ ret = elf_read_phdrs(buf, len, elf_info);
+ if (ret)
+ return ret;
+ }
+ if (ehdr->e_shoff > 0 && ehdr->e_shnum > 0) {
+ ret = elf_read_shdrs(buf, len, elf_info);
+ if (ret) {
+ kfree(elf_info->proghdrs);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * elf_free_info - free memory allocated by elf_read_from_buffer
+ */
+void elf_free_info(struct elf_info *elf_info)
+{
+ kfree(elf_info->proghdrs);
+ kfree(elf_info->sechdrs);
+ memset(elf_info, 0, sizeof(*elf_info));
+}
+/**
+ * build_elf_exec_info - read ELF executable and check that we can use it
+ */
+static int build_elf_exec_info(const char *buf, size_t len, struct elfhdr *ehdr,
+ struct elf_info *elf_info)
+{
+ int i;
+ int ret;
+
+ ret = elf_read_from_buffer(buf, len, ehdr, elf_info);
+ if (ret)
+ return ret;
+
+ /* Big endian vmlinux has type ET_DYN. */
+ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) {
+ pr_err("Not an ELF executable.\n");
+ goto error;
+ } else if (!elf_info->proghdrs) {
+ pr_err("No ELF program header.\n");
+ goto error;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ /*
+ * Kexec does not support loading interpreters.
+ * In addition this check keeps us from attempting
+ * to kexec ordinary executables.
+ */
+ if (elf_info->proghdrs[i].p_type == PT_INTERP) {
+ pr_err("Requires an ELF interpreter.\n");
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ elf_free_info(elf_info);
+ return -ENOEXEC;
+}
+
+static int elf64_probe(const char *buf, unsigned long len)
+{
+ struct elfhdr ehdr;
+ struct elf_info elf_info;
+ int ret;
+
+ ret = build_elf_exec_info(buf, len, &ehdr, &elf_info);
+ if (ret)
+ return ret;
+
+ elf_free_info(&elf_info);
+
+ return elf_check_arch(&ehdr) ? 0 : -ENOEXEC;
+}
+
+/**
+ * elf_exec_load - load ELF executable image
+ * @lowest_load_addr: On return, will be the address where the first PT_LOAD
+ * section will be loaded in memory.
+ *
+ * Return:
+ * 0 on success, negative value on failure.
+ */
+static int elf_exec_load(struct kimage *image, struct elfhdr *ehdr,
+ struct elf_info *elf_info,
+ unsigned long *lowest_load_addr)
+{
+ unsigned long base = 0, lowest_addr = UINT_MAX;
+ int ret;
+ size_t i;
+ struct kexec_buf kbuf = { .image = image, .buf_max = ppc64_rma_size,
+ .top_down = false };
+
+ /* Read in the PT_LOAD segments. */
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ unsigned long load_addr;
+ size_t size;
+ const struct elf_phdr *phdr;
+
+ phdr = &elf_info->proghdrs[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ size = phdr->p_filesz;
+ if (size > phdr->p_memsz)
+ size = phdr->p_memsz;
+
+ kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset;
+ kbuf.bufsz = size;
+ kbuf.memsz = phdr->p_memsz;
+ kbuf.buf_align = phdr->p_align;
+ kbuf.buf_min = phdr->p_paddr + base;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out;
+ load_addr = kbuf.mem;
+
+ if (load_addr < lowest_addr)
+ lowest_addr = load_addr;
+ }
+
+ /* Update entry point to reflect new load address. */
+ ehdr->e_entry += base;
+
+ *lowest_load_addr = lowest_addr;
+ ret = 0;
+ out:
+ return ret;
+}
+
+static void *elf64_load(struct kimage *image, char *kernel_buf,
+ unsigned long kernel_len, char *initrd,
+ unsigned long initrd_len, char *cmdline,
+ unsigned long cmdline_len)
+{
+ int ret;
+ unsigned int fdt_size;
+ unsigned long kernel_load_addr, purgatory_load_addr;
+ unsigned long initrd_load_addr = 0, fdt_load_addr;
+ void *fdt;
+ const void *slave_code;
+ struct elfhdr ehdr;
+ struct elf_info elf_info;
+ struct kexec_buf kbuf = { .image = image, .buf_min = 0,
+ .buf_max = ppc64_rma_size };
+
+ ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info);
+ if (ret)
+ goto out;
+
+ ret = elf_exec_load(image, &ehdr, &elf_info, &kernel_load_addr);
+ if (ret)
+ goto out;
+
+ pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr);
+
+ ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true,
+ &purgatory_load_addr);
+ if (ret) {
+ pr_err("Loading purgatory failed.\n");
+ goto out;
+ }
+
+ pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr);
+
+ if (initrd != NULL) {
+ kbuf.buffer = initrd;
+ kbuf.bufsz = kbuf.memsz = initrd_len;
+ kbuf.buf_align = PAGE_SIZE;
+ kbuf.top_down = false;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out;
+ initrd_load_addr = kbuf.mem;
+
+ pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
+ }
+
+ fdt_size = fdt_totalsize(initial_boot_params) * 2;
+ fdt = kmalloc(fdt_size, GFP_KERNEL);
+ if (!fdt) {
+ pr_err("Not enough memory for the device tree.\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = fdt_open_into(initial_boot_params, fdt, fdt_size);
+ if (ret < 0) {
+ pr_err("Error setting up the new device tree.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = setup_new_fdt(fdt, initrd_load_addr, initrd_len, cmdline);
+ if (ret)
+ goto out;
+
+ fdt_pack(fdt);
+
+ kbuf.buffer = fdt;
+ kbuf.bufsz = kbuf.memsz = fdt_size;
+ kbuf.buf_align = PAGE_SIZE;
+ kbuf.top_down = true;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out;
+ fdt_load_addr = kbuf.mem;
+
+ pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr);
+
+ slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset;
+ ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
+ fdt_load_addr);
+ if (ret)
+ pr_err("Error setting up the purgatory.\n");
+
+out:
+ elf_free_info(&elf_info);
+
+ /* Make kimage_file_post_load_cleanup free the fdt buffer for us. */
+ return ret ? ERR_PTR(ret) : fdt;
+}
+
+struct kexec_file_ops kexec_elf64_ops = {
+ .probe = elf64_probe,
+ .load = elf64_load,
+};
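
/*
 * Illustrative sketch (not part of the patch): the bounds-check idiom
 * that elf_is_ehdr_sane(), elf_is_phdr_sane() and elf_is_shdr_sane()
 * above all repeat.  The "offset + size < offset" test catches
 * unsigned wrap-around first, so the comparison against the buffer
 * length can be trusted.  range_ok() is a hypothetical helper, not
 * part of the new file.
 */
#include <stdbool.h>
#include <stddef.h>

static bool range_ok(size_t offset, size_t size, size_t buf_len)
{
	if (offset + size < offset)		/* wrapped around: reject */
		return false;
	return offset + size <= buf_len;	/* must end inside the buffer */
}
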
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e785cc9e1ecd..ad108b842669 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -140,13 +140,16 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
regs->link = (unsigned long)kretprobe_trampoline;
}
-static int __kprobes kprobe_handler(struct pt_regs *regs)
+int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
unsigned int *addr = (unsigned int *)regs->nip;
struct kprobe_ctlblk *kcb;
+ if (user_mode(regs))
+ return 0;
+
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
@@ -359,12 +362,12 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*/
-static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+int __kprobes kprobe_post_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (!cur)
+ if (!cur || user_mode(regs))
return 0;
/* make sure we got here for instruction we have a kprobe on */
@@ -449,7 +452,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->nip)) != NULL) {
- regs->nip = entry->fixup;
+ regs->nip = extable_fixup(entry);
return 1;
}
@@ -470,25 +473,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
- struct die_args *args = (struct die_args *)data;
- int ret = NOTIFY_DONE;
-
- if (args->regs && user_mode(args->regs))
- return ret;
-
- switch (val) {
- case DIE_BPT:
- if (kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- case DIE_SSTEP:
- if (post_kprobe_handler(args->regs))
- ret = NOTIFY_STOP;
- break;
- default:
- break;
- }
- return ret;
+ return NOTIFY_DONE;
}
unsigned long arch_deref_entry_point(void *entry)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index a205fa3d9bf3..5c12e21d0d1a 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -310,7 +310,7 @@ void default_machine_kexec(struct kimage *image)
if (!kdump_in_progress())
kexec_prepare_cpus();
- pr_debug("kexec: Starting switchover sequence.\n");
+ printk("kexec: Starting switchover sequence.\n");
/* switch to a statically allocated stack. Based on irq stack code.
* We setup preempt_count to avoid using VMX in memcpy.
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
new file mode 100644
index 000000000000..7abc8a75ee48
--- /dev/null
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -0,0 +1,338 @@
+/*
+ * ppc64 code to implement the kexec_file_load syscall
+ *
+ * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
+ * Copyright (C) 2004 IBM Corp.
+ * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
+ * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
+ * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Based on kexec-tools' kexec-elf-ppc64.c, fs2dt.c.
+ * Heavily modified for the kernel by
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+
+#define SLAVE_CODE_SIZE 256
+
+static struct kexec_file_ops *kexec_file_loaders[] = {
+ &kexec_elf64_ops,
+};
+
+int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len)
+{
+ int i, ret = -ENOEXEC;
+ struct kexec_file_ops *fops;
+
+ /* We don't support crash kernels yet. */
+ if (image->type == KEXEC_TYPE_CRASH)
+ return -ENOTSUPP;
+
+ for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
+ fops = kexec_file_loaders[i];
+ if (!fops || !fops->probe)
+ continue;
+
+ ret = fops->probe(buf, buf_len);
+ if (!ret) {
+ image->fops = fops;
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+void *arch_kexec_kernel_image_load(struct kimage *image)
+{
+ if (!image->fops || !image->fops->load)
+ return ERR_PTR(-ENOEXEC);
+
+ return image->fops->load(image, image->kernel_buf,
+ image->kernel_buf_len, image->initrd_buf,
+ image->initrd_buf_len, image->cmdline_buf,
+ image->cmdline_buf_len);
+}
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ if (!image->fops || !image->fops->cleanup)
+ return 0;
+
+ return image->fops->cleanup(image->image_loader_data);
+}
+
+/**
+ * arch_kexec_walk_mem - call func(data) for each unreserved memory block
+ * @kbuf: Context info for the search. Also passed to @func.
+ * @func: Function to call for each memory block.
+ *
+ * This function is used by kexec_add_buffer and kexec_locate_mem_hole
+ * to find unreserved memory to load kexec segments into.
+ *
+ * Return: The memory walk will stop when func returns a non-zero value
+ * and that value will be returned. If all free regions are visited without
+ * func returning non-zero, then zero will be returned.
+ */
+int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
+{
+ int ret = 0;
+ u64 i;
+ phys_addr_t mstart, mend;
+
+ if (kbuf->top_down) {
+ for_each_free_mem_range_reverse(i, NUMA_NO_NODE, 0,
+ &mstart, &mend, NULL) {
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+ ret = func(mstart, mend - 1, kbuf);
+ if (ret)
+ break;
+ }
+ } else {
+ for_each_free_mem_range(i, NUMA_NO_NODE, 0, &mstart, &mend,
+ NULL) {
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+ ret = func(mstart, mend - 1, kbuf);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * setup_purgatory - initialize the purgatory's global variables
+ * @image: kexec image.
+ * @slave_code: Slave code for the purgatory.
+ * @fdt: Flattened device tree for the next kernel.
+ * @kernel_load_addr: Address where the kernel is loaded.
+ * @fdt_load_addr: Address where the flattened device tree is loaded.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+int setup_purgatory(struct kimage *image, const void *slave_code,
+ const void *fdt, unsigned long kernel_load_addr,
+ unsigned long fdt_load_addr)
+{
+ unsigned int *slave_code_buf, master_entry;
+ int ret;
+
+ slave_code_buf = kmalloc(SLAVE_CODE_SIZE, GFP_KERNEL);
+ if (!slave_code_buf)
+ return -ENOMEM;
+
+ /* Get the slave code from the new kernel and put it in purgatory. */
+ ret = kexec_purgatory_get_set_symbol(image, "purgatory_start",
+ slave_code_buf, SLAVE_CODE_SIZE,
+ true);
+ if (ret) {
+ kfree(slave_code_buf);
+ return ret;
+ }
+
+ master_entry = slave_code_buf[0];
+ memcpy(slave_code_buf, slave_code, SLAVE_CODE_SIZE);
+ slave_code_buf[0] = master_entry;
+ ret = kexec_purgatory_get_set_symbol(image, "purgatory_start",
+ slave_code_buf, SLAVE_CODE_SIZE,
+ false);
+ kfree(slave_code_buf);
+
+ ret = kexec_purgatory_get_set_symbol(image, "kernel", &kernel_load_addr,
+ sizeof(kernel_load_addr), false);
+ if (ret)
+ return ret;
+ ret = kexec_purgatory_get_set_symbol(image, "dt_offset", &fdt_load_addr,
+ sizeof(fdt_load_addr), false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * delete_fdt_mem_rsv - delete memory reservation with given address and size
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
+{
+ int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
+
+ for (i = 0; i < num_rsvs; i++) {
+ uint64_t rsv_start, rsv_size;
+
+ ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size);
+ if (ret) {
+ pr_err("Malformed device tree.\n");
+ return -EINVAL;
+ }
+
+ if (rsv_start == start && rsv_size == size) {
+ ret = fdt_del_mem_rsv(fdt, i);
+ if (ret) {
+ pr_err("Error deleting device tree reservation.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/*
+ * setup_new_fdt - modify /chosen and memory reservation for the next kernel
+ * @fdt: Flattened device tree for the next kernel.
+ * @initrd_load_addr: Address where the next initrd will be loaded.
+ * @initrd_len: Size of the next initrd, or 0 if there will be none.
+ * @cmdline: Command line for the next kernel, or NULL if there will
+ * be none.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
+ unsigned long initrd_len, const char *cmdline)
+{
+ int ret, chosen_node;
+ const void *prop;
+
+ /* Remove memory reservation for the current device tree. */
+ ret = delete_fdt_mem_rsv(fdt, __pa(initial_boot_params),
+ fdt_totalsize(initial_boot_params));
+ if (ret == 0)
+ pr_debug("Removed old device tree reservation.\n");
+ else if (ret != -ENOENT)
+ return ret;
+
+ chosen_node = fdt_path_offset(fdt, "/chosen");
+ if (chosen_node == -FDT_ERR_NOTFOUND) {
+ chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
+ "chosen");
+ if (chosen_node < 0) {
+ pr_err("Error creating /chosen.\n");
+ return -EINVAL;
+ }
+ } else if (chosen_node < 0) {
+ pr_err("Malformed device tree: error reading /chosen.\n");
+ return -EINVAL;
+ }
+
+ /* Did we boot using an initrd? */
+ prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL);
+ if (prop) {
+ uint64_t tmp_start, tmp_end, tmp_size;
+
+ tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop));
+
+ prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL);
+ if (!prop) {
+ pr_err("Malformed device tree.\n");
+ return -EINVAL;
+ }
+ tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop));
+
+ /*
+ * kexec reserves exact initrd size, while firmware may
+ * reserve a multiple of PAGE_SIZE, so check for both.
+ */
+ tmp_size = tmp_end - tmp_start;
+ ret = delete_fdt_mem_rsv(fdt, tmp_start, tmp_size);
+ if (ret == -ENOENT)
+ ret = delete_fdt_mem_rsv(fdt, tmp_start,
+ round_up(tmp_size, PAGE_SIZE));
+ if (ret == 0)
+ pr_debug("Removed old initrd reservation.\n");
+ else if (ret != -ENOENT)
+ return ret;
+
+ /* If there's no new initrd, delete the old initrd's info. */
+ if (initrd_len == 0) {
+ ret = fdt_delprop(fdt, chosen_node,
+ "linux,initrd-start");
+ if (ret) {
+ pr_err("Error deleting linux,initrd-start.\n");
+ return -EINVAL;
+ }
+
+ ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end");
+ if (ret) {
+ pr_err("Error deleting linux,initrd-end.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (initrd_len) {
+ ret = fdt_setprop_u64(fdt, chosen_node,
+ "linux,initrd-start",
+ initrd_load_addr);
+ if (ret < 0) {
+ pr_err("Error setting up the new device tree.\n");
+ return -EINVAL;
+ }
+
+ /* initrd-end is the first address after the initrd image. */
+ ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end",
+ initrd_load_addr + initrd_len);
+ if (ret < 0) {
+ pr_err("Error setting up the new device tree.\n");
+ return -EINVAL;
+ }
+
+ ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len);
+ if (ret) {
+ pr_err("Error reserving initrd memory: %s\n",
+ fdt_strerror(ret));
+ return -EINVAL;
+ }
+ }
+
+ if (cmdline != NULL) {
+ ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
+ if (ret < 0) {
+ pr_err("Error setting up the new device tree.\n");
+ return -EINVAL;
+ }
+ } else {
+ ret = fdt_delprop(fdt, chosen_node, "bootargs");
+ if (ret && ret != -FDT_ERR_NOTFOUND) {
+ pr_err("Error deleting bootargs.\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
+ if (ret) {
+ pr_err("Error setting up the new device tree.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
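
/*
 * Illustrative sketch (not part of the patch): the libfdt sequence that
 * setup_new_fdt() above performs for the initrd, pulled out as a
 * standalone helper.  describe_initrd() and its arguments are
 * hypothetical; only the libfdt calls (fdt_open_into, fdt_path_offset,
 * fdt_setprop_u64, fdt_add_mem_rsv, fdt_pack) are real API, and error
 * handling is trimmed to early returns.
 */
#include <stdint.h>
#include <libfdt.h>

static int describe_initrd(void *scratch, int scratch_size, const void *old_fdt,
			   uint64_t initrd_start, uint64_t initrd_len)
{
	int node, ret;

	/* Work on an expanded copy so the new properties are sure to fit. */
	ret = fdt_open_into(old_fdt, scratch, scratch_size);
	if (ret < 0)
		return ret;

	node = fdt_path_offset(scratch, "/chosen");
	if (node < 0)
		return node;

	/* Tell the next kernel where its initrd lives... */
	ret = fdt_setprop_u64(scratch, node, "linux,initrd-start", initrd_start);
	if (ret < 0)
		return ret;
	ret = fdt_setprop_u64(scratch, node, "linux,initrd-end",
			      initrd_start + initrd_len);
	if (ret < 0)
		return ret;

	/* ...and keep it from allocating over the image itself. */
	ret = fdt_add_mem_rsv(scratch, initrd_start, initrd_len);
	if (ret < 0)
		return ret;

	/* Shrink the blob back down before handing it over. */
	return fdt_pack(scratch);
}
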
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 5e7ece0fda9f..c6923ff45131 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -72,7 +72,6 @@ void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err,
uint64_t nip, uint64_t addr)
{
- uint64_t srr1;
int index = __this_cpu_inc_return(mce_nest_count) - 1;
struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
@@ -99,8 +98,6 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
mce->severity = MCE_SEV_ERROR_SYNC;
- srr1 = regs->msr;
-
/*
* Populate the mce error_type and type-specific error_type.
*/
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 93cf7a5846a6..1863324c6a3c 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -614,7 +614,7 @@ _GLOBAL(start_secondary_resume)
_GLOBAL(__main)
blr
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
/*
* Must be relocatable PIC code callable as a C function.
*/
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4f178671f230..32be2a844947 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -478,7 +478,7 @@ _GLOBAL(kexec_wait)
addi r5,r5,kexec_flag-1b
99: HMT_LOW
-#ifdef CONFIG_KEXEC /* use no memory without kexec */
+#ifdef CONFIG_KEXEC_CORE /* use no memory without kexec */
lwz r4,0(r5)
cmpwi 0,r4,0
beq 99b
@@ -503,7 +503,7 @@ kexec_flag:
.long 0
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E
/*
* BOOK3E has no real MMU mode, so we have to setup the initial TLB
@@ -716,4 +716,4 @@ _GLOBAL(kexec_sequence)
mtlr 4
li r5,0
blr /* image->start(physid, image->start, 0); */
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 183368e008cf..bb1807184bad 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -652,6 +652,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
*location = value - (unsigned long)location;
break;
+ case R_PPC64_REL32:
+ /* 32 bits relative (used by relative exception tables) */
+ *(u32 *)location = value - (unsigned long)location;
+ break;
+
case R_PPC64_TOCSAVE:
/*
* Marker reloc indicates we don't have to save r2.
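
/*
 * Illustrative sketch (not part of the patch): the arithmetic behind
 * the new R_PPC64_REL32 case above.  A relative exception-table entry
 * stores "target - &field", so a 32-bit slot reaches any target within
 * +/-2GB of the table even on a 64-bit kernel, and once the relocation
 * has been applied at module load no further fixup is needed.  The
 * struct and helpers below are hypothetical stand-ins for the kernel's
 * relative extable entries and its extable_fixup() helper.
 */
#include <stdint.h>

struct rel_extable_entry {
	int32_t insn;		/* offset from &insn  to the faulting instruction */
	int32_t fixup;		/* offset from &fixup to the fixup code           */
};

/* Store side: what the R_PPC64_REL32 relocation computes at load time. */
static void set_rel(int32_t *field, uintptr_t target)
{
	*field = (int32_t)(target - (uintptr_t)field);
}

/* Load side: what an extable_fixup()-style helper recovers at fault time. */
static uintptr_t get_rel(const int32_t *field)
{
	return (uintptr_t)field + (intptr_t)*field;
}
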
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index b60a67d92ebd..34aeac54f120 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -114,11 +114,6 @@ static struct platform_driver of_pci_phb_driver = {
},
};
-static __init int of_pci_phb_init(void)
-{
- return platform_driver_register(&of_pci_phb_driver);
-}
-
-device_initcall(of_pci_phb_init);
+builtin_platform_driver(of_pci_phb_driver);
#endif /* CONFIG_PPC_OF_PLATFORM_PCI */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 49a680d5ae37..04885cec24df 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -64,6 +64,12 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
@@ -1051,14 +1057,6 @@ static inline void save_sprs(struct thread_struct *t)
*/
t->tar = mfspr(SPRN_TAR);
}
-
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- /* Conditionally save Load Monitor registers, if enabled */
- if (t->fscr & FSCR_LM) {
- t->lmrr = mfspr(SPRN_LMRR);
- t->lmser = mfspr(SPRN_LMSER);
- }
- }
#endif
}
@@ -1094,16 +1092,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
-
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- /* Conditionally restore Load Monitor registers, if enabled */
- if (new_thread->fscr & FSCR_LM) {
- if (old_thread->lmrr != new_thread->lmrr)
- mtspr(SPRN_LMRR, new_thread->lmrr);
- if (old_thread->lmser != new_thread->lmser)
- mtspr(SPRN_LMSER, new_thread->lmser);
- }
- }
#endif
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index b0245bed6f54..f5d399e46193 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -156,21 +156,22 @@ static struct ibm_pa_feature {
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
- {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
- {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
- {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
- {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ { .pabyte = 0, .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
+ { .pabyte = 0, .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
+ { .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL },
+ { .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE },
+ { .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE },
+ { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX },
+ { .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
+ { .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE,
+ .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
/*
* If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
* we don't want to turn on TM here, so we use the *_COMP versions
* which are 0 if the kernel doesn't support TM.
*/
- {CPU_FTR_TM_COMP, 0, 0,
- PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
- {0, MMU_FTR_TYPE_RADIX, 0, 0, 40, 0, 0},
+ { .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
+ .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -427,7 +428,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
tce_alloc_end = *lprop;
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 88ac964f4858..ec47a939cbdd 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -461,14 +461,14 @@ static int __init prom_next_node(phandle *nodep)
}
}
-static int inline prom_getprop(phandle node, const char *pname,
+static inline int prom_getprop(phandle node, const char *pname,
void *value, size_t valuelen)
{
return call_prom("getprop", 4, 1, node, ADDR(pname),
(u32)(unsigned long) value, (u32) valuelen);
}
-static int inline prom_getproplen(phandle node, const char *pname)
+static inline int prom_getproplen(phandle node, const char *pname)
{
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
@@ -635,13 +635,7 @@ static void __init early_cmdline_parse(void)
*
* See prom.h for the definition of the bits specified in the
* architecture vector.
- *
- * Because the description vector contains a mix of byte and word
- * values, we declare it as an unsigned char array, and use this
- * macro to put word values in.
*/
-#define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
- ((x) >> 8) & 0xff, (x) & 0xff
/* Firmware expects the value to be n - 1, where n is the # of vectors */
#define NUM_VECTORS(n) ((n) - 1)
@@ -652,92 +646,205 @@ static void __init early_cmdline_parse(void)
*/
#define VECTOR_LENGTH(n) (1 + (n) - 2)
-unsigned char ibm_architecture_vec[] = {
- W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
- W(0xffff0000), W(0x003e0000), /* POWER6 */
- W(0xffff0000), W(0x003f0000), /* POWER7 */
- W(0xffff0000), W(0x004b0000), /* POWER8E */
- W(0xffff0000), W(0x004c0000), /* POWER8NVL */
- W(0xffff0000), W(0x004d0000), /* POWER8 */
- W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
- W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
- W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
- W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
- NUM_VECTORS(6), /* 6 option vectors */
-
- /* option vector 1: processor architectures supported */
- VECTOR_LENGTH(2), /* length */
- 0, /* don't ignore, don't halt */
- OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
- OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
+struct option_vector1 {
+ u8 byte1;
+ u8 arch_versions;
+} __packed;
+
+struct option_vector2 {
+ u8 byte1;
+ __be16 reserved;
+ __be32 real_base;
+ __be32 real_size;
+ __be32 virt_base;
+ __be32 virt_size;
+ __be32 load_base;
+ __be32 min_rma;
+ __be32 min_load;
+ u8 min_rma_percent;
+ u8 max_pft_size;
+} __packed;
+
+struct option_vector3 {
+ u8 byte1;
+ u8 byte2;
+} __packed;
+
+struct option_vector4 {
+ u8 byte1;
+ u8 min_vp_cap;
+} __packed;
+
+struct option_vector5 {
+ u8 byte1;
+ u8 byte2;
+ u8 byte3;
+ u8 cmo;
+ u8 associativity;
+ u8 bin_opts;
+ u8 micro_checkpoint;
+ u8 reserved0;
+ __be32 max_cpus;
+ __be16 papr_level;
+ __be16 reserved1;
+ u8 platform_facilities;
+ u8 reserved2;
+ __be16 reserved3;
+ u8 subprocessors;
+} __packed;
+
+struct option_vector6 {
+ u8 reserved;
+ u8 secondary_pteg;
+ u8 os_name;
+} __packed;
+
+struct ibm_arch_vec {
+ struct { u32 mask, val; } pvrs[10];
+
+ u8 num_vectors;
+
+ u8 vec1_len;
+ struct option_vector1 vec1;
+
+ u8 vec2_len;
+ struct option_vector2 vec2;
+
+ u8 vec3_len;
+ struct option_vector3 vec3;
+
+ u8 vec4_len;
+ struct option_vector4 vec4;
+
+ u8 vec5_len;
+ struct option_vector5 vec5;
+
+ u8 vec6_len;
+ struct option_vector6 vec6;
+} __packed;
+
+struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
+ .pvrs = {
+ {
+ .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
+ .val = cpu_to_be32(0x003a0000),
+ },
+ {
+ .mask = cpu_to_be32(0xffff0000), /* POWER6 */
+ .val = cpu_to_be32(0x003e0000),
+ },
+ {
+ .mask = cpu_to_be32(0xffff0000), /* POWER7 */
+ .val = cpu_to_be32(0x003f0000),
+ },
+ {
+ .mask = cpu_to_be32(0xffff0000), /* POWER8E */
+ .val = cpu_to_be32(0x004b0000),
+ },
+ {
+ .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
+ .val = cpu_to_be32(0x004c0000),
+ },
+ {
+ .mask = cpu_to_be32(0xffff0000), /* POWER8 */
+ .val = cpu_to_be32(0x004d0000),
+ },
+ {
+ .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
+ .val = cpu_to_be32(0x0f000004),
+ },
+ {
+ .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
+ .val = cpu_to_be32(0x0f000003),
+ },
+ {
+ .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
+ .val = cpu_to_be32(0x0f000002),
+ },
+ {
+ .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
+ .val = cpu_to_be32(0x0f000001),
+ },
+ },
+
+ .num_vectors = NUM_VECTORS(6),
+ .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
+ .vec1 = {
+ .byte1 = 0,
+ .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
+ OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
+ },
+
+ .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
/* option vector 2: Open Firmware options supported */
- VECTOR_LENGTH(33), /* length */
- OV2_REAL_MODE,
- 0, 0,
- W(0xffffffff), /* real_base */
- W(0xffffffff), /* real_size */
- W(0xffffffff), /* virt_base */
- W(0xffffffff), /* virt_size */
- W(0xffffffff), /* load_base */
- W(256), /* 256MB min RMA */
- W(0xffffffff), /* full client load */
- 0, /* min RMA percentage of total RAM */
- 48, /* max log_2(hash table size) */
+ .vec2 = {
+ .byte1 = OV2_REAL_MODE,
+ .reserved = 0,
+ .real_base = cpu_to_be32(0xffffffff),
+ .real_size = cpu_to_be32(0xffffffff),
+ .virt_base = cpu_to_be32(0xffffffff),
+ .virt_size = cpu_to_be32(0xffffffff),
+ .load_base = cpu_to_be32(0xffffffff),
+ .min_rma = cpu_to_be32(256), /* 256MB min RMA */
+ .min_load = cpu_to_be32(0xffffffff), /* full client load */
+ .min_rma_percent = 0, /* min RMA percentage of total RAM */
+ .max_pft_size = 48, /* max log_2(hash table size) */
+ },
+ .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
/* option vector 3: processor options supported */
- VECTOR_LENGTH(2), /* length */
- 0, /* don't ignore, don't halt */
- OV3_FP | OV3_VMX | OV3_DFP,
+ .vec3 = {
+ .byte1 = 0, /* don't ignore, don't halt */
+ .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
+ },
+ .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
/* option vector 4: IBM PAPR implementation */
- VECTOR_LENGTH(2), /* length */
- 0, /* don't halt */
- OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
+ .vec4 = {
+ .byte1 = 0, /* don't halt */
+ .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
+ },
+ .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
/* option vector 5: PAPR/OF options */
- VECTOR_LENGTH(21), /* length */
- 0, /* don't ignore, don't halt */
- OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
- OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
+ .vec5 = {
+ .byte1 = 0, /* don't ignore, don't halt */
+ .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
+ OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
#ifdef CONFIG_PCI_MSI
- /* PCIe/MSI support. Without MSI full PCIe is not supported */
- OV5_FEAT(OV5_MSI),
+ /* PCIe/MSI support. Without MSI full PCIe is not supported */
+ OV5_FEAT(OV5_MSI),
#else
- 0,
+ 0,
#endif
- 0,
+ .byte3 = 0,
+ .cmo =
#ifdef CONFIG_PPC_SMLPAR
- OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
+ OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
#else
- 0,
+ 0,
#endif
- OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
- 0,
- 0,
- 0,
- /* WARNING: The offset of the "number of cores" field below
- * must match by the macro below. Update the definition if
- * the structure layout changes.
- */
-#define IBM_ARCH_VEC_NRCORES_OFFSET 133
- W(NR_CPUS), /* number of cores supported */
- 0,
- 0,
- 0,
- 0,
- OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
- OV5_FEAT(OV5_PFO_HW_842), /* Byte 17 */
- 0, /* Byte 18 */
- 0, /* Byte 19 */
- 0, /* Byte 20 */
- OV5_FEAT(OV5_SUB_PROCESSORS), /* Byte 21 */
+ .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
+ .bin_opts = 0,
+ .micro_checkpoint = 0,
+ .reserved0 = 0,
+ .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
+ .papr_level = 0,
+ .reserved1 = 0,
+ .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
+ .reserved2 = 0,
+ .reserved3 = 0,
+ .subprocessors = 1,
+ },
/* option vector 6: IBM PAPR hints */
- VECTOR_LENGTH(3), /* length */
- 0,
- 0,
- OV6_LINUX,
+ .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
+ .vec6 = {
+ .reserved = 0,
+ .secondary_pteg = 0,
+ .os_name = OV6_LINUX,
+ },
};
/* Old method - ELF header with PT_NOTE sections only works on BE */
@@ -873,7 +980,6 @@ static void __init prom_send_capabilities(void)
ihandle root;
prom_arg_t ret;
u32 cores;
- unsigned char *ptcores;
root = call_prom("open", 1, 1, ADDR("/"));
if (root != 0) {
@@ -884,37 +990,18 @@ static void __init prom_send_capabilities(void)
* divide NR_CPUS.
*/
- /* The core value may start at an odd address. If such a word
- * access is made at a cache line boundary, this leads to an
- * exception which may not be handled at this time.
- * Forcing a per byte access to avoid exception.
- */
- ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
- cores = 0;
- cores |= ptcores[0] << 24;
- cores |= ptcores[1] << 16;
- cores |= ptcores[2] << 8;
- cores |= ptcores[3];
- if (cores != NR_CPUS) {
- prom_printf("WARNING ! "
- "ibm_architecture_vec structure inconsistent: %lu!\n",
- cores);
- } else {
- cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
- prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
- cores, NR_CPUS);
- ptcores[0] = (cores >> 24) & 0xff;
- ptcores[1] = (cores >> 16) & 0xff;
- ptcores[2] = (cores >> 8) & 0xff;
- ptcores[3] = cores & 0xff;
- }
+ cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
+ prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
+ cores, NR_CPUS);
+
+ ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
/* try calling the ibm,client-architecture-support method */
prom_printf("Calling ibm,client-architecture-support...");
if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
root,
- ADDR(ibm_architecture_vec)) == 0) {
+ ADDR(&ibm_architecture_vec)) == 0) {
/* the call exists... */
if (ret)
prom_printf("\nWARNING: ibm,client-architecture"
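
The converted ibm_architecture_vec above is a __packed struct whose multi-byte fields are stored with cpu_to_be32()/cpu_to_be16(), so the vector handed to firmware keeps one fixed layout whatever the kernel endianness, and prom_send_capabilities() can now patch the core count through the named vec5.max_cpus field instead of a hard-coded byte offset. A minimal stand-alone sketch of that idea (plain user-space C, not kernel code; the struct and field names are made up for illustration):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only -- not the real option vector layout. */
struct fake_vec5 {
        uint8_t  byte1;
        uint32_t max_cpus;              /* stored big-endian, like __be32 */
} __attribute__((packed));

int main(void)
{
        struct fake_vec5 v = { .byte1 = 0, .max_cpus = htobe32(2048) };

        /* patch the field by name, as the new prom_send_capabilities() does */
        v.max_cpus = htobe32(512);
        printf("max_cpus as firmware sees it: %u\n", be32toh(v.max_cpus));
        return 0;
}
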
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 270ee30abdcf..f516ac508ae3 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -915,7 +915,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
- mm_iommu_init(&init_mm.context);
+ mm_iommu_init(&init_mm);
#endif
irqstack_early_init();
exc_lvl_early_init();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 8d586cff8a41..6824157e4d2e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -354,7 +354,7 @@ void early_setup_secondary(void)
#endif /* CONFIG_SMP */
-#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
@@ -399,7 +399,7 @@ void smp_release_cpus(void)
DBG(" <- smp_release_cpus()\n");
}
-#endif /* CONFIG_SMP || CONFIG_KEXEC */
+#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
/*
* Initialize some remaining members of the ppc64_caches and systemcfg
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 9c6f3fd58059..893bd7f79be6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -193,7 +193,7 @@ int smp_request_message_ipi(int virq, int msg)
if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
return -EINVAL;
}
-#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
+#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC_CORE)
if (msg == PPC_MSG_DEBUGGER_BREAK) {
return 1;
}
@@ -325,7 +325,7 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
void smp_send_debugger_break(void)
{
int cpu;
@@ -340,7 +340,7 @@ void smp_send_debugger_break(void)
}
#endif
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
crash_ipi_function_ptr = crash_ipi_callback;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 023a462725b5..4239aaf74886 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -64,8 +64,9 @@
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
+#include <asm/kprobes.h>
-#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
@@ -122,9 +123,6 @@ static unsigned long oops_begin(struct pt_regs *regs)
int cpu;
unsigned long flags;
- if (debugger(regs))
- return 1;
-
oops_enter();
/* racy, but better than risking deadlock. */
@@ -150,14 +148,15 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
int signr)
{
bust_spinlocks(0);
- die_owner = -1;
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
die_nest_count--;
oops_exit();
printk("\n");
- if (!die_nest_count)
+ if (!die_nest_count) {
/* Nest count reaches zero, release the lock. */
+ die_owner = -1;
arch_spin_unlock(&die_lock);
+ }
raw_local_irq_restore(flags);
crash_fadump(regs, "die oops");
@@ -227,8 +226,12 @@ NOKPROBE_SYMBOL(__die);
void die(const char *str, struct pt_regs *regs, long err)
{
- unsigned long flags = oops_begin(regs);
+ unsigned long flags;
+ if (debugger(regs))
+ return;
+
+ flags = oops_begin(regs);
if (__die(str, regs, err))
err = 0;
oops_end(flags, regs, err);
@@ -365,7 +368,7 @@ static inline int check_io_access(struct pt_regs *regs)
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
regs->msr |= MSR_RI;
- regs->nip = entry->fixup;
+ regs->nip = extable_fixup(entry);
return 1;
}
}
@@ -824,6 +827,9 @@ void single_step_exception(struct pt_regs *regs)
clear_single_step(regs);
+ if (kprobe_post_handler(regs))
+ return;
+
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
goto bail;
@@ -1177,6 +1183,9 @@ void program_check_exception(struct pt_regs *regs)
if (debugger_bpt(regs))
goto bail;
+ if (kprobe_handler(regs))
+ goto bail;
+
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
@@ -1430,7 +1439,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TM_LG] = "TM",
[FSCR_EBB_LG] = "EBB",
[FSCR_TAR_LG] = "TAR",
- [FSCR_LM_LG] = "LM",
};
char *facility = "unknown";
u64 value;
@@ -1488,14 +1496,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
emulate_single_step(regs);
}
return;
- } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
- /*
- * This process has touched LM, so turn it on forever
- * for this process
- */
- current->thread.fscr |= FSCR_LM;
- mtspr(SPRN_FSCR, current->thread.fscr);
- return;
}
if (status == FSCR_TM_LG) {
@@ -1519,7 +1519,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
return;
}
- if ((status < ARRAY_SIZE(facility_strings)) &&
+ if ((hv || status >= 2) &&
+ (status < ARRAY_SIZE(facility_strings)) &&
facility_strings[status])
facility = facility_strings[status];
@@ -1527,9 +1528,8 @@ void facility_unavailable_exception(struct pt_regs *regs)
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- pr_err_ratelimited(
- "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
- hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
+ pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
+ hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
out:
if (user_mode(regs)) {
@@ -1754,6 +1754,9 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
return;
}
+ if (kprobe_post_handler(regs))
+ return;
+
if (notify_die(DIE_SSTEP, "block_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP) {
return;
@@ -1768,6 +1771,9 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
/* Clear the instruction completion event */
mtspr(SPRN_DBSR, DBSR_IC);
+ if (kprobe_post_handler(regs))
+ return;
+
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP) {
return;
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index ea29a5d67743..9a671c774b22 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -103,17 +103,14 @@ EXPORT_SYMBOL(__csum_partial)
adde r12,r12,r10
#define CSUM_COPY_16_BYTES_EXCODE(n) \
-.section __ex_table,"a"; \
- .align 2; \
- .long 8 ## n ## 0b,src_error; \
- .long 8 ## n ## 1b,src_error; \
- .long 8 ## n ## 2b,src_error; \
- .long 8 ## n ## 3b,src_error; \
- .long 8 ## n ## 4b,dst_error; \
- .long 8 ## n ## 5b,dst_error; \
- .long 8 ## n ## 6b,dst_error; \
- .long 8 ## n ## 7b,dst_error; \
- .text
+ EX_TABLE(8 ## n ## 0b, src_error); \
+ EX_TABLE(8 ## n ## 1b, src_error); \
+ EX_TABLE(8 ## n ## 2b, src_error); \
+ EX_TABLE(8 ## n ## 3b, src_error); \
+ EX_TABLE(8 ## n ## 4b, dst_error); \
+ EX_TABLE(8 ## n ## 5b, dst_error); \
+ EX_TABLE(8 ## n ## 6b, dst_error); \
+ EX_TABLE(8 ## n ## 7b, dst_error);
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
@@ -263,14 +260,11 @@ dst_error:
stw r0,0(r8)
blr
- .section __ex_table,"a"
- .align 2
- .long 70b,src_error
- .long 71b,dst_error
- .long 72b,src_error
- .long 73b,dst_error
- .long 54b,dst_error
- .text
+ EX_TABLE(70b, src_error);
+ EX_TABLE(71b, dst_error);
+ EX_TABLE(72b, src_error);
+ EX_TABLE(73b, dst_error);
+ EX_TABLE(54b, dst_error);
/*
* this stuff handles faults in the cacheline loop and branches to either
@@ -291,12 +285,11 @@ dst_error:
#endif
#endif
- .section __ex_table,"a"
- .align 2
- .long 30b,src_error
- .long 31b,dst_error
- .long 40b,src_error
- .long 41b,dst_error
- .long 50b,src_error
- .long 51b,dst_error
+ EX_TABLE(30b, src_error);
+ EX_TABLE(31b, dst_error);
+ EX_TABLE(40b, src_error);
+ EX_TABLE(41b, dst_error);
+ EX_TABLE(50b, src_error);
+ EX_TABLE(51b, dst_error);
+
EXPORT_SYMBOL(csum_partial_copy_generic)
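
The checksum, copy and string routines drop their open-coded ".section __ex_table" blobs in favour of an EX_TABLE() helper, and the C side later in the series reads the table through extable_fixup() rather than entry->fixup, consistent with the relative-offset exception tables mentioned in the pull description. A rough sketch of how such a relative entry resolves to an address, assuming the generic two-int layout used by several architectures (not necessarily the exact powerpc definition):

/* Sketch only: a relative-offset exception table entry and its lookup helper. */
struct exception_table_entry {
        int insn;       /* offset of the faulting instruction, relative to &insn */
        int fixup;      /* offset of the fixup code, relative to &fixup */
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
        /* absolute fixup address = address of the field + stored offset */
        return (unsigned long)&x->fixup + x->fixup;
}
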
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index fd9176671f9f..d0d311e108ff 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -182,34 +182,22 @@ EXPORT_SYMBOL(__csum_partial)
.macro srcnr
100:
- .section __ex_table,"a"
- .align 3
- .llong 100b,.Lsrc_error_nr
- .previous
+ EX_TABLE(100b,.Lsrc_error_nr)
.endm
.macro source
150:
- .section __ex_table,"a"
- .align 3
- .llong 150b,.Lsrc_error
- .previous
+ EX_TABLE(150b,.Lsrc_error)
.endm
.macro dstnr
200:
- .section __ex_table,"a"
- .align 3
- .llong 200b,.Ldest_error_nr
- .previous
+ EX_TABLE(200b,.Ldest_error_nr)
.endm
.macro dest
250:
- .section __ex_table,"a"
- .align 3
- .llong 250b,.Ldest_error
- .previous
+ EX_TABLE(250b,.Ldest_error)
.endm
/*
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 40cce33b08d6..ff0d894d7ff9 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -49,17 +49,14 @@
9 ## n ## 1: \
addi r5,r5,-(16 * n); \
b 105f; \
-.section __ex_table,"a"; \
- .align 2; \
- .long 8 ## n ## 0b,9 ## n ## 0b; \
- .long 8 ## n ## 1b,9 ## n ## 0b; \
- .long 8 ## n ## 2b,9 ## n ## 0b; \
- .long 8 ## n ## 3b,9 ## n ## 0b; \
- .long 8 ## n ## 4b,9 ## n ## 1b; \
- .long 8 ## n ## 5b,9 ## n ## 1b; \
- .long 8 ## n ## 6b,9 ## n ## 1b; \
- .long 8 ## n ## 7b,9 ## n ## 1b; \
- .text
+ EX_TABLE(8 ## n ## 0b,9 ## n ## 0b); \
+ EX_TABLE(8 ## n ## 1b,9 ## n ## 0b); \
+ EX_TABLE(8 ## n ## 2b,9 ## n ## 0b); \
+ EX_TABLE(8 ## n ## 3b,9 ## n ## 0b); \
+ EX_TABLE(8 ## n ## 4b,9 ## n ## 1b); \
+ EX_TABLE(8 ## n ## 5b,9 ## n ## 1b); \
+ EX_TABLE(8 ## n ## 6b,9 ## n ## 1b); \
+ EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
.text
.stabs "arch/powerpc/lib/",N_SO,0,0,0f
@@ -323,13 +320,10 @@ _GLOBAL(__copy_tofrom_user)
73: stwu r9,4(r6)
bdnz 72b
- .section __ex_table,"a"
- .align 2
- .long 70b,100f
- .long 71b,101f
- .long 72b,102f
- .long 73b,103f
- .text
+ EX_TABLE(70b,100f)
+ EX_TABLE(71b,101f)
+ EX_TABLE(72b,102f)
+ EX_TABLE(73b,103f)
58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
clrlwi r5,r5,32-LG_CACHELINE_BYTES
@@ -364,10 +358,7 @@ _GLOBAL(__copy_tofrom_user)
53: dcbt r3,r4
54: dcbz r11,r6
- .section __ex_table,"a"
- .align 2
- .long 54b,105f
- .text
+ EX_TABLE(54b,105f)
/* the main body of the cacheline loop */
COPY_16_BYTES_WITHEX(0)
#if L1_CACHE_BYTES >= 32
@@ -500,15 +491,13 @@ _GLOBAL(__copy_tofrom_user)
bdnz 114b
120: blr
- .section __ex_table,"a"
- .align 2
- .long 30b,108b
- .long 31b,109b
- .long 40b,110b
- .long 41b,111b
- .long 130b,132b
- .long 131b,120b
- .long 112b,120b
- .long 114b,120b
- .text
+ EX_TABLE(30b,108b)
+ EX_TABLE(31b,109b)
+ EX_TABLE(40b,110b)
+ EX_TABLE(41b,111b)
+ EX_TABLE(130b,132b)
+ EX_TABLE(131b,120b)
+ EX_TABLE(112b,120b)
+ EX_TABLE(114b,120b)
+
EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 60386b2c99bb..aee6e24e81ab 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -394,70 +394,66 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
192:
blr /* #bytes not copied in r3 */
- .section __ex_table,"a"
- .align 3
- .llong 20b,120b
- .llong 220b,320b
- .llong 21b,121b
- .llong 221b,321b
- .llong 70b,170b
- .llong 270b,370b
- .llong 22b,122b
- .llong 222b,322b
- .llong 71b,171b
- .llong 271b,371b
- .llong 72b,172b
- .llong 272b,372b
- .llong 244b,344b
- .llong 245b,345b
- .llong 23b,123b
- .llong 73b,173b
- .llong 44b,144b
- .llong 74b,174b
- .llong 45b,145b
- .llong 75b,175b
- .llong 24b,124b
- .llong 25b,125b
- .llong 26b,126b
- .llong 27b,127b
- .llong 28b,128b
- .llong 29b,129b
- .llong 30b,130b
- .llong 31b,131b
- .llong 32b,132b
- .llong 76b,176b
- .llong 33b,133b
- .llong 77b,177b
- .llong 78b,178b
- .llong 79b,179b
- .llong 80b,180b
- .llong 34b,134b
- .llong 94b,194b
- .llong 95b,195b
- .llong 96b,196b
- .llong 35b,135b
- .llong 81b,181b
- .llong 36b,136b
- .llong 82b,182b
- .llong 37b,137b
- .llong 83b,183b
- .llong 38b,138b
- .llong 39b,139b
- .llong 84b,184b
- .llong 85b,185b
- .llong 40b,140b
- .llong 86b,186b
- .llong 41b,141b
- .llong 87b,187b
- .llong 42b,142b
- .llong 88b,188b
- .llong 43b,143b
- .llong 89b,189b
- .llong 90b,190b
- .llong 91b,191b
- .llong 92b,192b
-
- .text
+ EX_TABLE(20b,120b)
+ EX_TABLE(220b,320b)
+ EX_TABLE(21b,121b)
+ EX_TABLE(221b,321b)
+ EX_TABLE(70b,170b)
+ EX_TABLE(270b,370b)
+ EX_TABLE(22b,122b)
+ EX_TABLE(222b,322b)
+ EX_TABLE(71b,171b)
+ EX_TABLE(271b,371b)
+ EX_TABLE(72b,172b)
+ EX_TABLE(272b,372b)
+ EX_TABLE(244b,344b)
+ EX_TABLE(245b,345b)
+ EX_TABLE(23b,123b)
+ EX_TABLE(73b,173b)
+ EX_TABLE(44b,144b)
+ EX_TABLE(74b,174b)
+ EX_TABLE(45b,145b)
+ EX_TABLE(75b,175b)
+ EX_TABLE(24b,124b)
+ EX_TABLE(25b,125b)
+ EX_TABLE(26b,126b)
+ EX_TABLE(27b,127b)
+ EX_TABLE(28b,128b)
+ EX_TABLE(29b,129b)
+ EX_TABLE(30b,130b)
+ EX_TABLE(31b,131b)
+ EX_TABLE(32b,132b)
+ EX_TABLE(76b,176b)
+ EX_TABLE(33b,133b)
+ EX_TABLE(77b,177b)
+ EX_TABLE(78b,178b)
+ EX_TABLE(79b,179b)
+ EX_TABLE(80b,180b)
+ EX_TABLE(34b,134b)
+ EX_TABLE(94b,194b)
+ EX_TABLE(95b,195b)
+ EX_TABLE(96b,196b)
+ EX_TABLE(35b,135b)
+ EX_TABLE(81b,181b)
+ EX_TABLE(36b,136b)
+ EX_TABLE(82b,182b)
+ EX_TABLE(37b,137b)
+ EX_TABLE(83b,183b)
+ EX_TABLE(38b,138b)
+ EX_TABLE(39b,139b)
+ EX_TABLE(84b,184b)
+ EX_TABLE(85b,185b)
+ EX_TABLE(40b,140b)
+ EX_TABLE(86b,186b)
+ EX_TABLE(41b,141b)
+ EX_TABLE(87b,187b)
+ EX_TABLE(42b,142b)
+ EX_TABLE(88b,188b)
+ EX_TABLE(43b,143b)
+ EX_TABLE(89b,189b)
+ EX_TABLE(90b,190b)
+ EX_TABLE(91b,191b)
+ EX_TABLE(92b,192b)
/*
* Routine to copy a whole page of data, optimized for POWER4.
@@ -598,78 +594,77 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
li r5,4096
b .Ldst_aligned
- .section __ex_table,"a"
- .align 3
- .llong 20b,100b
- .llong 21b,100b
- .llong 22b,100b
- .llong 23b,100b
- .llong 24b,100b
- .llong 25b,100b
- .llong 26b,100b
- .llong 27b,100b
- .llong 28b,100b
- .llong 29b,100b
- .llong 30b,100b
- .llong 31b,100b
- .llong 32b,100b
- .llong 33b,100b
- .llong 34b,100b
- .llong 35b,100b
- .llong 36b,100b
- .llong 37b,100b
- .llong 38b,100b
- .llong 39b,100b
- .llong 40b,100b
- .llong 41b,100b
- .llong 42b,100b
- .llong 43b,100b
- .llong 44b,100b
- .llong 45b,100b
- .llong 46b,100b
- .llong 47b,100b
- .llong 48b,100b
- .llong 49b,100b
- .llong 50b,100b
- .llong 51b,100b
- .llong 52b,100b
- .llong 53b,100b
- .llong 54b,100b
- .llong 55b,100b
- .llong 56b,100b
- .llong 57b,100b
- .llong 58b,100b
- .llong 59b,100b
- .llong 60b,100b
- .llong 61b,100b
- .llong 62b,100b
- .llong 63b,100b
- .llong 64b,100b
- .llong 65b,100b
- .llong 66b,100b
- .llong 67b,100b
- .llong 68b,100b
- .llong 69b,100b
- .llong 70b,100b
- .llong 71b,100b
- .llong 72b,100b
- .llong 73b,100b
- .llong 74b,100b
- .llong 75b,100b
- .llong 76b,100b
- .llong 77b,100b
- .llong 78b,100b
- .llong 79b,100b
- .llong 80b,100b
- .llong 81b,100b
- .llong 82b,100b
- .llong 83b,100b
- .llong 84b,100b
- .llong 85b,100b
- .llong 86b,100b
- .llong 87b,100b
- .llong 88b,100b
- .llong 89b,100b
- .llong 90b,100b
- .llong 91b,100b
+ EX_TABLE(20b,100b)
+ EX_TABLE(21b,100b)
+ EX_TABLE(22b,100b)
+ EX_TABLE(23b,100b)
+ EX_TABLE(24b,100b)
+ EX_TABLE(25b,100b)
+ EX_TABLE(26b,100b)
+ EX_TABLE(27b,100b)
+ EX_TABLE(28b,100b)
+ EX_TABLE(29b,100b)
+ EX_TABLE(30b,100b)
+ EX_TABLE(31b,100b)
+ EX_TABLE(32b,100b)
+ EX_TABLE(33b,100b)
+ EX_TABLE(34b,100b)
+ EX_TABLE(35b,100b)
+ EX_TABLE(36b,100b)
+ EX_TABLE(37b,100b)
+ EX_TABLE(38b,100b)
+ EX_TABLE(39b,100b)
+ EX_TABLE(40b,100b)
+ EX_TABLE(41b,100b)
+ EX_TABLE(42b,100b)
+ EX_TABLE(43b,100b)
+ EX_TABLE(44b,100b)
+ EX_TABLE(45b,100b)
+ EX_TABLE(46b,100b)
+ EX_TABLE(47b,100b)
+ EX_TABLE(48b,100b)
+ EX_TABLE(49b,100b)
+ EX_TABLE(50b,100b)
+ EX_TABLE(51b,100b)
+ EX_TABLE(52b,100b)
+ EX_TABLE(53b,100b)
+ EX_TABLE(54b,100b)
+ EX_TABLE(55b,100b)
+ EX_TABLE(56b,100b)
+ EX_TABLE(57b,100b)
+ EX_TABLE(58b,100b)
+ EX_TABLE(59b,100b)
+ EX_TABLE(60b,100b)
+ EX_TABLE(61b,100b)
+ EX_TABLE(62b,100b)
+ EX_TABLE(63b,100b)
+ EX_TABLE(64b,100b)
+ EX_TABLE(65b,100b)
+ EX_TABLE(66b,100b)
+ EX_TABLE(67b,100b)
+ EX_TABLE(68b,100b)
+ EX_TABLE(69b,100b)
+ EX_TABLE(70b,100b)
+ EX_TABLE(71b,100b)
+ EX_TABLE(72b,100b)
+ EX_TABLE(73b,100b)
+ EX_TABLE(74b,100b)
+ EX_TABLE(75b,100b)
+ EX_TABLE(76b,100b)
+ EX_TABLE(77b,100b)
+ EX_TABLE(78b,100b)
+ EX_TABLE(79b,100b)
+ EX_TABLE(80b,100b)
+ EX_TABLE(81b,100b)
+ EX_TABLE(82b,100b)
+ EX_TABLE(83b,100b)
+ EX_TABLE(84b,100b)
+ EX_TABLE(85b,100b)
+ EX_TABLE(86b,100b)
+ EX_TABLE(87b,100b)
+ EX_TABLE(88b,100b)
+ EX_TABLE(89b,100b)
+ EX_TABLE(90b,100b)
+ EX_TABLE(91b,100b)
+
EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index da0c568d18c4..a24b4039352c 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -29,35 +29,23 @@
.macro err1
100:
- .section __ex_table,"a"
- .align 3
- .llong 100b,.Ldo_err1
- .previous
+ EX_TABLE(100b,.Ldo_err1)
.endm
.macro err2
200:
- .section __ex_table,"a"
- .align 3
- .llong 200b,.Ldo_err2
- .previous
+ EX_TABLE(200b,.Ldo_err2)
.endm
#ifdef CONFIG_ALTIVEC
.macro err3
300:
- .section __ex_table,"a"
- .align 3
- .llong 300b,.Ldo_err3
- .previous
+ EX_TABLE(300b,.Ldo_err3)
.endm
.macro err4
400:
- .section __ex_table,"a"
- .align 3
- .llong 400b,.Ldo_err4
- .previous
+ EX_TABLE(400b,.Ldo_err4)
.endm
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index 5d0cdbfbe3f2..a58777c1b2cb 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -21,18 +21,12 @@
#define STKFRM (PPC_MIN_STKFRM + 16)
- .macro extab instr,handler
- .section __ex_table,"a"
- PPC_LONG \instr,\handler
- .previous
- .endm
-
.macro inst32 op
reg = 0
.rept 32
20: \op reg,0,r4
b 3f
- extab 20b,99f
+ EX_TABLE(20b,99f)
reg = reg + 1
.endr
.endm
@@ -100,7 +94,7 @@ _GLOBAL(do_lfs)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
/* Load FP reg N from double at *p. N is in r3, p in r4. */
_GLOBAL(do_lfd)
@@ -127,7 +121,7 @@ _GLOBAL(do_lfd)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
/* Store FP reg N to float at *p. N is in r3, p in r4. */
_GLOBAL(do_stfs)
@@ -154,7 +148,7 @@ _GLOBAL(do_stfs)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
/* Store FP reg N to double at *p. N is in r3, p in r4. */
_GLOBAL(do_stfd)
@@ -181,7 +175,7 @@ _GLOBAL(do_stfd)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into v0; N is in r3. */
@@ -248,7 +242,7 @@ _GLOBAL(do_lvx)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
/* Store vector reg N to *p. N is in r3, p in r4. */
_GLOBAL(do_stvx)
@@ -276,7 +270,7 @@ _GLOBAL(do_stvx)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -344,7 +338,7 @@ _GLOBAL(do_lxvd2x)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
/* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
_GLOBAL(do_stxvd2x)
@@ -372,7 +366,7 @@ _GLOBAL(do_stxvd2x)
mr r3,r9
addi r1,r1,STKFRM
blr
- extab 2b,3b
+ EX_TABLE(2b,3b)
#endif /* CONFIG_VSX */
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 3362299b1859..9c78a9c102c3 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -15,6 +15,7 @@
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
extern char system_call_common[];
@@ -493,10 +494,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
"3: li %0,%4\n" \
" b 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,3b\n" \
- ".previous" \
+ EX_TABLE(1b, 3b) \
: "=r" (err), "=r" (cr) \
: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
@@ -508,10 +506,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
"3: li %0,%3\n" \
" b 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,3b\n" \
- ".previous" \
+ EX_TABLE(1b, 3b) \
: "=r" (err), "=r" (x) \
: "r" (addr), "i" (-EFAULT), "0" (err))
@@ -523,10 +518,7 @@ static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
"3: li %0,%3\n" \
" b 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,3b\n" \
- ".previous" \
+ EX_TABLE(1b, 3b) \
: "=r" (err) \
: "r" (addr), "i" (-EFAULT), "0" (err))
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index d13e07603519..a787776822d8 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -13,8 +13,6 @@
#include <asm/ppc_asm.h>
#include <asm/export.h>
- .section __ex_table,"a"
- PPC_LONG_ALIGN
.text
/* This clears out any unused part of the destination buffer,
@@ -125,10 +123,9 @@ _GLOBAL(__clear_user)
92: mfctr r3
blr
- .section __ex_table,"a"
- PPC_LONG 11b,90b
- PPC_LONG 1b,91b
- PPC_LONG 8b,92b
- .text
+ EX_TABLE(11b, 90b)
+ EX_TABLE(1b, 91b)
+ EX_TABLE(8b, 92b)
+
EXPORT_SYMBOL(__clear_user)
#endif
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 57ace356c949..c100f4d5d5d0 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -19,6 +19,7 @@
*/
#include <asm/ppc_asm.h>
+#include <asm/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
@@ -41,26 +42,17 @@ PPC64_CACHES:
.macro err1
100:
- .section __ex_table,"a"
- .align 3
- .llong 100b,.Ldo_err1
- .previous
+ EX_TABLE(100b,.Ldo_err1)
.endm
.macro err2
200:
- .section __ex_table,"a"
- .align 3
- .llong 200b,.Ldo_err2
- .previous
+ EX_TABLE(200b,.Ldo_err2)
.endm
.macro err3
300:
- .section __ex_table,"a"
- .align 3
- .llong 300b,.Ldo_err3
- .previous
+ EX_TABLE(300b,.Ldo_err3)
.endm
.Ldo_err1:
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 1a4e570f7894..7414034df1c3 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -7,7 +7,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-y := fault.o mem.o pgtable.o mmap.o \
- init_$(BITS).o pgtable_$(BITS).o
+ init_$(BITS).o pgtable_$(BITS).o \
+ init-common.o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
@@ -42,3 +43,5 @@ obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o
+obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o
+obj-$(CONFIG_PPC_HTDUMP) += dump_hashpagetable.o
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 362954f98029..aaa7ec6788b9 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -134,6 +134,9 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
return 1;
}
+ /* Bad address */
+ if (!vsid)
+ return 1;
vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
new file mode 100644
index 000000000000..d979709a0239
--- /dev/null
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright 2016, Rashmica Gupta, IBM Corp.
+ *
+ * This traverses the kernel virtual memory and dumps the pages that are in
+ * the hash pagetable, along with their flags to
+ * /sys/kernel/debug/kernel_hash_pagetable.
+ *
+ * If radix is enabled then there is no hash page table and so no debugfs file
+ * is generated.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/plpar_wrappers.h>
+#include <linux/memblock.h>
+#include <asm/firmware.h>
+
+struct pg_state {
+ struct seq_file *seq;
+ const struct addr_marker *marker;
+ unsigned long start_address;
+ unsigned int level;
+ u64 current_flags;
+};
+
+struct addr_marker {
+ unsigned long start_address;
+ const char *name;
+};
+
+static struct addr_marker address_markers[] = {
+ { 0, "Start of kernel VM" },
+ { 0, "vmalloc() Area" },
+ { 0, "vmalloc() End" },
+ { 0, "isa I/O start" },
+ { 0, "isa I/O end" },
+ { 0, "phb I/O start" },
+ { 0, "phb I/O end" },
+ { 0, "I/O remap start" },
+ { 0, "I/O remap end" },
+ { 0, "vmemmap start" },
+ { -1, NULL },
+};
+
+struct flag_info {
+ u64 mask;
+ u64 val;
+ const char *set;
+ const char *clear;
+ bool is_val;
+ int shift;
+};
+
+static const struct flag_info v_flag_array[] = {
+ {
+ .mask = SLB_VSID_B,
+ .val = SLB_VSID_B_256M,
+ .set = "ssize: 256M",
+ .clear = "ssize: 1T ",
+ }, {
+ .mask = HPTE_V_SECONDARY,
+ .val = HPTE_V_SECONDARY,
+ .set = "secondary",
+ .clear = "primary ",
+ }, {
+ .mask = HPTE_V_VALID,
+ .val = HPTE_V_VALID,
+ .set = "valid ",
+ .clear = "invalid",
+ }, {
+ .mask = HPTE_V_BOLTED,
+ .val = HPTE_V_BOLTED,
+ .set = "bolted",
+ .clear = "",
+ }
+};
+
+static const struct flag_info r_flag_array[] = {
+ {
+ .mask = HPTE_R_PP0 | HPTE_R_PP,
+ .val = PP_RWXX,
+ .set = "prot:RW--",
+ }, {
+ .mask = HPTE_R_PP0 | HPTE_R_PP,
+ .val = PP_RWRX,
+ .set = "prot:RWR-",
+ }, {
+ .mask = HPTE_R_PP0 | HPTE_R_PP,
+ .val = PP_RWRW,
+ .set = "prot:RWRW",
+ }, {
+ .mask = HPTE_R_PP0 | HPTE_R_PP,
+ .val = PP_RXRX,
+ .set = "prot:R-R-",
+ }, {
+ .mask = HPTE_R_PP0 | HPTE_R_PP,
+ .val = PP_RXXX,
+ .set = "prot:R---",
+ }, {
+ .mask = HPTE_R_KEY_HI | HPTE_R_KEY_LO,
+ .val = HPTE_R_KEY_HI | HPTE_R_KEY_LO,
+ .set = "key",
+ .clear = "",
+ .is_val = true,
+ }, {
+ .mask = HPTE_R_R,
+ .val = HPTE_R_R,
+ .set = "ref",
+ .clear = " ",
+ }, {
+ .mask = HPTE_R_C,
+ .val = HPTE_R_C,
+ .set = "changed",
+ .clear = " ",
+ }, {
+ .mask = HPTE_R_N,
+ .val = HPTE_R_N,
+ .set = "no execute",
+ }, {
+ .mask = HPTE_R_WIMG,
+ .val = HPTE_R_W,
+ .set = "writethru",
+ }, {
+ .mask = HPTE_R_WIMG,
+ .val = HPTE_R_I,
+ .set = "no cache",
+ }, {
+ .mask = HPTE_R_WIMG,
+ .val = HPTE_R_G,
+ .set = "guarded",
+ }
+};
+
+static int calculate_pagesize(struct pg_state *st, int ps, char s[])
+{
+ static const char units[] = "BKMGTPE";
+ const char *unit = units;
+
+ while (ps > 9 && unit[1]) {
+ ps -= 10;
+ unit++;
+ }
+ seq_printf(st->seq, " %s_ps: %i%c\t", s, 1<<ps, *unit);
+ return ps;
+}
+
+static void dump_flag_info(struct pg_state *st, const struct flag_info
+ *flag, u64 pte, int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++, flag++) {
+ const char *s = NULL;
+ u64 val;
+
+ /* flag not defined so don't check it */
+ if (flag->mask == 0)
+ continue;
+ /* Some 'flags' are actually values */
+ if (flag->is_val) {
+ val = pte & flag->val;
+ if (flag->shift)
+ val = val >> flag->shift;
+ seq_printf(st->seq, " %s:%llx", flag->set, val);
+ } else {
+ if ((pte & flag->mask) == flag->val)
+ s = flag->set;
+ else
+ s = flag->clear;
+ if (s)
+ seq_printf(st->seq, " %s", s);
+ }
+ }
+}
+
+static void dump_hpte_info(struct pg_state *st, unsigned long ea, u64 v, u64 r,
+ unsigned long rpn, int bps, int aps, unsigned long lp)
+{
+ int aps_index;
+
+ while (ea >= st->marker[1].start_address) {
+ st->marker++;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ }
+ seq_printf(st->seq, "0x%lx:\t", ea);
+ seq_printf(st->seq, "AVPN:%llx\t", HPTE_V_AVPN_VAL(v));
+ dump_flag_info(st, v_flag_array, v, ARRAY_SIZE(v_flag_array));
+ seq_printf(st->seq, " rpn: %lx\t", rpn);
+ dump_flag_info(st, r_flag_array, r, ARRAY_SIZE(r_flag_array));
+
+ calculate_pagesize(st, bps, "base");
+ aps_index = calculate_pagesize(st, aps, "actual");
+ if (aps_index != 2)
+ seq_printf(st->seq, "LP enc: %lx", lp);
+ seq_puts(st->seq, "\n");
+}
+
+
+static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
+ *r)
+{
+ struct hash_pte *hptep;
+ unsigned long hash, vsid, vpn, hpte_group, want_v, hpte_v;
+ int i, ssize = mmu_kernel_ssize;
+ unsigned long shift = mmu_psize_defs[psize].shift;
+
+ /* calculate hash */
+ vsid = get_kernel_vsid(ea, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
+
+ /* to check in the secondary hash table, we invert the hash */
+ if (!primary)
+ hash = ~hash;
+ hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ for (i = 0; i < HPTES_PER_GROUP; i++) {
+ hptep = htab_address + hpte_group;
+ hpte_v = be64_to_cpu(hptep->v);
+
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
+ /* HPTE matches */
+ *v = be64_to_cpu(hptep->v);
+ *r = be64_to_cpu(hptep->r);
+ return 0;
+ }
+ ++hpte_group;
+ }
+ return -1;
+}
+
+#ifdef CONFIG_PPC_PSERIES
+static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r)
+{
+ struct hash_pte ptes[4];
+ unsigned long vsid, vpn, hash, hpte_group, want_v;
+ int i, j, ssize = mmu_kernel_ssize;
+ long lpar_rc = 0;
+ unsigned long shift = mmu_psize_defs[psize].shift;
+
+ /* calculate hash */
+ vsid = get_kernel_vsid(ea, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
+ want_v = hpte_encode_avpn(vpn, psize, ssize);
+
+ /* to check in the secondary hash table, we invert the hash */
+ if (!primary)
+ hash = ~hash;
+ hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+ /* see if we can find an entry in the hpte with this hash */
+ for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
+ lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
+
+ if (lpar_rc != H_SUCCESS)
+ continue;
+ for (j = 0; j < 4; j++) {
+ if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
+ (ptes[j].v & HPTE_V_VALID)) {
+ /* HPTE matches */
+ *v = ptes[j].v;
+ *r = ptes[j].r;
+ return 0;
+ }
+ }
+ }
+ return -1;
+}
+#endif
+
+static void decode_r(int bps, unsigned long r, unsigned long *rpn, int *aps,
+ unsigned long *lp_bits)
+{
+ struct mmu_psize_def entry;
+ unsigned long arpn, mask, lp;
+ int penc = -2, idx = 0, shift;
+
+	/*
+	 * The LP field has 8 bits. Depending on the actual page size, some of
+	 * these bits are concatenated with the ARPN to get the RPN. The rest
+	 * of the bits in the LP field are the LP value, an encoding for
+	 * the base page size and the actual page size.
+ *
+ * - find the mmu entry for our base page size
+ * - go through all page encodings and use the associated mask to
+ * find an encoding that matches our encoding in the LP field.
+ */
+ arpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
+ lp = arpn & 0xff;
+
+ entry = mmu_psize_defs[bps];
+ while (idx < MMU_PAGE_COUNT) {
+ penc = entry.penc[idx];
+ if ((penc != -1) && (mmu_psize_defs[idx].shift)) {
+ shift = mmu_psize_defs[idx].shift - HPTE_R_RPN_SHIFT;
+ mask = (0x1 << (shift)) - 1;
+ if ((lp & mask) == penc) {
+ *aps = mmu_psize_to_shift(idx);
+ *lp_bits = lp & mask;
+ *rpn = arpn >> shift;
+ return;
+ }
+ }
+ idx++;
+ }
+}
+
+static int base_hpte_find(unsigned long ea, int psize, bool primary, u64 *v,
+ u64 *r)
+{
+#ifdef CONFIG_PPC_PSERIES
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ return pseries_find(ea, psize, primary, v, r);
+#endif
+ return native_find(ea, psize, primary, v, r);
+}
+
+static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize)
+{
+ unsigned long slot;
+ u64 v = 0, r = 0;
+ unsigned long rpn, lp_bits;
+ int base_psize = 0, actual_psize = 0;
+
+ if (ea <= PAGE_OFFSET)
+ return -1;
+
+ /* Look in primary table */
+ slot = base_hpte_find(ea, psize, true, &v, &r);
+
+ /* Look in secondary table */
+ if (slot == -1)
+		slot = base_hpte_find(ea, psize, false, &v, &r);
+
+ /* No entry found */
+ if (slot == -1)
+ return -1;
+
+ /*
+ * We found an entry in the hash page table:
+ * - check that this has the same base page
+ * - find the actual page size
+ * - find the RPN
+ */
+ base_psize = mmu_psize_to_shift(psize);
+
+ if ((v & HPTE_V_LARGE) == HPTE_V_LARGE) {
+ decode_r(psize, r, &rpn, &actual_psize, &lp_bits);
+ } else {
+ /* 4K actual page size */
+ actual_psize = 12;
+ rpn = (r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
+ /* In this case there are no LP bits */
+ lp_bits = -1;
+ }
+ /*
+ * We didn't find a matching encoding, so the PTE we found isn't for
+ * this address.
+ */
+ if (actual_psize == -1)
+ return -1;
+
+ dump_hpte_info(st, ea, v, r, rpn, base_psize, actual_psize, lp_bits);
+ return 0;
+}
+
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+{
+ pte_t *pte = pte_offset_kernel(pmd, 0);
+ unsigned long addr, pteval, psize;
+ int i, status;
+
+ for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+ addr = start + i * PAGE_SIZE;
+ pteval = pte_val(*pte);
+
+ if (addr < VMALLOC_END)
+ psize = mmu_vmalloc_psize;
+ else
+ psize = mmu_io_psize;
+#ifdef CONFIG_PPC_64K_PAGES
+ /* check for secret 4K mappings */
+ if (((pteval & H_PAGE_COMBO) == H_PAGE_COMBO) ||
+ ((pteval & H_PAGE_4K_PFN) == H_PAGE_4K_PFN))
+ psize = mmu_io_psize;
+#endif
+ /* check for hashpte */
+ status = hpte_find(st, addr, psize);
+
+ if (((pteval & H_PAGE_HASHPTE) != H_PAGE_HASHPTE)
+ && (status != -1)) {
+ /* found a hpte that is not in the linux page tables */
+ seq_printf(st->seq, "page probably bolted before linux"
+ " pagetables were set: addr:%lx, pteval:%lx\n",
+ addr, pteval);
+ }
+ }
+}
+
+static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+{
+ pmd_t *pmd = pmd_offset(pud, 0);
+ unsigned long addr;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+ addr = start + i * PMD_SIZE;
+ if (!pmd_none(*pmd))
+ /* pmd exists */
+ walk_pte(st, pmd, addr);
+ }
+}
+
+static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+ pud_t *pud = pud_offset(pgd, 0);
+ unsigned long addr;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+ addr = start + i * PUD_SIZE;
+ if (!pud_none(*pud))
+ /* pud exists */
+ walk_pmd(st, pud, addr);
+ }
+}
+
+static void walk_pagetables(struct pg_state *st)
+{
+ pgd_t *pgd = pgd_offset_k(0UL);
+ unsigned int i;
+ unsigned long addr;
+
+ /*
+ * Traverse the linux pagetable structure and dump pages that are in
+ * the hash pagetable.
+ */
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+ addr = KERN_VIRT_START + i * PGDIR_SIZE;
+ if (!pgd_none(*pgd))
+ /* pgd exists */
+ walk_pud(st, pgd, addr);
+ }
+}
+
+
+static void walk_linearmapping(struct pg_state *st)
+{
+ unsigned long addr;
+
+ /*
+ * Traverse the linear mapping section of virtual memory and dump pages
+ * that are in the hash pagetable.
+ */
+ unsigned long psize = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+
+ for (addr = PAGE_OFFSET; addr < PAGE_OFFSET +
+ memblock_phys_mem_size(); addr += psize)
+ hpte_find(st, addr, mmu_linear_psize);
+}
+
+static void walk_vmemmap(struct pg_state *st)
+{
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ struct vmemmap_backing *ptr = vmemmap_list;
+
+ /*
+ * Traverse the vmemmaped memory and dump pages that are in the hash
+ * pagetable.
+ */
+ while (ptr->list) {
+ hpte_find(st, ptr->virt_addr, mmu_vmemmap_psize);
+ ptr = ptr->list;
+ }
+ seq_puts(st->seq, "---[ vmemmap end ]---\n");
+#endif
+}
+
+static void populate_markers(void)
+{
+ address_markers[0].start_address = PAGE_OFFSET;
+ address_markers[1].start_address = VMALLOC_START;
+ address_markers[2].start_address = VMALLOC_END;
+ address_markers[3].start_address = ISA_IO_BASE;
+ address_markers[4].start_address = ISA_IO_END;
+ address_markers[5].start_address = PHB_IO_BASE;
+ address_markers[6].start_address = PHB_IO_END;
+ address_markers[7].start_address = IOREMAP_BASE;
+ address_markers[8].start_address = IOREMAP_END;
+#ifdef CONFIG_PPC_STD_MMU_64
+ address_markers[9].start_address = H_VMEMMAP_BASE;
+#else
+ address_markers[9].start_address = VMEMMAP_BASE;
+#endif
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ struct pg_state st = {
+ .seq = m,
+ .start_address = PAGE_OFFSET,
+ .marker = address_markers,
+ };
+ /*
+ * Traverse the 0xc, 0xd and 0xf areas of the kernel virtual memory and
+ * dump pages that are in the hash pagetable.
+ */
+ walk_linearmapping(&st);
+ walk_pagetables(&st);
+ walk_vmemmap(&st);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int ptdump_init(void)
+{
+ struct dentry *debugfs_file;
+
+ if (!radix_enabled()) {
+ populate_markers();
+ debugfs_file = debugfs_create_file("kernel_hash_pagetable",
+ 0400, NULL, NULL, &ptdump_fops);
+ return debugfs_file ? 0 : -ENOMEM;
+ }
+ return 0;
+}
+device_initcall(ptdump_init);
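
base_hpte_find() above probes the primary HPTE group first and then the secondary group, whose index is derived from the bitwise complement of the hash. A tiny stand-alone illustration of that group-index arithmetic (example values; 8 is the usual HPTES_PER_GROUP for the hash MMU):

#include <stdio.h>

#define HPTES_PER_GROUP 8

int main(void)
{
        unsigned long hash = 0x1234f00dUL;
        unsigned long htab_hash_mask = 0xfffffUL;       /* example: 2^20 groups */

        unsigned long primary   = ( hash & htab_hash_mask) * HPTES_PER_GROUP;
        unsigned long secondary = (~hash & htab_hash_mask) * HPTES_PER_GROUP;

        printf("primary group starts at slot   %lu\n", primary);
        printf("secondary group starts at slot %lu\n", secondary);
        return 0;
}
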
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
new file mode 100644
index 000000000000..49abaf4dc8e3
--- /dev/null
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright 2016, Rashmica Gupta, IBM Corp.
+ *
+ * This traverses the kernel pagetables and dumps the
+ * information about the used sections of memory to
+ * /sys/kernel/debug/kernel_pagetables.
+ *
+ * Derived from the arm64 implementation:
+ * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
+ * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+/*
+ * To visualise what is happening,
+ *
+ * - PTRS_PER_P** = how many entries there are in the corresponding P**
+ * - P**_SHIFT = how many bits of the address we use to index into the
+ * corresponding P**
+ * - P**_SIZE is how much memory we can access through the table - not the
+ * size of the table itself.
+ * P**={PGD, PUD, PMD, PTE}
+ *
+ *
+ * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
+ * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
+ * a page.
+ *
+ * In the case where there are only 3 levels, the PUD is folded into the
+ * PGD: every PUD has only one entry which points to the PMD.
+ *
+ * The page dumper groups page table entries of the same type into a single
+ * description. It uses pg_state to track the range information while
+ * iterating over the PTE entries. When the continuity is broken it then
+ * dumps out a description of the range - ie PTEs that are virtually contiguous
+ * with the same PTE flags are chunked together. This is to make it clear how
+ * different areas of the kernel virtual memory are used.
+ *
+ */
+struct pg_state {
+ struct seq_file *seq;
+ const struct addr_marker *marker;
+ unsigned long start_address;
+ unsigned int level;
+ u64 current_flags;
+};
+
+struct addr_marker {
+ unsigned long start_address;
+ const char *name;
+};
+
+static struct addr_marker address_markers[] = {
+ { 0, "Start of kernel VM" },
+ { 0, "vmalloc() Area" },
+ { 0, "vmalloc() End" },
+ { 0, "isa I/O start" },
+ { 0, "isa I/O end" },
+ { 0, "phb I/O start" },
+ { 0, "phb I/O end" },
+ { 0, "I/O remap start" },
+ { 0, "I/O remap end" },
+ { 0, "vmemmap start" },
+ { -1, NULL },
+};
+
+struct flag_info {
+ u64 mask;
+ u64 val;
+ const char *set;
+ const char *clear;
+ bool is_val;
+ int shift;
+};
+
+static const struct flag_info flag_array[] = {
+ {
+#ifdef CONFIG_PPC_STD_MMU_64
+ .mask = _PAGE_PRIVILEGED,
+ .val = 0,
+#else
+ .mask = _PAGE_USER,
+ .val = _PAGE_USER,
+#endif
+ .set = "user",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_RW,
+ .val = _PAGE_RW,
+ .set = "rw",
+ .clear = "ro",
+ }, {
+ .mask = _PAGE_EXEC,
+ .val = _PAGE_EXEC,
+ .set = " X ",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_PTE,
+ .val = _PAGE_PTE,
+ .set = "pte",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_PRESENT,
+ .val = _PAGE_PRESENT,
+ .set = "present",
+ .clear = " ",
+ }, {
+#ifdef CONFIG_PPC_STD_MMU_64
+ .mask = H_PAGE_HASHPTE,
+ .val = H_PAGE_HASHPTE,
+#else
+ .mask = _PAGE_HASHPTE,
+ .val = _PAGE_HASHPTE,
+#endif
+ .set = "hpte",
+ .clear = " ",
+ }, {
+#ifndef CONFIG_PPC_STD_MMU_64
+ .mask = _PAGE_GUARDED,
+ .val = _PAGE_GUARDED,
+ .set = "guarded",
+ .clear = " ",
+ }, {
+#endif
+ .mask = _PAGE_DIRTY,
+ .val = _PAGE_DIRTY,
+ .set = "dirty",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_ACCESSED,
+ .val = _PAGE_ACCESSED,
+ .set = "accessed",
+ .clear = " ",
+ }, {
+#ifndef CONFIG_PPC_STD_MMU_64
+ .mask = _PAGE_WRITETHRU,
+ .val = _PAGE_WRITETHRU,
+ .set = "write through",
+ .clear = " ",
+ }, {
+#endif
+ .mask = _PAGE_NO_CACHE,
+ .val = _PAGE_NO_CACHE,
+ .set = "no cache",
+ .clear = " ",
+ }, {
+#ifdef CONFIG_PPC_BOOK3S_64
+ .mask = H_PAGE_BUSY,
+ .val = H_PAGE_BUSY,
+ .set = "busy",
+ }, {
+#ifdef CONFIG_PPC_64K_PAGES
+ .mask = H_PAGE_COMBO,
+ .val = H_PAGE_COMBO,
+ .set = "combo",
+ }, {
+ .mask = H_PAGE_4K_PFN,
+ .val = H_PAGE_4K_PFN,
+ .set = "4K_pfn",
+ }, {
+#endif
+ .mask = H_PAGE_F_GIX,
+ .val = H_PAGE_F_GIX,
+ .set = "f_gix",
+ .is_val = true,
+ .shift = H_PAGE_F_GIX_SHIFT,
+ }, {
+ .mask = H_PAGE_F_SECOND,
+ .val = H_PAGE_F_SECOND,
+ .set = "f_second",
+ }, {
+#endif
+ .mask = _PAGE_SPECIAL,
+ .val = _PAGE_SPECIAL,
+ .set = "special",
+ }
+};
+
+struct pgtable_level {
+ const struct flag_info *flag;
+ size_t num;
+ u64 mask;
+};
+
+static struct pgtable_level pg_level[] = {
+ {
+ }, { /* pgd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pud */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pmd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pte */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ },
+};
+
+static void dump_flag_info(struct pg_state *st, const struct flag_info
+ *flag, u64 pte, int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++, flag++) {
+ const char *s = NULL;
+ u64 val;
+
+ /* flag not defined so don't check it */
+ if (flag->mask == 0)
+ continue;
+ /* Some 'flags' are actually values */
+ if (flag->is_val) {
+ val = pte & flag->val;
+ if (flag->shift)
+ val = val >> flag->shift;
+ seq_printf(st->seq, " %s:%llx", flag->set, val);
+ } else {
+ if ((pte & flag->mask) == flag->val)
+ s = flag->set;
+ else
+ s = flag->clear;
+ if (s)
+ seq_printf(st->seq, " %s", s);
+ }
+ st->current_flags &= ~flag->mask;
+ }
+ if (st->current_flags != 0)
+ seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
+}
+
+static void dump_addr(struct pg_state *st, unsigned long addr)
+{
+ static const char units[] = "KMGTPE";
+ const char *unit = units;
+ unsigned long delta;
+
+ seq_printf(st->seq, "0x%016lx-0x%016lx ", st->start_address, addr-1);
+ delta = (addr - st->start_address) >> 10;
+ /* Work out what appropriate unit to use */
+ while (!(delta & 1023) && unit[1]) {
+ delta >>= 10;
+ unit++;
+ }
+ seq_printf(st->seq, "%9lu%c", delta, *unit);
+
+}
+
+static void note_page(struct pg_state *st, unsigned long addr,
+ unsigned int level, u64 val)
+{
+ u64 flag = val & pg_level[level].mask;
+ /* At first no level is set */
+ if (!st->level) {
+ st->level = level;
+ st->current_flags = flag;
+ st->start_address = addr;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ /*
+ * Dump the section of virtual memory when:
+	 * - the PTE flags from one entry to the next differ.
+ * - we change levels in the tree.
+ * - the address is in a different section of memory and is thus
+ * used for a different purpose, regardless of the flags.
+ */
+ } else if (flag != st->current_flags || level != st->level ||
+ addr >= st->marker[1].start_address) {
+
+ /* Check the PTE flags */
+ if (st->current_flags) {
+ dump_addr(st, addr);
+
+ /* Dump all the flags */
+ if (pg_level[st->level].flag)
+ dump_flag_info(st, pg_level[st->level].flag,
+ st->current_flags,
+ pg_level[st->level].num);
+
+ seq_puts(st->seq, "\n");
+ }
+
+ /*
+ * Address indicates we have passed the end of the
+ * current section of virtual memory
+ */
+ while (addr >= st->marker[1].start_address) {
+ st->marker++;
+ seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ }
+ st->start_address = addr;
+ st->current_flags = flag;
+ st->level = level;
+ }
+}
+
+static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+{
+ pte_t *pte = pte_offset_kernel(pmd, 0);
+ unsigned long addr;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+ addr = start + i * PAGE_SIZE;
+ note_page(st, addr, 4, pte_val(*pte));
+
+ }
+}
+
+static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+{
+ pmd_t *pmd = pmd_offset(pud, 0);
+ unsigned long addr;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+ addr = start + i * PMD_SIZE;
+ if (!pmd_none(*pmd))
+ /* pmd exists */
+ walk_pte(st, pmd, addr);
+ else
+ note_page(st, addr, 3, pmd_val(*pmd));
+ }
+}
+
+static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+ pud_t *pud = pud_offset(pgd, 0);
+ unsigned long addr;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+ addr = start + i * PUD_SIZE;
+ if (!pud_none(*pud))
+ /* pud exists */
+ walk_pmd(st, pud, addr);
+ else
+ note_page(st, addr, 2, pud_val(*pud));
+ }
+}
+
+static void walk_pagetables(struct pg_state *st)
+{
+ pgd_t *pgd = pgd_offset_k(0UL);
+ unsigned int i;
+ unsigned long addr;
+
+ /*
+ * Traverse the linux pagetable structure and dump pages that are in
+ * the hash pagetable.
+ */
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+ addr = KERN_VIRT_START + i * PGDIR_SIZE;
+ if (!pgd_none(*pgd))
+ /* pgd exists */
+ walk_pud(st, pgd, addr);
+ else
+ note_page(st, addr, 1, pgd_val(*pgd));
+ }
+}
+
+static void populate_markers(void)
+{
+ address_markers[0].start_address = PAGE_OFFSET;
+ address_markers[1].start_address = VMALLOC_START;
+ address_markers[2].start_address = VMALLOC_END;
+ address_markers[3].start_address = ISA_IO_BASE;
+ address_markers[4].start_address = ISA_IO_END;
+ address_markers[5].start_address = PHB_IO_BASE;
+ address_markers[6].start_address = PHB_IO_END;
+ address_markers[7].start_address = IOREMAP_BASE;
+ address_markers[8].start_address = IOREMAP_END;
+#ifdef CONFIG_PPC_STD_MMU_64
+ address_markers[9].start_address = H_VMEMMAP_BASE;
+#else
+ address_markers[9].start_address = VMEMMAP_BASE;
+#endif
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ struct pg_state st = {
+ .seq = m,
+ .start_address = KERN_VIRT_START,
+ .marker = address_markers,
+ };
+ /* Traverse kernel page tables */
+ walk_pagetables(&st);
+ note_page(&st, 0, 0, 0);
+ return 0;
+}
+
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void build_pgtable_complete_mask(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ if (pg_level[i].flag)
+ for (j = 0; j < pg_level[i].num; j++)
+ pg_level[i].mask |= pg_level[i].flag[j].mask;
+}
+
+static int ptdump_init(void)
+{
+ struct dentry *debugfs_file;
+
+ populate_markers();
+ build_pgtable_complete_mask();
+ debugfs_file = debugfs_create_file("kernel_pagetables", 0400, NULL,
+ NULL, &ptdump_fops);
+ return debugfs_file ? 0 : -ENOMEM;
+}
+device_initcall(ptdump_init);
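
note_page() above only emits output when the flags, the level or the address-marker region changes, so virtually contiguous pages with identical flags collapse into a single range line. A small stand-alone sketch of the same coalescing idea over a toy flags array (illustrative only, not the kernel walker):

#include <stdio.h>

int main(void)
{
        /* toy "page table": one flags value per 4K page */
        unsigned long flags[] = { 0x3, 0x3, 0x3, 0x1, 0x1, 0x3 };
        int n = sizeof(flags) / sizeof(flags[0]);
        unsigned long base = 0xc000000000000000UL, page = 0x1000;

        unsigned long start = base, cur = flags[0];
        for (int i = 1; i <= n; i++) {
                if (i == n || flags[i] != cur) {
                        /* dump the run that just ended */
                        printf("0x%016lx-0x%016lx flags:%lx\n",
                               start, base + i * page - 1, cur);
                        if (i < n) {
                                start = base + i * page;
                                cur = flags[i];
                        }
                }
        }
        return 0;
}
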
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d0b137d96df1..6fd30ac7d14a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -391,6 +391,20 @@ good_area:
if (is_exec) {
/*
+ * An execution fault + no execute ?
+ *
+ * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
+ * deliberately create NX mappings, and use the fault to do the
+ * cache flush. This is usually handled in hash_page_do_lazy_icache()
+ * but we could end up here if that races with a concurrent PTE
+ * update. In that case we need to fall through here to the VMA
+ * check below.
+ */
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
+ (regs->msr & SRR1_ISI_N_OR_G))
+ goto bad_area;
+
+ /*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
*
@@ -404,6 +418,7 @@ good_area:
(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
+
#ifdef CONFIG_PPC_STD_MMU
/*
* protfault should only happen due to us
@@ -512,7 +527,7 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
- regs->nip = entry->fixup;
+ regs->nip = extable_fixup(entry);
return;
}
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ad9fd5245be2..cc332608e656 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -123,8 +123,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
va |= ssize << 8;
sllp = get_sllp_encoding(apsize);
va |= sllp << 5;
- asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
- : : "r"(va) : "memory");
+ asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
+ : : "r" (va), "i" (CPU_FTR_ARCH_206)
+ : "memory");
break;
default:
/* We need 14 to 14 + i bits of va */
@@ -141,8 +142,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
*/
va |= (vpn & 0xfe);
va |= 1; /* L */
- asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
- : : "r"(va) : "memory");
+ asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
+ : : "r" (va), "i" (CPU_FTR_ARCH_206)
+ : "memory");
break;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a5d3ecdabc44..289df38fb7e0 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -26,6 +26,8 @@
#ifdef CONFIG_HUGETLB_PAGE
#define PAGE_SHIFT_64K 16
+#define PAGE_SHIFT_512K 19
+#define PAGE_SHIFT_8M 23
#define PAGE_SHIFT_16M 24
#define PAGE_SHIFT_16G 34
@@ -38,7 +40,7 @@ unsigned int HPAGE_SHIFT;
* implementations may have more than one gpage size, so we need multiple
* arrays
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define MAX_NUMBER_GPAGES 128
struct psize_gpages {
u64 gpage_list[MAX_NUMBER_GPAGES];
@@ -64,14 +66,16 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
{
struct kmem_cache *cachep;
pte_t *new;
-
-#ifdef CONFIG_PPC_FSL_BOOK3E
int i;
- int num_hugepd = 1 << (pshift - pdshift);
- cachep = hugepte_cache;
-#else
- cachep = PGT_CACHE(pdshift - pshift);
-#endif
+ int num_hugepd;
+
+ if (pshift >= pdshift) {
+ cachep = hugepte_cache;
+ num_hugepd = 1 << (pshift - pdshift);
+ } else {
+ cachep = PGT_CACHE(pdshift - pshift);
+ num_hugepd = 1;
+ }
new = kmem_cache_zalloc(cachep, GFP_KERNEL);
@@ -89,7 +93,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
smp_wmb();
spin_lock(&mm->page_table_lock);
-#ifdef CONFIG_PPC_FSL_BOOK3E
+
/*
* We have multiple higher-level entries that point to the same
* actual pte location. Fill in each as we go and backtrack on error.
@@ -100,8 +104,18 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (unlikely(!hugepd_none(*hpdp)))
break;
else
+#ifdef CONFIG_PPC_BOOK3S_64
+ hpdp->pd = __pa(new) |
+ (shift_to_mmu_psize(pshift) << 2);
+#elif defined(CONFIG_PPC_8xx)
+ hpdp->pd = __pa(new) |
+ (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+ _PMD_PAGE_512K) |
+ _PMD_PRESENT;
+#else
/* We use the old format for PPC_FSL_BOOK3E */
hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+#endif
}
/* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) {
@@ -109,17 +123,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
hpdp->pd = 0;
kmem_cache_free(cachep, new);
}
-#else
- if (!hugepd_none(*hpdp))
- kmem_cache_free(cachep, new);
- else {
-#ifdef CONFIG_PPC_BOOK3S_64
- hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2);
-#else
- hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
-#endif
- }
-#endif
spin_unlock(&mm->page_table_lock);
return 0;
}
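
With the #ifdef maze gone, __hugepte_alloc() above picks the kmem cache and the number of directory entries from the relation between pshift and pdshift: when the huge-page shift is at least the directory shift, 1 << (pshift - pdshift) consecutive entries share one hugepte page, otherwise exactly one. A quick stand-alone illustration (the shift values are examples, loosely matching the 8xx 8M/512K cases):

#include <stdio.h>

/* how many directory entries point at the same hugepte page */
static unsigned int num_hugepd(unsigned int pshift, unsigned int pdshift)
{
        return (pshift >= pdshift) ? 1u << (pshift - pdshift) : 1;
}

int main(void)
{
        printf("8M   page, pdshift 22: %u entries\n", num_hugepd(23, 22));
        printf("512K page, pdshift 22: %u entries\n", num_hugepd(19, 22));
        return 0;
}
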
@@ -128,7 +131,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
* These macros define how to determine which level of the page table holds
* the hpdp.
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
@@ -136,7 +139,6 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
/*
* At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs.
@@ -153,6 +155,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
addr &= ~(sz-1);
pg = pgd_offset(mm, addr);
+#ifdef CONFIG_PPC_BOOK3S_64
if (pshift == PGDIR_SHIFT)
/* 16GB huge page */
return (pte_t *) pg;
@@ -178,32 +181,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
hpdp = (hugepd_t *)pm;
}
}
- if (!hpdp)
- return NULL;
-
- BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
-
- if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
- return NULL;
-
- return hugepte_offset(*hpdp, addr, pdshift);
-}
-
#else
-
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
-{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
- hugepd_t *hpdp = NULL;
- unsigned pshift = __ffs(sz);
- unsigned pdshift = PGDIR_SHIFT;
-
- addr &= ~(sz-1);
-
- pg = pgd_offset(mm, addr);
-
if (pshift >= HUGEPD_PGD_SHIFT) {
hpdp = (hugepd_t *)pg;
} else {
@@ -217,7 +195,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
hpdp = (hugepd_t *)pm;
}
}
-
+#endif
if (!hpdp)
return NULL;
@@ -228,9 +206,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
return hugepte_offset(*hpdp, addr, pdshift);
}
-#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
/* Build list of addresses of gigantic pages. This function is used in early
* boot before the buddy allocator is setup.
*/
@@ -310,7 +287,11 @@ static int __init do_gpage_early_setup(char *param, char *val,
npages = 0;
if (npages > MAX_NUMBER_GPAGES) {
pr_warn("MMU: %lu pages requested for page "
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
"size %llu KB, limiting to "
+#else
+ "size %u KB, limiting to "
+#endif
__stringify(MAX_NUMBER_GPAGES) "\n",
npages, size / 1024);
npages = MAX_NUMBER_GPAGES;
@@ -392,7 +373,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
}
#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -442,6 +423,8 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
}
put_cpu_var(hugepd_freelist_cur);
}
+#else
+static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
@@ -453,13 +436,11 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
unsigned long pdmask = ~((1UL << pdshift) - 1);
unsigned int num_hugepd = 1;
+ unsigned int shift = hugepd_shift(*hpdp);
-#ifdef CONFIG_PPC_FSL_BOOK3E
/* Note: On fsl the hpdp may be the first of several */
- num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
-#else
- unsigned int shift = hugepd_shift(*hpdp);
-#endif
+ if (shift > pdshift)
+ num_hugepd = 1 << (shift - pdshift);
start &= pdmask;
if (start < floor)
@@ -475,11 +456,10 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
for (i = 0; i < num_hugepd; i++, hpdp++)
hpdp->pd = 0;
-#ifdef CONFIG_PPC_FSL_BOOK3E
- hugepd_free(tlb, hugepte);
-#else
- pgtable_free_tlb(tlb, hugepte, pdshift - shift);
-#endif
+ if (shift >= pdshift)
+ hugepd_free(tlb, hugepte);
+ else
+ pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -492,6 +472,8 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
start = addr;
do {
+ unsigned long more;
+
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
@@ -502,15 +484,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
WARN_ON(!pmd_none_or_clear_bad(pmd));
continue;
}
-#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* Increment next by the size of the huge mapping since
* there may be more than one entry at this level for a
* single hugepage, but all of them point to
* the same kmem cache that holds the hugepte.
*/
- next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
-#endif
+ more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+ if (more > next)
+ next = more;
+
free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
addr, next, floor, ceiling);
} while (addr = next, addr != end);
@@ -550,15 +533,17 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
ceiling);
} else {
-#ifdef CONFIG_PPC_FSL_BOOK3E
+ unsigned long more;
/*
* Increment next by the size of the huge mapping since
* there may be more than one entry at this level for a
* single hugepage, but all of them point to
* the same kmem cache that holds the hugepte.
*/
- next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
-#endif
+ more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+ if (more > next)
+ next = more;
+
free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
addr, next, floor, ceiling);
}
@@ -615,15 +600,17 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
continue;
hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
} else {
-#ifdef CONFIG_PPC_FSL_BOOK3E
+ unsigned long more;
/*
* Increment next by the size of the huge mapping since
* there may be more than one entry at the pgd level
* for a single hugepage, but all of them point to the
* same kmem cache that holds the hugepte.
*/
- next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
-#endif
+ more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
+ if (more > next)
+ next = more;
+
free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
addr, next, floor, ceiling);
}
@@ -753,12 +740,13 @@ static int __init add_huge_page_size(unsigned long long size)
/* Check that it is a page size supported by the hardware and
* that it fits within pagetable and slice limits. */
-#ifdef CONFIG_PPC_FSL_BOOK3E
- if ((size < PAGE_SIZE) || !is_power_of_4(size))
+ if (size <= PAGE_SIZE)
return -EINVAL;
-#else
- if (!is_power_of_2(size)
- || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
+#if defined(CONFIG_PPC_FSL_BOOK3E)
+ if (!is_power_of_4(size))
+ return -EINVAL;
+#elif !defined(CONFIG_PPC_8xx)
+ if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
return -EINVAL;
#endif
@@ -791,53 +779,15 @@ static int __init hugepage_setup_sz(char *str)
}
__setup("hugepagesz=", hugepage_setup_sz);
-#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
int psize;
- for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
- unsigned shift;
-
- if (!mmu_psize_defs[psize].shift)
- continue;
-
- shift = mmu_psize_to_shift(psize);
-
- /* Don't treat normal page sizes as huge... */
- if (shift != PAGE_SHIFT)
- if (add_huge_page_size(1ULL << shift) < 0)
- continue;
- }
-
- /*
- * Create a kmem cache for hugeptes. The bottom bits in the pte have
- * size information encoded in them, so align them to allow this
- */
- hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
- HUGEPD_SHIFT_MASK + 1, 0, NULL);
- if (hugepte_cache == NULL)
- panic("%s: Unable to create kmem cache for hugeptes\n",
- __func__);
-
- /* Default hpage size = 4M */
- if (mmu_psize_defs[MMU_PAGE_4M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
- else
- panic("%s: Unable to set default huge page size\n", __func__);
-
-
- return 0;
-}
-#else
-static int __init hugetlbpage_init(void)
-{
- int psize;
-
+#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
return -ENODEV;
-
+#endif
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
unsigned shift;
unsigned pdshift;
@@ -850,9 +800,9 @@ static int __init hugetlbpage_init(void)
if (add_huge_page_size(1ULL << shift) < 0)
continue;
- if (shift < PMD_SHIFT)
+ if (shift < HUGEPD_PUD_SHIFT)
pdshift = PMD_SHIFT;
- else if (shift < PUD_SHIFT)
+ else if (shift < HUGEPD_PGD_SHIFT)
pdshift = PUD_SHIFT;
else
pdshift = PGDIR_SHIFT;
@@ -860,14 +810,38 @@ static int __init hugetlbpage_init(void)
* if we have pdshift and shift value same, we don't
* use pgt cache for hugepd.
*/
- if (pdshift != shift) {
+ if (pdshift > shift) {
pgtable_cache_add(pdshift - shift, NULL);
if (!PGT_CACHE(pdshift - shift))
panic("hugetlbpage_init(): could not create "
"pgtable cache for %d bit pagesize\n", shift);
}
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
+ else if (!hugepte_cache) {
+ /*
+ * Create a kmem cache for hugeptes. The bottom bits in
+ * the pte have size information encoded in them, so
+ * align them to allow this
+ */
+ hugepte_cache = kmem_cache_create("hugepte-cache",
+ sizeof(pte_t),
+ HUGEPD_SHIFT_MASK + 1,
+ 0, NULL);
+ if (hugepte_cache == NULL)
+ panic("%s: Unable to create kmem cache "
+ "for hugeptes\n", __func__);
+
+ }
+#endif
}
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
+ /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
+ if (mmu_psize_defs[MMU_PAGE_4M].shift)
+ HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
+ else if (mmu_psize_defs[MMU_PAGE_512K].shift)
+ HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
+#else
/* Set default large page size. Currently, we pick 16M or 1M
* depending on what is available
*/
@@ -877,11 +851,13 @@ static int __init hugetlbpage_init(void)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
else if (mmu_psize_defs[MMU_PAGE_2M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
-
+#endif
+ else
+ panic("%s: Unable to set default huge page size\n", __func__);
return 0;
}
-#endif
+
arch_initcall(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page)
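A minimal user-space sketch of the arithmetic the unified hugepd paths above now rely on, using illustrative shift values rather than ones quoted from any particular config: when the huge-page shift is at or above the directory shift, several directory entries cover one huge page; otherwise a single hugepd points at a sub-table from the pgtable cache.

#include <stdio.h>

/* Toy model of the unified hugepd sizing rule used in __hugepte_alloc()
 * and free_hugepd_range(); shift values below are examples only. */
static unsigned int num_hugepd(unsigned int pshift, unsigned int pdshift)
{
	return pshift >= pdshift ? 1u << (pshift - pdshift) : 1;
}

int main(void)
{
	/* e.g. an 8M page (shift 23) spanning a 22-bit directory slot: 2 entries */
	printf("pshift 23, pdshift 22 -> %u hugepd entries\n", num_hugepd(23, 22));
	/* e.g. a huge page smaller than the directory span: one hugepd,
	 * backed by a sub-table sized by (pdshift - pshift) */
	printf("pshift 20, pdshift 24 -> %u hugepd entries\n", num_hugepd(20, 24));
	return 0;
}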
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
new file mode 100644
index 000000000000..a175cd82ae8c
--- /dev/null
+++ b/arch/powerpc/mm/init-common.c
@@ -0,0 +1,107 @@
+/*
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Dave Engebretsen <engebret@us.ibm.com>
+ * Rework for PPC64 port.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/string.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+static void pgd_ctor(void *addr)
+{
+ memset(addr, 0, PGD_TABLE_SIZE);
+}
+
+static void pud_ctor(void *addr)
+{
+ memset(addr, 0, PUD_TABLE_SIZE);
+}
+
+static void pmd_ctor(void *addr)
+{
+ memset(addr, 0, PMD_TABLE_SIZE);
+}
+
+struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
+
+/*
+ * Create a kmem_cache() for pagetables. This is not used for PTE
+ * pages - they're linked to struct page, come from the normal free
+ * pages pool and have a different entry size (see real_pte_t) to
+ * everything else. Caches created by this function are used for all
+ * the higher level pagetables, and for hugepage pagetables.
+ */
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
+{
+ char *name;
+ unsigned long table_size = sizeof(void *) << shift;
+ unsigned long align = table_size;
+
+ /* When batching pgtable pointers for RCU freeing, we store
+ * the index size in the low bits. Table alignment must be
+ * big enough to fit it.
+ *
+ * Likewise, hugepage pagetable pointers contain a (different)
+ * shift value in the low bits. All tables must be aligned so
+ * as to leave enough 0 bits in the address to contain it. */
+ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
+ HUGEPD_SHIFT_MASK + 1);
+ struct kmem_cache *new;
+
+ /* It would be nice if this was a BUILD_BUG_ON(), but at the
+ * moment, gcc doesn't seem to recognize is_power_of_2 as a
+ * constant expression, so, so much for that. */
+ BUG_ON(!is_power_of_2(minalign));
+ BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
+
+ if (PGT_CACHE(shift))
+ return; /* Already have a cache of this size */
+
+ align = max_t(unsigned long, align, minalign);
+ name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
+ new = kmem_cache_create(name, table_size, align, 0, ctor);
+ kfree(name);
+ pgtable_cache[shift - 1] = new;
+ pr_debug("Allocated pgtable cache for order %d\n", shift);
+}
+
+
+void pgtable_cache_init(void)
+{
+ pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
+
+ if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+ pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
+ /*
+ * In all current configs, when the PUD index exists it's the
+ * same size as either the pgd or pmd index except with THP enabled
+ * on book3s 64
+ */
+ if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
+ pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+
+ if (!PGT_CACHE(PGD_INDEX_SIZE))
+ panic("Couldn't allocate pgd cache");
+ if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+ panic("Couldn't allocate pmd pgtable caches");
+ if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
+ panic("Couldn't allocate pud pgtable caches");
+}
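To make the alignment comment in pgtable_cache_add() concrete, here is a self-contained sketch of the rule it enforces; the two mask constants are stand-ins chosen for illustration, not values quoted from the headers.

#include <stdio.h>

#define ILLUSTRATIVE_MAX_PGTABLE_INDEX_SIZE 0xf   /* stand-in value */
#define ILLUSTRATIVE_HUGEPD_SHIFT_MASK      0x3f  /* stand-in value */

/* A table of 2^shift pointers must be aligned far enough that the low
 * bits of its address can carry an index size (for RCU batching) or a
 * hugepd shift, whichever needs more bits. */
static unsigned long table_align(unsigned int shift)
{
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long minalign = ILLUSTRATIVE_MAX_PGTABLE_INDEX_SIZE + 1;

	if (ILLUSTRATIVE_HUGEPD_SHIFT_MASK + 1 > minalign)
		minalign = ILLUSTRATIVE_HUGEPD_SHIFT_MASK + 1;

	return table_size > minalign ? table_size : minalign;
}

int main(void)
{
	printf("shift 9 -> size %lu, align %lu\n",
	       (unsigned long)(sizeof(void *) << 9), table_align(9));
	printf("shift 2 -> size %lu, align %lu\n",
	       (unsigned long)(sizeof(void *) << 2), table_align(2));
	return 0;
}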
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 16ada1eb7e26..a000c3585390 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -80,83 +80,6 @@ EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
-static void pgd_ctor(void *addr)
-{
- memset(addr, 0, PGD_TABLE_SIZE);
-}
-
-static void pud_ctor(void *addr)
-{
- memset(addr, 0, PUD_TABLE_SIZE);
-}
-
-static void pmd_ctor(void *addr)
-{
- memset(addr, 0, PMD_TABLE_SIZE);
-}
-
-struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
-
-/*
- * Create a kmem_cache() for pagetables. This is not used for PTE
- * pages - they're linked to struct page, come from the normal free
- * pages pool and have a different entry size (see real_pte_t) to
- * everything else. Caches created by this function are used for all
- * the higher level pagetables, and for hugepage pagetables.
- */
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
-{
- char *name;
- unsigned long table_size = sizeof(void *) << shift;
- unsigned long align = table_size;
-
- /* When batching pgtable pointers for RCU freeing, we store
- * the index size in the low bits. Table alignment must be
- * big enough to fit it.
- *
- * Likewise, hugeapge pagetable pointers contain a (different)
- * shift value in the low bits. All tables must be aligned so
- * as to leave enough 0 bits in the address to contain it. */
- unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
- HUGEPD_SHIFT_MASK + 1);
- struct kmem_cache *new;
-
- /* It would be nice if this was a BUILD_BUG_ON(), but at the
- * moment, gcc doesn't seem to recognize is_power_of_2 as a
- * constant expression, so so much for that. */
- BUG_ON(!is_power_of_2(minalign));
- BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
-
- if (PGT_CACHE(shift))
- return; /* Already have a cache of this size */
-
- align = max_t(unsigned long, align, minalign);
- name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
- new = kmem_cache_create(name, table_size, align, 0, ctor);
- kfree(name);
- pgtable_cache[shift - 1] = new;
- pr_debug("Allocated pgtable cache for order %d\n", shift);
-}
-
-
-void pgtable_cache_init(void)
-{
- pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
- pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
- /*
- * In all current configs, when the PUD index exists it's the
- * same size as either the pgd or pmd index except with THP enabled
- * on book3s 64
- */
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
- if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
- panic("Couldn't allocate pgtable caches");
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- panic("Couldn't allocate pud pgtable caches");
-}
-
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Given an address within the vmemmap, determine the pfn of the page that
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index b114f8b93ec9..73bf6e14c3aa 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -115,7 +115,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
- mm_iommu_init(&mm->context);
+ mm_iommu_init(mm);
#endif
return 0;
}
@@ -156,13 +156,11 @@ static inline void destroy_pagetable_page(struct mm_struct *mm)
}
#endif
-
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
- mm_iommu_cleanup(&mm->context);
+ WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
-
#ifdef CONFIG_PPC_ICSWX
drop_cop(mm->context.acop, mm);
kfree(mm->context.cop_lockp);
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e0f1c33601dd..104bad029ce9 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -56,7 +56,7 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
}
pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
- current->pid,
+ current ? current->pid : 0,
incr ? '+' : '-',
npages << PAGE_SHIFT,
mm->locked_vm << PAGE_SHIFT,
@@ -66,12 +66,9 @@ static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
return ret;
}
-bool mm_iommu_preregistered(void)
+bool mm_iommu_preregistered(struct mm_struct *mm)
{
- if (!current || !current->mm)
- return false;
-
- return !list_empty(&current->mm->context.iommu_group_mem_list);
+ return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
@@ -124,19 +121,16 @@ static int mm_iommu_move_page_from_cma(struct page *page)
return 0;
}
-long mm_iommu_get(unsigned long ua, unsigned long entries,
+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem)
{
struct mm_iommu_table_group_mem_t *mem;
long i, j, ret = 0, locked_entries = 0;
struct page *page = NULL;
- if (!current || !current->mm)
- return -ESRCH; /* process exited */
-
mutex_lock(&mem_list_mutex);
- list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
next) {
if ((mem->ua == ua) && (mem->entries == entries)) {
++mem->used;
@@ -154,7 +148,7 @@ long mm_iommu_get(unsigned long ua, unsigned long entries,
}
- ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
+ ret = mm_iommu_adjust_locked_vm(mm, entries, true);
if (ret)
goto unlock_exit;
@@ -215,11 +209,11 @@ populate:
mem->entries = entries;
*pmem = mem;
- list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
+ list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
unlock_exit:
if (locked_entries && ret)
- mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
+ mm_iommu_adjust_locked_vm(mm, locked_entries, false);
mutex_unlock(&mem_list_mutex);
@@ -264,17 +258,13 @@ static void mm_iommu_free(struct rcu_head *head)
static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
list_del_rcu(&mem->next);
- mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
call_rcu(&mem->rcu, mm_iommu_free);
}
-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
long ret = 0;
- if (!current || !current->mm)
- return -ESRCH; /* process exited */
-
mutex_lock(&mem_list_mutex);
if (mem->used == 0) {
@@ -297,6 +287,8 @@ long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
/* @mapped became 0 so now mappings are disabled, release the region */
mm_iommu_release(mem);
+ mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+
unlock_exit:
mutex_unlock(&mem_list_mutex);
@@ -304,14 +296,12 @@ unlock_exit:
}
EXPORT_SYMBOL_GPL(mm_iommu_put);
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
- unsigned long size)
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+ unsigned long ua, unsigned long size)
{
struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
- list_for_each_entry_rcu(mem,
- &current->mm->context.iommu_group_mem_list,
- next) {
+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if ((mem->ua <= ua) &&
(ua + size <= mem->ua +
(mem->entries << PAGE_SHIFT))) {
@@ -324,14 +314,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);
-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
- unsigned long entries)
+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+ unsigned long ua, unsigned long entries)
{
struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
- list_for_each_entry_rcu(mem,
- &current->mm->context.iommu_group_mem_list,
- next) {
+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if ((mem->ua == ua) && (mem->entries == entries)) {
ret = mem;
break;
@@ -373,17 +361,7 @@ void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
-void mm_iommu_init(mm_context_t *ctx)
+void mm_iommu_init(struct mm_struct *mm)
{
- INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
-}
-
-void mm_iommu_cleanup(mm_context_t *ctx)
-{
- struct mm_iommu_table_group_mem_t *mem, *tmp;
-
- list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
- list_del_rcu(&mem->next);
- mm_iommu_do_free(mem);
- }
+ INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}
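The thread running through these mmu_context_iommu.c hunks is that every helper now takes the mm_struct it should operate on instead of reaching for current->mm, so preregistered memory can be released at container shutdown even after the owning task has exited. A toy user-space model of that API shape, with entirely hypothetical type and function names, might look like:

#include <stdio.h>

/* Hypothetical stand-ins: "struct context" plays the role of mm_struct,
 * and the register/unregister pair mirrors mm_iommu_get()/mm_iommu_put()
 * taking the context explicitly rather than using a global "current". */
struct context {
	const char *name;
	unsigned long locked;
};

static int mem_register(struct context *ctx, unsigned long entries)
{
	ctx->locked += entries;   /* account against the given context */
	return 0;
}

static void mem_unregister(struct context *ctx, unsigned long entries)
{
	ctx->locked -= entries;   /* still works once "current" is gone */
}

int main(void)
{
	struct context task_mm = { "task-mm", 0 };

	mem_register(&task_mm, 16);
	/* ...the registering task may exit here; whoever holds the context
	 * pointer can still undo the accounting... */
	mem_unregister(&task_mm, 16);
	printf("%s locked entries: %lu\n", task_mm.name, task_mm.locked);
	return 0;
}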
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index f4f437cbabf1..ebf9782bacf9 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -35,7 +35,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
#endif
changed = !pmd_same(*(pmdp), entry);
if (changed) {
- __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp), pmd_pte(entry));
+ __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
+ pmd_pte(entry), address);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
return changed;
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 8d941c692eb3..cfa53ccc8baf 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -159,7 +159,7 @@ redo:
* Allocate Partition table and process table for the
* host.
*/
- BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 23), "Process table size too large.");
+ BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
/*
* Fill in the process table.
@@ -240,7 +240,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
/* top 3 bit is AP encoding */
shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
ap = be32_to_cpu(prop[0]) >> 29;
- pr_info("Page size sift = %d AP=0x%x\n", shift, ap);
+ pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
idx = get_idx_from_shift(shift);
if (idx < 0)
@@ -312,6 +312,38 @@ static void update_hid_for_radix(void)
cpu_relax();
}
+static void radix_init_amor(void)
+{
+ /*
+ * In HV mode, we init AMOR (Authority Mask Override Register) so that
+ * the hypervisor and guest can set up IAMR (Instruction Authority Mask
+ * Register), enable key 0 and set it to 1.
+ *
+ * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
+ */
+ mtspr(SPRN_AMOR, (3ul << 62));
+}
+
+static void radix_init_iamr(void)
+{
+ unsigned long iamr;
+
+ /*
+ * The IAMR should be set to 0 on DD1.
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ iamr = 0;
+ else
+ iamr = (1ul << 62);
+
+ /*
+ * Radix always uses key0 of the IAMR to determine if an access is
+ * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
+ * fetch.
+ */
+ mtspr(SPRN_IAMR, iamr);
+}
+
void __init radix__early_init_mmu(void)
{
unsigned long lpcr;
@@ -368,10 +400,12 @@ void __init radix__early_init_mmu(void)
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
+ radix_init_amor();
}
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+ radix_init_iamr();
radix_init_pgtable();
}
@@ -391,7 +425,9 @@ void radix__early_init_mmu_secondary(void)
mtspr(SPRN_PTCR,
__pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+ radix_init_amor();
}
+ radix_init_iamr();
}
void radix__mmu_cleanup_all(void)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 911fdfb63ec1..cb39c8bd2436 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -224,7 +224,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
if (changed) {
if (!is_vm_hugetlb_page(vma))
assert_pte_locked(vma->vm_mm, address);
- __ptep_set_access_flags(vma->vm_mm, ptep, entry);
+ __ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
flush_tlb_page(vma, address);
}
return changed;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 0ae0572bc239..a65c0b4c0669 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -42,43 +42,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[], _sinittext[], _einittext[];
-#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
-
-#ifndef CONFIG_PPC_4K_PAGES
-static struct kmem_cache *pgtable_cache;
-
-void pgtable_cache_init(void)
-{
- pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER,
- 1 << PGDIR_ORDER, 0, NULL);
- if (pgtable_cache == NULL)
- panic("Couldn't allocate pgtable caches");
-}
-#endif
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- pgd_t *ret;
-
- /* pgdir take page or two with 4K pages and a page fraction otherwise */
-#ifndef CONFIG_PPC_4K_PAGES
- ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO);
-#else
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
- PGDIR_ORDER - PAGE_SHIFT);
-#endif
- return ret;
-}
-
-void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-#ifndef CONFIG_PPC_4K_PAGES
- kmem_cache_free(pgtable_cache, (void *)pgd);
-#else
- free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
-#endif
-}
-
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 3493cf4e0452..61b79119065f 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -428,3 +428,21 @@ void radix__flush_tlb_all(void)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+
+void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
+ unsigned long address)
+{
+ /*
+ * We track page size in pte only for DD1, so we can
+ * call this only on DD1.
+ */
+ if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ VM_WARN_ON(1);
+ return;
+ }
+
+ if (old_pte & _PAGE_LARGE)
+ radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
+ else
+ radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
+}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 050badc0ebd3..ba28fcb98597 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -53,7 +53,7 @@
* other sizes not listed here. The .ind field is only used on MMUs that have
* indirect page table entries.
*/
-#ifdef CONFIG_PPC_BOOK3E_MMU
+#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
@@ -85,6 +85,25 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.enc = BOOK3E_PAGESZ_1GB,
},
};
+#elif defined(CONFIG_PPC_8xx)
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+ /* we only manage 4k and 16k pages as normal pages */
+#ifdef CONFIG_PPC_4K_PAGES
+ [MMU_PAGE_4K] = {
+ .shift = 12,
+ },
+#else
+ [MMU_PAGE_16K] = {
+ .shift = 14,
+ },
+#endif
+ [MMU_PAGE_512K] = {
+ .shift = 19,
+ },
+ [MMU_PAGE_8M] = {
+ .shift = 23,
+ },
+};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 6143c99f3ec5..50e598cf644b 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -12,6 +12,40 @@
*/
#include "isa207-common.h"
+PMU_FORMAT_ATTR(event, "config:0-49");
+PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
+PMU_FORMAT_ATTR(mark, "config:8");
+PMU_FORMAT_ATTR(combine, "config:11");
+PMU_FORMAT_ATTR(unit, "config:12-15");
+PMU_FORMAT_ATTR(pmc, "config:16-19");
+PMU_FORMAT_ATTR(cache_sel, "config:20-23");
+PMU_FORMAT_ATTR(sample_mode, "config:24-28");
+PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
+PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
+PMU_FORMAT_ATTR(thresh_start, "config:36-39");
+PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
+
+struct attribute *isa207_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_pmcxsel.attr,
+ &format_attr_mark.attr,
+ &format_attr_combine.attr,
+ &format_attr_unit.attr,
+ &format_attr_pmc.attr,
+ &format_attr_cache_sel.attr,
+ &format_attr_sample_mode.attr,
+ &format_attr_thresh_sel.attr,
+ &format_attr_thresh_stop.attr,
+ &format_attr_thresh_start.attr,
+ &format_attr_thresh_cmp.attr,
+ NULL,
+};
+
+struct attribute_group isa207_pmu_format_group = {
+ .name = "format",
+ .attrs = isa207_pmu_format_attr,
+};
+
static inline bool event_is_fab_match(u64 event)
{
/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
@@ -21,6 +55,48 @@ static inline bool event_is_fab_match(u64 event)
return (event == 0x30056 || event == 0x4f052);
}
+static bool is_event_valid(u64 event)
+{
+ u64 valid_mask = EVENT_VALID_MASK;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ valid_mask = p9_EVENT_VALID_MASK;
+
+ return !(event & ~valid_mask);
+}
+
+static u64 mmcra_sdar_mode(u64 event)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+
+ return MMCRA_SDAR_MODE_TLB;
+}
+
+static u64 thresh_cmp_val(u64 value)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return value << p9_MMCRA_THR_CMP_SHIFT;
+
+ return value << MMCRA_THR_CMP_SHIFT;
+}
+
+static unsigned long combine_from_event(u64 event)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return p9_EVENT_COMBINE(event);
+
+ return EVENT_COMBINE(event);
+}
+
+static unsigned long combine_shift(unsigned long pmc)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ return p9_MMCR1_COMBINE_SHIFT(pmc);
+
+ return MMCR1_COMBINE_SHIFT(pmc);
+}
+
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
unsigned int unit, pmc, cache, ebb;
@@ -28,7 +104,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
mask = value = 0;
- if (event & ~EVENT_VALID_MASK)
+ if (!is_event_valid(event))
return -1;
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
@@ -155,15 +231,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
pmc_inuse |= 1 << pmc;
}
- /* In continuous sampling mode, update SDAR on TLB miss */
- mmcra = MMCRA_SDAR_MODE_TLB;
- mmcr1 = mmcr2 = 0;
+ mmcra = mmcr1 = mmcr2 = 0;
/* Second pass: assign PMCs, set all MMCR1 fields */
for (i = 0; i < n_ev; ++i) {
pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
+ combine = combine_from_event(event[i]);
psel = event[i] & EVENT_PSEL_MASK;
if (!pmc) {
@@ -177,10 +251,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
if (pmc <= 4) {
mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
- mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
+ mmcr1 |= combine << combine_shift(pmc);
mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
}
+ /* In continuous sampling mode, update SDAR on TLB miss */
+ mmcra |= mmcra_sdar_mode(event[i]);
+
if (event[i] & EVENT_IS_L1) {
cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
@@ -211,7 +288,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
mmcra |= val << MMCRA_THR_SEL_SHIFT;
val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- mmcra |= val << MMCRA_THR_CMP_SHIFT;
+ mmcra |= thresh_cmp_val(val);
}
if (event[i] & EVENT_WANTS_BHRB) {
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 4d0a4e5017c2..90495f1580c7 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -107,6 +107,7 @@
#define EVENT_UNIT_MASK 0xf
#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
#define EVENT_COMBINE_MASK 0x1
+#define EVENT_COMBINE(v) (((v) >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK)
#define EVENT_MARKED_SHIFT 8 /* Marked bit */
#define EVENT_MARKED_MASK 0x1
#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
@@ -134,6 +135,26 @@
PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
+/* Constants to support power9 raw encoding format */
+#define p9_EVENT_COMBINE_SHIFT 10 /* Combine bit */
+#define p9_EVENT_COMBINE_MASK 0x3ull
+#define p9_EVENT_COMBINE(v) (((v) >> p9_EVENT_COMBINE_SHIFT) & p9_EVENT_COMBINE_MASK)
+#define p9_SDAR_MODE_SHIFT 50
+#define p9_SDAR_MODE_MASK 0x3ull
+#define p9_SDAR_MODE(v) (((v) >> p9_SDAR_MODE_SHIFT) & p9_SDAR_MODE_MASK)
+
+#define p9_EVENT_VALID_MASK \
+ ((p9_SDAR_MODE_MASK << p9_SDAR_MODE_SHIFT | \
+ (EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
+ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
+ (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
+ (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
+ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
+ (p9_EVENT_COMBINE_MASK << p9_EVENT_COMBINE_SHIFT) | \
+ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
+ EVENT_LINUX_MASK | \
+ EVENT_PSEL_MASK))
+
/*
* Layout of constraint bits:
*
@@ -210,15 +231,22 @@
#define MMCR1_DC_QUAL_SHIFT 47
#define MMCR1_IC_QUAL_SHIFT 46
+/* MMCR1 Combine bits macro for power9 */
+#define p9_MMCR1_COMBINE_SHIFT(pmc) (38 - ((pmc - 1) * 2))
+
/* Bits in MMCRA for PowerISA v2.07 */
#define MMCRA_SAMP_MODE_SHIFT 1
#define MMCRA_SAMP_ELIG_SHIFT 4
#define MMCRA_THR_CTL_SHIFT 8
#define MMCRA_THR_SEL_SHIFT 16
#define MMCRA_THR_CMP_SHIFT 32
-#define MMCRA_SDAR_MODE_TLB (1ull << 42)
+#define MMCRA_SDAR_MODE_SHIFT 42
+#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_IFM_SHIFT 30
+/* MMCRA Threshold Compare bit constant for power9 */
+#define p9_MMCRA_THR_CMP_SHIFT 45
+
/* Bits in MMCR2 for PowerISA v2.07 */
#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
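Since the Power9 raw format widens combine to bits 10-11 and adds an SDAR-mode field at bits 50-51, a quick standalone decoder built from the same shifts and masks shows how a raw config value splits up; the sample value below is arbitrary.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the p9_EVENT_COMBINE / p9_SDAR_MODE macros added above. */
#define p9_EVENT_COMBINE_SHIFT 10
#define p9_EVENT_COMBINE_MASK  0x3ull
#define p9_SDAR_MODE_SHIFT     50
#define p9_SDAR_MODE_MASK      0x3ull

int main(void)
{
	/* arbitrary sample: pmcxsel 0xfe, combine 1, sdar_mode 2 */
	uint64_t event = (2ull << p9_SDAR_MODE_SHIFT) |
			 (1ull << p9_EVENT_COMBINE_SHIFT) | 0xfe;

	printf("pmcxsel   = 0x%llx\n", (unsigned long long)(event & 0xff));
	printf("combine   = %llu\n", (unsigned long long)
	       ((event >> p9_EVENT_COMBINE_SHIFT) & p9_EVENT_COMBINE_MASK));
	printf("sdar_mode = %llu\n", (unsigned long long)
	       ((event >> p9_SDAR_MODE_SHIFT) & p9_SDAR_MODE_MASK));
	return 0;
}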
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index ab830d106ec5..d07186382f3a 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -30,6 +30,9 @@ enum {
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
+/* PowerISA v2.07 format attribute structure */
+extern struct attribute_group isa207_pmu_format_group;
+
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
{ PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT },
@@ -175,42 +178,8 @@ static struct attribute_group power8_pmu_events_group = {
.attrs = power8_events_attr,
};
-PMU_FORMAT_ATTR(event, "config:0-49");
-PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
-PMU_FORMAT_ATTR(mark, "config:8");
-PMU_FORMAT_ATTR(combine, "config:11");
-PMU_FORMAT_ATTR(unit, "config:12-15");
-PMU_FORMAT_ATTR(pmc, "config:16-19");
-PMU_FORMAT_ATTR(cache_sel, "config:20-23");
-PMU_FORMAT_ATTR(sample_mode, "config:24-28");
-PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
-PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
-PMU_FORMAT_ATTR(thresh_start, "config:36-39");
-PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
-
-static struct attribute *power8_pmu_format_attr[] = {
- &format_attr_event.attr,
- &format_attr_pmcxsel.attr,
- &format_attr_mark.attr,
- &format_attr_combine.attr,
- &format_attr_unit.attr,
- &format_attr_pmc.attr,
- &format_attr_cache_sel.attr,
- &format_attr_sample_mode.attr,
- &format_attr_thresh_sel.attr,
- &format_attr_thresh_stop.attr,
- &format_attr_thresh_start.attr,
- &format_attr_thresh_cmp.attr,
- NULL,
-};
-
-static struct attribute_group power8_pmu_format_group = {
- .name = "format",
- .attrs = power8_pmu_format_attr,
-};
-
static const struct attribute_group *power8_pmu_attr_groups[] = {
- &power8_pmu_format_group,
+ &isa207_pmu_format_group,
&power8_pmu_events_group,
NULL,
};
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 8e9a81967ff8..346010e8d463 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -16,6 +16,78 @@
#include "isa207-common.h"
/*
+ * Raw event encoding for Power9:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * | | [ ] [ ] [ thresh_cmp ] [ thresh_ctl ]
+ * | | | | |
+ * | | *- IFM (Linux) | thresh start/stop OR FAB match -*
+ * | *- BHRB (Linux) *sm
+ * *- EBB (Linux)
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [cache] [ pmc ] [unit ] [] m [ pmcxsel ]
+ * | | | | |
+ * | | | | *- mark
+ * | | *- L1/L2/L3 cache_sel |
+ * | | |
+ * | *- sampling mode for marked events *- combine
+ * |
+ * *- thresh_sel
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit (PMCxUNIT)
+ * MMCR1[24] = pmc1combine[0]
+ * MMCR1[25] = pmc1combine[1]
+ * MMCR1[26] = pmc2combine[0]
+ * MMCR1[27] = pmc2combine[1]
+ * MMCR1[28] = pmc3combine[0]
+ * MMCR1[29] = pmc3combine[1]
+ * MMCR1[30] = pmc4combine[0]
+ * MMCR1[31] = pmc4combine[1]
+ *
+ * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
+ * # PM_MRK_FAB_RSP_MATCH
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
+ * # PM_MRK_FAB_RSP_MATCH_CYC
+ * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
+ * else
+ * MMCRA[48:55] = thresh_ctl (THRESH START/END)
+ *
+ * if thresh_sel:
+ * MMCRA[45:47] = thresh_sel
+ *
+ * if thresh_cmp:
+ * MMCRA[9:11] = thresh_cmp[0:2]
+ * MMCRA[12:18] = thresh_cmp[3:9]
+ *
+ * if unit == 6 or unit == 7
+ * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
+ * else if unit == 8 or unit == 9:
+ * if cache_sel[0] == 0: # L3 bank
+ * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
+ * else if cache_sel[0] == 1:
+ * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
+ * else if cache_sel[1]: # L1 event
+ * MMCR1[16] = cache_sel[2]
+ * MMCR1[17] = cache_sel[3]
+ *
+ * if mark:
+ * MMCRA[63] = 1 (SAMPLE_ENABLE)
+ * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
+ * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
+ *
+ * if EBB and BHRB:
+ * MMCRA[32:33] = IFM
+ *
+ * MMCRA[SDAR_MODE] = sm
+ */
+
+/*
* Some power9 event codes.
*/
#define EVENT(_name, _code) _name = _code,
@@ -31,6 +103,9 @@ enum {
#define POWER9_MMCRA_IFM2 0x0000000080000000UL
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
+/* PowerISA v2.07 format attribute structure */
+extern struct attribute_group isa207_pmu_format_group;
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
@@ -90,10 +165,16 @@ static struct attribute_group power9_pmu_events_group = {
.attrs = power9_events_attr,
};
-PMU_FORMAT_ATTR(event, "config:0-49");
+static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
+ &isa207_pmu_format_group,
+ &power9_pmu_events_group,
+ NULL,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-51");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
-PMU_FORMAT_ATTR(combine, "config:11");
+PMU_FORMAT_ATTR(combine, "config:10-11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
@@ -102,6 +183,7 @@ PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
+PMU_FORMAT_ATTR(sdar_mode, "config:50-51");
static struct attribute *power9_pmu_format_attr[] = {
&format_attr_event.attr,
@@ -116,6 +198,7 @@ static struct attribute *power9_pmu_format_attr[] = {
&format_attr_thresh_stop.attr,
&format_attr_thresh_start.attr,
&format_attr_thresh_cmp.attr,
+ &format_attr_sdar_mode.attr,
NULL,
};
@@ -291,6 +374,24 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
+static struct power_pmu power9_isa207_pmu = {
+ .name = "POWER9",
+ .n_counter = MAX_PMU_COUNTERS,
+ .add_fields = ISA207_ADD_FIELDS,
+ .test_adder = ISA207_TEST_ADDER,
+ .compute_mmcr = isa207_compute_mmcr,
+ .config_bhrb = power9_config_bhrb,
+ .bhrb_filter_map = power9_bhrb_filter_map,
+ .get_constraint = isa207_get_constraint,
+ .disable_pmc = isa207_disable_pmc,
+ .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
+ .n_generic = ARRAY_SIZE(power9_generic_events),
+ .generic_events = power9_generic_events,
+ .cache_events = &power9_cache_events,
+ .attr_groups = power9_isa207_pmu_attr_groups,
+ .bhrb_nr = 32,
+};
+
static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
@@ -311,14 +412,19 @@ static struct power_pmu power9_pmu = {
static int __init init_power9_pmu(void)
{
- int rc;
+ int rc = 0;
/* Comes from cpu_specs[] */
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
return -ENODEV;
- rc = register_power_pmu(&power9_pmu);
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ rc = register_power_pmu(&power9_isa207_pmu);
+ } else {
+ rc = register_power_pmu(&power9_pmu);
+ }
+
if (rc)
return rc;
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 1d7c1b142bf4..abc24501c4c0 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -103,18 +103,18 @@ config 405GP
bool
select IBM405_ERR77
select IBM405_ERR51
- select IBM_EMAC_ZMII
+ select IBM_EMAC_ZMII if IBM_EMAC
config 405EX
bool
- select IBM_EMAC_EMAC4
- select IBM_EMAC_RGMII
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_RGMII if IBM_EMAC
config 405EZ
bool
- select IBM_EMAC_NO_FLOW_CTRL
- select IBM_EMAC_MAL_CLR_ICINTSTAT
- select IBM_EMAC_MAL_COMMON_ERR
+ select IBM_EMAC_NO_FLOW_CTRL if IBM_EMAC
+ select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC
+ select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC
config XILINX_VIRTEX
bool
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 25b8d641ff9f..9b0afe935cc1 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -26,7 +26,7 @@ config BLUESTONE
select PCI_MSI
select PPC4xx_MSI
select PPC4xx_PCI_EXPRESS
- select IBM_EMAC_RGMII
+ select IBM_EMAC_RGMII if IBM_EMAC
help
This option enables support for the APM APM821xx Evaluation board.
@@ -125,8 +125,8 @@ config CANYONLANDS
select PPC4xx_PCI_EXPRESS
select PCI_MSI
select PPC4xx_MSI
- select IBM_EMAC_RGMII
- select IBM_EMAC_ZMII
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select IBM_EMAC_ZMII if IBM_EMAC
help
This option enables support for the AMCC PPC460EX evaluation board.
@@ -138,8 +138,8 @@ config GLACIER
select 460EX # Odd since it uses 460GT but the effects are the same
select PCI
select PPC4xx_PCI_EXPRESS
- select IBM_EMAC_RGMII
- select IBM_EMAC_ZMII
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select IBM_EMAC_ZMII if IBM_EMAC
help
This option enables support for the AMCC PPC460GT evaluation board.
@@ -164,7 +164,7 @@ config EIGER
select 460SX
select PCI
select PPC4xx_PCI_EXPRESS
- select IBM_EMAC_RGMII
+ select IBM_EMAC_RGMII if IBM_EMAC
help
This option enables support for the AMCC PPC460SX evaluation board.
@@ -213,7 +213,7 @@ config AKEBONO
select NETDEVICES
select ETHERNET
select NET_VENDOR_IBM
- select IBM_EMAC_EMAC4
+ select IBM_EMAC_EMAC4 if IBM_EMAC
select USB if USB_SUPPORT
select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
@@ -291,54 +291,54 @@ config 440EP
bool
select PPC_FPU
select IBM440EP_ERR42
- select IBM_EMAC_ZMII
+ select IBM_EMAC_ZMII if IBM_EMAC
config 440EPX
bool
select PPC_FPU
- select IBM_EMAC_EMAC4
- select IBM_EMAC_RGMII
- select IBM_EMAC_ZMII
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select IBM_EMAC_ZMII if IBM_EMAC
select USB_EHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_DESC
config 440GRX
bool
- select IBM_EMAC_EMAC4
- select IBM_EMAC_RGMII
- select IBM_EMAC_ZMII
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select IBM_EMAC_ZMII if IBM_EMAC
config 440GP
bool
- select IBM_EMAC_ZMII
+ select IBM_EMAC_ZMII if IBM_EMAC
config 440GX
bool
- select IBM_EMAC_EMAC4
- select IBM_EMAC_RGMII
- select IBM_EMAC_ZMII #test only
- select IBM_EMAC_TAH #test only
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select IBM_EMAC_ZMII if IBM_EMAC #test only
+ select IBM_EMAC_TAH if IBM_EMAC #test only
config 440SP
bool
config 440SPe
bool
- select IBM_EMAC_EMAC4
+ select IBM_EMAC_EMAC4 if IBM_EMAC
config 460EX
bool
select PPC_FPU
- select IBM_EMAC_EMAC4
- select IBM_EMAC_TAH
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_TAH if IBM_EMAC
config 460SX
bool
select PPC_FPU
- select IBM_EMAC_EMAC4
- select IBM_EMAC_RGMII
- select IBM_EMAC_ZMII
- select IBM_EMAC_TAH
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select IBM_EMAC_ZMII if IBM_EMAC
+ select IBM_EMAC_TAH if IBM_EMAC
config 476FPE
bool
@@ -347,8 +347,8 @@ config 476FPE
config APM821xx
bool
select PPC_FPU
- select IBM_EMAC_EMAC4
- select IBM_EMAC_TAH
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_TAH if IBM_EMAC
config 476FPE_ERR46
depends on 476FPE
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 24717d060008..08f92f6ed228 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -441,8 +441,4 @@ static struct platform_driver pmc_driver = {
.remove = pmc_remove
};
-static int pmc_init(void)
-{
- return platform_driver_register(&pmc_driver);
-}
-device_initcall(pmc_init);
+builtin_platform_driver(pmc_driver);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 9dc1d28975b9..47b389dc4938 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -253,6 +253,8 @@ endif # PPC32
config PPC_QEMU_E500
bool "QEMU generic e500 platform"
select DEFAULT_UIMAGE
+ select E500
+ select PPC_E500MC if PPC64
help
This option enables support for running as a QEMU guest using
QEMU's generic e500 machine. This is not required if you're
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 1179115a4b5c..3803b0addf65 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -220,7 +220,7 @@ define_machine(corenet_generic) {
*
* Likewise, problems have been seen with kexec when coreint is enabled.
*/
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)
.get_irq = mpic_get_irq,
#else
.get_irq = mpic_get_coreint_irq,
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index fe9f19e5e935..a83a6d26090d 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -349,13 +349,13 @@ struct smp_ops_t smp_85xx_ops = {
.cpu_disable = generic_cpu_disable,
.cpu_die = generic_cpu_die,
#endif
-#if defined(CONFIG_KEXEC) && !defined(CONFIG_PPC64)
+#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
#endif
};
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
@@ -458,7 +458,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
default_machine_kexec(image);
}
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
static void smp_85xx_basic_setup(int cpu_nr)
{
@@ -512,7 +512,7 @@ void __init mpc85xx_smp_init(void)
#endif
smp_ops = &smp_85xx_ops;
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index 564d99bb2a26..80cbcb0ad9b1 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -130,6 +130,7 @@ config 8xx_CPU6
config 8xx_CPU15
bool "CPU15 Silicon Errata"
+ depends on !HUGETLB_PAGE
default y
help
This enables a workaround for erratum CPU15 on MPC8xx chips.
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index fbdae8377b71..7e3a2ebba29b 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -168,17 +168,6 @@ config MPIC_BROKEN_REGREAD
well, but enabling it uses about 8KB of memory to keep copies
of the register contents in software.
-config IBMVIO
- depends on PPC_PSERIES
- bool
- default y
-
-config IBMEBUS
- depends on PPC_PSERIES
- bool "Support for GX bus based adapters"
- help
- Bus device driver for GX bus based adapters.
-
config EEH
bool
depends on (PPC_POWERNV || PPC_PSERIES) && PCI
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index ca2da30ad2ab..6e89e5a8d4fb 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -34,6 +34,7 @@ config PPC_8xx
select FSL_SOC
select 8xx
select PPC_LIB_RHEAP
+ select SYS_SUPPORTS_HUGETLBFS
config 40x
bool "AMCC 40x"
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index d9088f0b8fcc..a4522f09d65e 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -17,10 +17,10 @@ config PPC_CELL_NATIVE
select PPC_CELL_COMMON
select MPIC
select PPC_IO_WORKAROUNDS
- select IBM_EMAC_EMAC4
- select IBM_EMAC_RGMII
- select IBM_EMAC_ZMII #test only
- select IBM_EMAC_TAH #test only
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select IBM_EMAC_ZMII if IBM_EMAC #test only
+ select IBM_EMAC_TAH if IBM_EMAC #test only
default n
config PPC_IBM_CELL_BLADE
@@ -46,7 +46,6 @@ config SPU_FS
default m
depends on PPC_CELL
select SPU_BASE
- select MEMORY_HOTPLUG
help
The SPU file system is used to access Synergistic Processing
Units on machines implementing the Broadband Processor
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e84d8fbc2e21..96c2b8a40630 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -676,7 +676,7 @@ static ssize_t spu_stat_show(struct device *dev,
static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
struct crash_spu_info {
struct spu *spu;
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index dfd310031549..0409714e8070 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -263,7 +263,7 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
regs->msr |= MSR_RI;
- regs->nip = entry->fixup;
+ regs->nip = extable_fixup(entry);
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index f97bab8e37a2..9de100e22bf3 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -174,7 +174,7 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
regs->msr |= MSR_RI;
- regs->nip = entry->fixup;
+ regs->nip = extable_fixup(entry);
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index c8c217b7dd33..f627c9fd7b48 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -90,6 +90,7 @@ struct pmac_i2c_bus
int opened;
int polled; /* open mode */
struct platform_device *platform_dev;
+ struct lock_class_key lock_key;
/* ops */
int (*open)(struct pmac_i2c_bus *bus);
@@ -587,6 +588,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
bus->close = kw_i2c_close;
bus->xfer = kw_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_set_class(&bus->mutex, &bus->lock_key);
if (controller == busnode)
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
@@ -815,6 +817,7 @@ static void __init pmu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = pmu_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
@@ -938,6 +941,7 @@ static void __init smu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = smu_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = 0;
list_add(&bus->link, &pmac_i2c_busses);
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 2354ea51e871..6fb5522acd70 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -393,7 +393,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
/* Create PE */
ret = eeh_add_to_parent_pe(edev);
if (ret) {
- pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
+ pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%x)\n",
__func__, hose->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
return NULL;
@@ -1097,7 +1097,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
bus = eeh_pe_bus_get(pe);
if (!bus) {
- pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+ pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
return -EIO;
}
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index aec85e778028..73b155fd4481 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -263,7 +263,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
/* Enable the bypass window */
top = roundup_pow_of_two(top);
- dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n",
+ dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
npe->pe_number);
rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
npe->pe_number, npe->pe_number,
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index b3b8930ac52f..282293572dc8 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -632,21 +632,11 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
-static void opal_pdev_init(struct device_node *opal_node,
- const char *compatible)
+static void opal_pdev_init(const char *compatible)
{
struct device_node *np;
- for_each_child_of_node(opal_node, np)
- if (of_device_is_compatible(np, compatible))
- of_platform_device_create(np, NULL, NULL);
-}
-
-static void opal_i2c_create_devs(void)
-{
- struct device_node *np;
-
- for_each_compatible_node(np, NULL, "ibm,opal-i2c")
+ for_each_compatible_node(np, NULL, compatible)
of_platform_device_create(np, NULL, NULL);
}
@@ -718,7 +708,7 @@ static int __init opal_init(void)
opal_hmi_handler_init();
/* Create i2c platform devices */
- opal_i2c_create_devs();
+ opal_pdev_init("ibm,opal-i2c");
/* Setup a heartbeat thread if requested by OPAL */
opal_init_heartbeat();
@@ -753,12 +743,12 @@ static int __init opal_init(void)
}
/* Initialize platform devices: IPMI backend, PRD & flash interface */
- opal_pdev_init(opal_node, "ibm,opal-ipmi");
- opal_pdev_init(opal_node, "ibm,opal-flash");
- opal_pdev_init(opal_node, "ibm,opal-prd");
+ opal_pdev_init("ibm,opal-ipmi");
+ opal_pdev_init("ibm,opal-flash");
+ opal_pdev_init("ibm,opal-prd");
/* Initialise platform device: oppanel interface */
- opal_pdev_init(opal_node, "ibm,opal-oppanel");
+ opal_pdev_init("ibm,opal-oppanel");
/* Initialise OPAL kmsg dumper for flushing console on panic */
opal_kmsg_init();
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index d4b33dd2d9e7..b07680cd2518 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -83,7 +83,7 @@ void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV*/
- printk("%spci %s: [PE# %.3d] %pV",
+ printk("%spci %s: [PE# %.2x] %pV",
level, pfix, pe->pe_number, &vaf);
va_end(args);
@@ -145,8 +145,8 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
*/
rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
- if (rc != OPAL_SUCCESS)
- pr_warn("%s: Error %lld unfreezing PHB#%d-PE#%d\n",
+ if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
+ pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n",
__func__, rc, phb->hose->global_number, pe_no);
return &phb->ioda.pe_array[pe_no];
@@ -155,13 +155,13 @@ static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
{
if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) {
- pr_warn("%s: Invalid PE %d on PHB#%x\n",
+ pr_warn("%s: Invalid PE %x on PHB#%x\n",
__func__, pe_no, phb->hose->global_number);
return;
}
if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
- pr_debug("%s: PE %d was reserved on PHB#%x\n",
+ pr_debug("%s: PE %x was reserved on PHB#%x\n",
__func__, pe_no, phb->hose->global_number);
pnv_ioda_init_pe(phb, pe_no);
@@ -229,7 +229,7 @@ static int pnv_ioda2_init_m64(struct pnv_phb *phb)
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
r->end -= (2 * phb->ioda.m64_segsize);
else
- pr_warn(" Cannot strip M64 segment for reserved PE#%d\n",
+ pr_warn(" Cannot strip M64 segment for reserved PE#%x\n",
phb->ioda.reserved_pe_idx);
return 0;
@@ -291,7 +291,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
OPAL_M64_WINDOW_TYPE, index, base, 0,
PNV_IODA1_M64_SEGS * segsz);
if (rc != OPAL_SUCCESS) {
- pr_warn(" Error %lld setting M64 PHB#%d-BAR#%d\n",
+ pr_warn(" Error %lld setting M64 PHB#%x-BAR#%d\n",
rc, phb->hose->global_number, index);
goto fail;
}
@@ -300,7 +300,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
OPAL_M64_WINDOW_TYPE, index,
OPAL_ENABLE_M64_SPLIT);
if (rc != OPAL_SUCCESS) {
- pr_warn(" Error %lld enabling M64 PHB#%d-BAR#%d\n",
+ pr_warn(" Error %lld enabling M64 PHB#%x-BAR#%d\n",
rc, phb->hose->global_number, index);
goto fail;
}
@@ -316,7 +316,7 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1))
r->end -= (2 * phb->ioda.m64_segsize);
else
- WARN(1, "Wrong reserved PE#%d on PHB#%d\n",
+ WARN(1, "Wrong reserved PE#%x on PHB#%x\n",
phb->ioda.reserved_pe_idx, phb->hose->global_number);
return 0;
@@ -414,7 +414,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
pe->pe_number / PNV_IODA1_M64_SEGS,
pe->pe_number % PNV_IODA1_M64_SEGS);
if (rc != OPAL_SUCCESS)
- pr_warn("%s: Error %lld mapping M64 for PHB#%d-PE#%d\n",
+ pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
__func__, rc, phb->hose->global_number,
pe->pe_number);
}
@@ -941,14 +941,14 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
pe->mve_number = pe->pe_number;
rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
if (rc != OPAL_SUCCESS) {
- pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+ pe_err(pe, "OPAL error %ld setting up MVE %x\n",
rc, pe->mve_number);
pe->mve_number = -1;
} else {
rc = opal_pci_set_mve_enable(phb->opal_id,
pe->mve_number, OPAL_ENABLE_MVE);
if (rc) {
- pe_err(pe, "OPAL error %ld enabling MVE %d\n",
+ pe_err(pe, "OPAL error %ld enabling MVE %x\n",
rc, pe->mve_number);
pe->mve_number = -1;
}
@@ -1159,10 +1159,10 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
pe->rid = bus->busn_res.start << 8;
if (all)
- pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
+ pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
bus->busn_res.start, bus->busn_res.end, pe->pe_number);
else
- pe_info(pe, "Secondary bus %d associated with PE#%d\n",
+ pe_info(pe, "Secondary bus %d associated with PE#%x\n",
bus->busn_res.start, pe->pe_number);
if (pnv_ioda_configure_pe(phb, pe)) {
@@ -1213,7 +1213,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
* peer NPU.
*/
dev_info(&npu_pdev->dev,
- "Associating to existing PE %d\n", pe_num);
+ "Associating to existing PE %x\n", pe_num);
pci_dev_get(npu_pdev);
npu_pdn = pci_get_pdn(npu_pdev);
rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
@@ -1539,7 +1539,7 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
pci_iov_virtfn_devfn(pdev, vf_index);
- pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
+ pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
hose->global_number, pdev->bus->number,
PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
@@ -2844,7 +2844,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
pnv_set_msi_irq_chip(phb, virq);
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
- " address=%x_%08x data=%x PE# %d\n",
+ " address=%x_%08x data=%x PE# %x\n",
pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
msg->address_hi, msg->address_lo, data, pe->pe_number);
@@ -2993,7 +2993,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
rc = opal_pci_map_pe_mmio_window(phb->opal_id,
pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
if (rc != OPAL_SUCCESS) {
- pr_err("%s: Error %lld mapping IO segment#%d to PE#%d\n",
+ pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n",
__func__, rc, index, pe->pe_number);
break;
}
@@ -3017,7 +3017,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
rc = opal_pci_map_pe_mmio_window(phb->opal_id,
pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
if (rc != OPAL_SUCCESS) {
- pr_err("%s: Error %lld mapping M32 segment#%d to PE#%d",
+ pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x",
__func__, rc, index, pe->pe_number);
break;
}
@@ -3281,7 +3281,7 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pnv_pci_ioda2_setup_dma_pe(phb, pe);
break;
default:
- pr_warn("%s: No DMA for PHB#%d (type %d)\n",
+ pr_warn("%s: No DMA for PHB#%x (type %d)\n",
__func__, phb->hose->global_number, phb->type);
}
}
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index db7b8020f68e..c6d554fe585c 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -234,7 +234,7 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
int i;
data = (struct OpalIoP7IOCPhbErrorData *)common;
- pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
+ pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
hose->global_number, be32_to_cpu(common->version));
if (data->brdgCtl)
@@ -326,7 +326,7 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
int i;
data = (struct OpalIoPhb3ErrorData*)common;
- pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
+ pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
hose->global_number, be32_to_cpu(common->version));
if (data->brdgCtl)
pr_info("brdgCtl: %08x\n",
@@ -516,7 +516,7 @@ static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
}
}
- pr_devel(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
+ pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
(pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
/* Clear the frozen state if applicable */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index efe8b6bb168b..d50c7d99baaf 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -174,7 +174,7 @@ static void pnv_shutdown(void)
opal_shutdown();
}
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
static void pnv_kexec_wait_secondaries_down(void)
{
int my_cpu, i, notified = -1;
@@ -245,7 +245,7 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
}
}
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static unsigned long pnv_memory_block_size(void)
@@ -311,7 +311,7 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = NULL,
.calibrate_decr = generic_calibrate_decr,
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 3a487e7f4a5e..6244bc849469 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -250,7 +250,7 @@ static int __init ps3_probe(void)
return 1;
}
-#if defined(CONFIG_KEXEC)
+#if defined(CONFIG_KEXEC_CORE)
static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
{
int cpu = smp_processor_id();
@@ -276,7 +276,7 @@ define_machine(ps3) {
.progress = ps3_progress,
.restart = ps3_restart,
.halt = ps3_halt,
-#if defined(CONFIG_KEXEC)
+#if defined(CONFIG_KEXEC_CORE)
.kexec_cpu_down = ps3_kexec_cpu_down,
#endif
};
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index bec90fb30425..e1c280a95d58 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -127,3 +127,14 @@ config HV_PERF_CTRS
systems. 24x7 is available on Power 8 systems.
If unsure, select Y.
+
+config IBMVIO
+ depends on PPC_PSERIES
+ bool
+ default y
+
+config IBMEBUS
+ depends on PPC_PSERIES && !CPU_LITTLE_ENDIAN
+ bool "Support for GX bus based adapters"
+ help
+ Bus device driver for GX bus based adapters.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index fedc2ccf029d..8f4ba089e802 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -8,7 +8,7 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
pci.o pci_dlpar.o eeh_pseries.o msi.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCANLOG) += scanlog.o
-obj-$(CONFIG_KEXEC) += kexec.o
+obj-$(CONFIG_KEXEC_CORE) += kexec.o
obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
@@ -21,6 +21,8 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
+obj-$(CONFIG_IBMVIO) += vio.o
+obj-$(CONFIG_IBMEBUS) += ibmebus.o
ifeq ($(CONFIG_PPC_PSERIES),y)
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 66e7227469b8..972328829387 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -41,6 +41,8 @@
#include <linux/memory.h>
#include <asm/plpar_wrappers.h>
+#include "pseries.h"
+
#define CMM_DRIVER_VERSION "1.0.0"
#define CMM_DEFAULT_DELAY 1
#define CMM_HOTPLUG_DELAY 5
@@ -109,6 +111,38 @@ static int hotplug_occurred; /* protected by the hotplug mutex */
static struct task_struct *cmm_thread_ptr;
+static long plpar_page_set_loaned(unsigned long vpa)
+{
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
+}
+
+static long plpar_page_set_active(unsigned long vpa)
+{
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
+}
+
/**
* cmm_alloc_pages - Allocate pages and mark them as loaned
* @nr: number of pages to allocate
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 423e450efe07..76caa4a45ccd 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -418,84 +418,136 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
}
}
-static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count)
+static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
- struct pseries_hp_errorlog *hp_elog;
- struct completion hotplug_done;
- const char *arg;
- int rc;
+ char *arg;
- hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
- if (!hp_elog) {
- rc = -ENOMEM;
- goto dlpar_store_out;
- }
+ arg = strsep(cmd, " ");
+ if (!arg)
+ return -EINVAL;
- /* Parse out the request from the user, this will be in the form
- * <resource> <action> <id_type> <id>
- */
- arg = buf;
- if (!strncmp(arg, "memory", 6)) {
+ if (sysfs_streq(arg, "memory")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
- arg += strlen("memory ");
- } else if (!strncmp(arg, "cpu", 3)) {
+ } else if (sysfs_streq(arg, "cpu")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
- arg += strlen("cpu ");
} else {
- pr_err("Invalid resource specified: \"%s\"\n", buf);
- rc = -EINVAL;
- goto dlpar_store_out;
+ pr_err("Invalid resource specified.\n");
+ return -EINVAL;
}
- if (!strncmp(arg, "add", 3)) {
+ return 0;
+}
+
+static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
+{
+ char *arg;
+
+ arg = strsep(cmd, " ");
+ if (!arg)
+ return -EINVAL;
+
+ if (sysfs_streq(arg, "add")) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
- arg += strlen("add ");
- } else if (!strncmp(arg, "remove", 6)) {
+ } else if (sysfs_streq(arg, "remove")) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
- arg += strlen("remove ");
} else {
- pr_err("Invalid action specified: \"%s\"\n", buf);
- rc = -EINVAL;
- goto dlpar_store_out;
+ pr_err("Invalid action specified.\n");
+ return -EINVAL;
}
- if (!strncmp(arg, "index", 5)) {
- u32 index;
+ return 0;
+}
+static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
+{
+ char *arg;
+ u32 count, index;
+
+ arg = strsep(cmd, " ");
+ if (!arg)
+ return -EINVAL;
+
+ if (sysfs_streq(arg, "index")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
- arg += strlen("index ");
+ arg = strsep(cmd, " ");
+ if (!arg) {
+ pr_err("No DRC Index specified.\n");
+ return -EINVAL;
+ }
+
if (kstrtou32(arg, 0, &index)) {
- rc = -EINVAL;
- pr_err("Invalid drc_index specified: \"%s\"\n", buf);
- goto dlpar_store_out;
+ pr_err("Invalid DRC Index specified.\n");
+ return -EINVAL;
}
hp_elog->_drc_u.drc_index = cpu_to_be32(index);
- } else if (!strncmp(arg, "count", 5)) {
- u32 count;
-
+ } else if (sysfs_streq(arg, "count")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
- arg += strlen("count ");
+ arg = strsep(cmd, " ");
+ if (!arg) {
+ pr_err("No DRC count specified.\n");
+ return -EINVAL;
+ }
+
if (kstrtou32(arg, 0, &count)) {
- rc = -EINVAL;
- pr_err("Invalid count specified: \"%s\"\n", buf);
- goto dlpar_store_out;
+ pr_err("Invalid DRC count specified.\n");
+ return -EINVAL;
}
hp_elog->_drc_u.drc_count = cpu_to_be32(count);
} else {
- pr_err("Invalid id_type specified: \"%s\"\n", buf);
- rc = -EINVAL;
- goto dlpar_store_out;
+ pr_err("Invalid id_type specified.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pseries_hp_errorlog *hp_elog;
+ struct completion hotplug_done;
+ char *argbuf;
+ char *args;
+ int rc;
+
+ args = argbuf = kstrdup(buf, GFP_KERNEL);
+ hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
+ if (!hp_elog || !argbuf) {
+ pr_info("Could not allocate resources for DLPAR operation\n");
+ kfree(argbuf);
+ kfree(hp_elog);
+ return -ENOMEM;
}
+ /*
+ * Parse out the request from the user, this will be in the form:
+ * <resource> <action> <id_type> <id>
+ */
+ rc = dlpar_parse_resource(&args, hp_elog);
+ if (rc)
+ goto dlpar_store_out;
+
+ rc = dlpar_parse_action(&args, hp_elog);
+ if (rc)
+ goto dlpar_store_out;
+
+ rc = dlpar_parse_id_type(&args, hp_elog);
+ if (rc)
+ goto dlpar_store_out;
+
init_completion(&hotplug_done);
queue_hotplug_event(hp_elog, &hotplug_done, &rc);
wait_for_completion(&hotplug_done);
dlpar_store_out:
+ kfree(argbuf);
kfree(hp_elog);
+
+ if (rc)
+ pr_err("Could not handle DLPAR request \"%s\"\n", buf);
+
return rc ? rc : count;
}
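A minimal sketch of the parsing scheme the rewritten dlpar_store() relies on, assuming nothing beyond strsep(), sysfs_streq() and kstrdup(); the helper name is illustrative:

    #include <linux/slab.h>
    #include <linux/string.h>

    /*
     * Walk a sysfs command of the form "<resource> <action> <id_type> <id>",
     * e.g. "memory add count 4".  strsep() writes NULs into the buffer it
     * walks, hence the kstrdup() of the const sysfs input.
     */
    static int example_parse_resource(const char *buf)
    {
        char *argbuf, *args, *tok;
        int rc = -EINVAL;

        args = argbuf = kstrdup(buf, GFP_KERNEL);
        if (!argbuf)
            return -ENOMEM;

        tok = strsep(&args, " ");
        /*
         * sysfs_streq() ignores a trailing newline, so the "memory\n"
         * produced by `echo memory > ...` still matches, which is why it
         * replaces the old strncmp()-and-advance parsing.
         */
        if (tok && sysfs_streq(tok, "memory"))
            rc = 0;

        kfree(argbuf);
        return rc;
    }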
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 1c428f06b14c..1eef46d9cf30 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -270,7 +270,7 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
eeh_add_flag(EEH_ENABLED);
eeh_add_to_parent_pe(edev);
- pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
+ pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%x-PE#%x\n",
__func__, pdn->busno, PCI_SLOT(pdn->devfn),
PCI_FUNC(pdn->devfn), pe.phb->global_number,
pe.addr);
@@ -371,7 +371,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
pe->config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid), 0);
if (ret) {
- pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
+ pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->config_addr);
return 0;
}
@@ -384,7 +384,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
pe->config_addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid), 0);
if (ret) {
- pr_warn("%s: Failed to get address for PHB#%d-PE#%x\n",
+ pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->config_addr);
return 0;
}
@@ -653,7 +653,7 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
rtas_busy_delay(ret);
}
- pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+ pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
__func__, pe->phb->global_number, pe->addr, ret);
return ret;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 76ec104e88be..2617f9f356bd 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -472,12 +472,15 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
/* Validate that there are enough LMBs to satisfy the request */
for (i = 0; i < num_lmbs; i++) {
- if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
+ if (lmb_is_removable(&lmbs[i]))
lmbs_available++;
}
- if (lmbs_available < lmbs_to_remove)
+ if (lmbs_available < lmbs_to_remove) {
+ pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
+ lmbs_available, lmbs_to_remove);
return -EINVAL;
+ }
for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
rc = dlpar_remove_lmb(&lmbs[i]);
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index 6ca9a2ffaac7..614c28537141 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn)
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
+ struct device *dev;
int ret = 0;
root = of_find_node_by_path("/");
@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
if (!of_match_node(matches, child))
continue;
- if (bus_find_device(&ibmebus_bus_type, NULL, child,
- ibmebus_match_node))
+ dev = bus_find_device(&ibmebus_bus_type, NULL, child,
+ ibmebus_match_node);
+ if (dev) {
+ put_device(dev);
continue;
+ }
ret = ibmebus_create_device(child);
if (ret) {
@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
const char *buf, size_t count)
{
struct device_node *dn = NULL;
+ struct device *dev;
char *path;
ssize_t rc = 0;
@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus,
if (!path)
return -ENOMEM;
- if (bus_find_device(&ibmebus_bus_type, NULL, path,
- ibmebus_match_path)) {
+ dev = bus_find_device(&ibmebus_bus_type, NULL, path,
+ ibmebus_match_path);
+ if (dev) {
+ put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
@@ -307,6 +314,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
+ put_device(dev);
kfree(path);
return count;
@@ -415,303 +423,6 @@ static struct device_attribute ibmebus_bus_device_attrs[] = {
__ATTR_NULL
};
-#ifdef CONFIG_PM_SLEEP
-static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
- struct platform_device *of_dev = to_platform_device(dev);
- struct platform_driver *drv = to_platform_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->suspend)
- ret = drv->suspend(of_dev, mesg);
- return ret;
-}
-
-static int ibmebus_bus_legacy_resume(struct device *dev)
-{
- struct platform_device *of_dev = to_platform_device(dev);
- struct platform_driver *drv = to_platform_driver(dev->driver);
- int ret = 0;
-
- if (dev->driver && drv->resume)
- ret = drv->resume(of_dev);
- return ret;
-}
-
-static int ibmebus_bus_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-static void ibmebus_bus_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
-#ifdef CONFIG_SUSPEND
-
-static int ibmebus_bus_pm_suspend(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend)
- ret = drv->pm->suspend(dev);
- } else {
- ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_resume(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume)
- ret = drv->pm->resume(dev);
- } else {
- ret = ibmebus_bus_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_SUSPEND */
-
-#define ibmebus_bus_pm_suspend NULL
-#define ibmebus_bus_pm_resume NULL
-#define ibmebus_bus_pm_suspend_noirq NULL
-#define ibmebus_bus_pm_resume_noirq NULL
-
-#endif /* !CONFIG_SUSPEND */
-
-#ifdef CONFIG_HIBERNATE_CALLBACKS
-
-static int ibmebus_bus_pm_freeze(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze)
- ret = drv->pm->freeze(dev);
- } else {
- ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_thaw(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw)
- ret = drv->pm->thaw(dev);
- } else {
- ret = ibmebus_bus_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_poweroff(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff)
- ret = drv->pm->poweroff(dev);
- } else {
- ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_restore(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore)
- ret = drv->pm->restore(dev);
- } else {
- ret = ibmebus_bus_legacy_resume(dev);
- }
-
- return ret;
-}
-
-static int ibmebus_bus_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
-#else /* !CONFIG_HIBERNATE_CALLBACKS */
-
-#define ibmebus_bus_pm_freeze NULL
-#define ibmebus_bus_pm_thaw NULL
-#define ibmebus_bus_pm_poweroff NULL
-#define ibmebus_bus_pm_restore NULL
-#define ibmebus_bus_pm_freeze_noirq NULL
-#define ibmebus_bus_pm_thaw_noirq NULL
-#define ibmebus_bus_pm_poweroff_noirq NULL
-#define ibmebus_bus_pm_restore_noirq NULL
-
-#endif /* !CONFIG_HIBERNATE_CALLBACKS */
-
-static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
- .prepare = ibmebus_bus_pm_prepare,
- .complete = ibmebus_bus_pm_complete,
- .suspend = ibmebus_bus_pm_suspend,
- .resume = ibmebus_bus_pm_resume,
- .freeze = ibmebus_bus_pm_freeze,
- .thaw = ibmebus_bus_pm_thaw,
- .poweroff = ibmebus_bus_pm_poweroff,
- .restore = ibmebus_bus_pm_restore,
- .suspend_noirq = ibmebus_bus_pm_suspend_noirq,
- .resume_noirq = ibmebus_bus_pm_resume_noirq,
- .freeze_noirq = ibmebus_bus_pm_freeze_noirq,
- .thaw_noirq = ibmebus_bus_pm_thaw_noirq,
- .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq,
- .restore_noirq = ibmebus_bus_pm_restore_noirq,
-};
-
-#define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops)
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define IBMEBUS_BUS_PM_OPS_PTR NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
struct bus_type ibmebus_bus_type = {
.name = "ibmebus",
.uevent = of_device_uevent_modalias,
@@ -721,7 +432,6 @@ struct bus_type ibmebus_bus_type = {
.remove = ibmebus_bus_device_remove,
.shutdown = ibmebus_bus_device_shutdown,
.dev_attrs = ibmebus_bus_device_attrs,
- .pm = IBMEBUS_BUS_PM_OPS_PTR,
};
EXPORT_SYMBOL(ibmebus_bus_type);
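The put_device() additions above follow the driver-core rule that bus_find_device() returns its match with a reference held; a generic sketch of the balanced pattern (helper name is illustrative):

    #include <linux/device.h>

    static bool example_already_registered(struct bus_type *bus, void *data,
                                           int (*match)(struct device *, void *))
    {
        struct device *dev;

        /* bus_find_device() takes a reference on the device it returns. */
        dev = bus_find_device(bus, NULL, data, match);
        if (!dev)
            return false;

        /* Only an existence check was wanted, so drop the reference now. */
        put_device(dev);
        return true;
    }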
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a78da511ffeb..5dc1c3c6e716 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -221,7 +221,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
return -1;
}
-static void pSeries_lpar_hptab_clear(void)
+static void manual_hpte_clear_all(void)
{
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
@@ -249,6 +249,26 @@ static void pSeries_lpar_hptab_clear(void)
&(ptes[j].pteh), &(ptes[j].ptel));
}
}
+}
+
+static int hcall_hpte_clear_all(void)
+{
+ int rc;
+
+ do {
+ rc = plpar_hcall_norets(H_CLEAR_HPT);
+ } while (rc == H_CONTINUE);
+
+ return rc;
+}
+
+static void pseries_hpte_clear_all(void)
+{
+ int rc;
+
+ rc = hcall_hpte_clear_all();
+ if (rc != H_SUCCESS)
+ manual_hpte_clear_all();
#ifdef __LITTLE_ENDIAN__
/*
@@ -598,7 +618,7 @@ void __init hpte_init_pseries(void)
mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
- mmu_hash_ops.hpte_clear_all = pSeries_lpar_hptab_clear;
+ mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
}
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index afa05a2cb702..e6397976060e 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -37,6 +37,7 @@
#include <asm/mmu.h>
#include <asm/machdep.h>
+#include "pseries.h"
/*
* This isn't a module but we expose that to userspace
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index b1be7b713fe6..1361a9db534b 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -79,4 +79,23 @@ extern struct pci_controller_ops pseries_pci_controller_ops;
unsigned long pseries_memory_block_size(void);
+extern int CMO_PrPSP;
+extern int CMO_SecPSP;
+extern unsigned long CMO_PageSize;
+
+static inline int cmo_get_primary_psp(void)
+{
+ return CMO_PrPSP;
+}
+
+static inline int cmo_get_secondary_psp(void)
+{
+ return CMO_SecPSP;
+}
+
+static inline unsigned long cmo_get_page_size(void)
+{
+ return CMO_PageSize;
+}
+
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 97aa3f332f24..7736352f7279 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -367,7 +367,7 @@ void pseries_disable_reloc_on_exc(void)
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
static void pSeries_machine_kexec(struct kimage *image)
{
if (firmware_has_feature(FW_FEATURE_SET_MODE))
@@ -725,7 +725,7 @@ define_machine(pseries) {
.progress = rtas_progress,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_exception = pSeries_machine_check_exception,
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
.machine_kexec = pSeries_machine_kexec,
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/platforms/pseries/vio.c
index b3813ddb2fb4..2c8fb3ec989e 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1648,6 +1648,9 @@ static struct vio_dev *vio_find_name(const char *name)
/**
* vio_find_node - find an already-registered vio_dev
* @vnode: device_node of the virtual device we're looking for
+ *
+ * Takes a reference to the embedded struct device which needs to be dropped
+ * after use.
*/
struct vio_dev *vio_find_node(struct device_node *vnode)
{
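To illustrate the contract stated in the kernel-doc above, a short sketch of a caller honouring it (helper name is illustrative); the rpadlpar_core.c hunks further down make the same change:

    #include <linux/device.h>
    #include <linux/of.h>
    #include <asm/vio.h>

    static bool example_node_has_vio_dev(struct device_node *dn)
    {
        struct vio_dev *vdev = vio_find_node(dn);

        if (!vdev)
            return false;

        /* Drop the reference vio_find_node() took on the embedded device. */
        put_device(&vdev->dev);
        return true;
    }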
diff --git a/arch/powerpc/purgatory/.gitignore b/arch/powerpc/purgatory/.gitignore
new file mode 100644
index 000000000000..e9e66f178a6d
--- /dev/null
+++ b/arch/powerpc/purgatory/.gitignore
@@ -0,0 +1,2 @@
+kexec-purgatory.c
+purgatory.ro
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
new file mode 100644
index 000000000000..ac8793c13348
--- /dev/null
+++ b/arch/powerpc/purgatory/Makefile
@@ -0,0 +1,15 @@
+targets += trampoline.o purgatory.ro kexec-purgatory.c
+
+LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
+
+$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE
+ $(call if_changed,ld)
+
+CMD_BIN2C = $(objtree)/scripts/basic/bin2c
+quiet_cmd_bin2c = BIN2C $@
+ cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
+
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
+ $(call if_changed,bin2c)
+
+obj-y += kexec-purgatory.o
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
new file mode 100644
index 000000000000..f9760ccf4032
--- /dev/null
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -0,0 +1,128 @@
+/*
+ * kexec trampoline
+ *
+ * Based on code taken from kexec-tools and kexec-lite.
+ *
+ * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation
+ * Copyright (C) 2006, Mohan Kumar M, IBM Corporation
+ * Copyright (C) 2013, Anton Blanchard, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation (version 2 of the License).
+ */
+
+#if defined(__LITTLE_ENDIAN__)
+#define STWX_BE stwbrx
+#define LWZX_BE lwbrx
+#elif defined(__BIG_ENDIAN__)
+#define STWX_BE stwx
+#define LWZX_BE lwzx
+#else
+#error no endianness defined!
+#endif
+
+ .machine ppc64
+ .balign 256
+ .globl purgatory_start
+purgatory_start:
+ b master
+
+ /* ABI: possible run_at_load flag at 0x5c */
+ .org purgatory_start + 0x5c
+ .globl run_at_load
+run_at_load:
+ .long 0
+ .size run_at_load, . - run_at_load
+
+ /* ABI: slaves start at 60 with r3=phys */
+ .org purgatory_start + 0x60
+slave:
+ b .
+ /* ABI: end of copied region */
+ .org purgatory_start + 0x100
+ .size purgatory_start, . - purgatory_start
+
+/*
+ * The above 0x100 bytes at purgatory_start are replaced with the
+ * code from the kernel (or next stage) by setup_purgatory().
+ */
+
+master:
+ or %r1,%r1,%r1 /* low priority to let other threads catch up */
+ isync
+ mr %r17,%r3 /* save cpu id to r17 */
+ mr %r15,%r4 /* save physical address in reg15 */
+
+ or %r3,%r3,%r3 /* ok now to high priority, let's boot */
+ lis %r6,0x1
+ mtctr %r6 /* delay a bit for slaves to catch up */
+ bdnz . /* before we overwrite 0-100 again */
+
+ bl 0f /* Work out where we're running */
+0: mflr %r18
+
+ /* load device-tree address */
+ ld %r3, (dt_offset - 0b)(%r18)
+ mr %r16,%r3 /* save dt address in reg16 */
+ li %r4,20
+ LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */
+ cmpwi %r0,%r6,2 /* v2 or later? */
+ blt 1f
+ li %r4,28
+ STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */
+1:
+ /* load the kernel address */
+ ld %r4,(kernel - 0b)(%r18)
+
+ /* load the run_at_load flag */
+ /* possibly patched by kexec */
+ ld %r6,(run_at_load - 0b)(%r18)
+ /* and patch it into the kernel */
+ stw %r6,(0x5c)(%r4)
+
+ mr %r3,%r16 /* restore dt address */
+
+ li %r5,0 /* r5 will be 0 for kernel */
+
+ mfmsr %r11
+ andi. %r10,%r11,1 /* test MSR_LE */
+ bne .Little_endian
+
+ mtctr %r4 /* prepare branch to */
+ bctr /* start kernel */
+
+.Little_endian:
+ mtsrr0 %r4 /* prepare branch to */
+
+ clrrdi %r11,%r11,1 /* clear MSR_LE */
+ mtsrr1 %r11
+
+ rfid /* update MSR and start kernel */
+
+
+ .balign 8
+ .globl kernel
+kernel:
+ .llong 0x0
+ .size kernel, . - kernel
+
+ .balign 8
+ .globl dt_offset
+dt_offset:
+ .llong 0x0
+ .size dt_offset, . - dt_offset
+
+
+ .data
+ .balign 8
+.globl sha256_digest
+sha256_digest:
+ .skip 32
+ .size sha256_digest, . - sha256_digest
+
+ .balign 8
+.globl sha_regions
+sha_regions:
+ .skip 8 * 2 * 16
+ .size sha_regions, . - sha_regions
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 424b67fdb57f..5340a483cf55 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -31,7 +31,7 @@
#include <asm/prom.h>
#include <asm/fsl_lbc.h>
-static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock);
+static DEFINE_SPINLOCK(fsl_lbc_lock);
struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
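The two spellings are equivalent for a statically allocated lock; DEFINE_SPINLOCK() is simply the conventional one:

    #include <linux/spinlock.h>

    /* DEFINE_SPINLOCK(x) expands to essentially the first initializer, so
     * the conversion above is purely cosmetic. */
    static spinlock_t old_style_lock = __SPIN_LOCK_UNLOCKED(old_style_lock);
    static DEFINE_SPINLOCK(new_style_lock);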
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 1d6fd7c59fe9..232225e7f863 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -85,8 +85,4 @@ static struct platform_driver pmc_driver = {
.probe = pmc_probe,
};
-static int __init pmc_init(void)
-{
- return platform_driver_register(&pmc_driver);
-}
-device_initcall(pmc_init);
+builtin_platform_driver(pmc_driver);
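builtin_platform_driver() is assumed here to generate roughly the boilerplate it removes, i.e. a device_initcall()'d registration helper; a sketch of the equivalence, reusing the pmc_driver definition from the surrounding file:

    #include <linux/init.h>
    #include <linux/platform_device.h>

    /* Roughly what builtin_platform_driver(pmc_driver) expands to. */
    static int __init pmc_driver_init(void)
    {
        return platform_driver_register(&pmc_driver);
    }
    device_initcall(pmc_driver_init);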
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 3cc7cace194a..1c41c51f22cb 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -80,10 +80,8 @@
"3: li %1,-1\n" \
" li %0,%3\n" \
" b 2b\n" \
- ".section __ex_table,\"a\"\n" \
- PPC_LONG_ALIGN "\n" \
- PPC_LONG "1b,3b\n" \
- ".text" \
+ ".previous\n" \
+ EX_TABLE(1b, 3b) \
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
@@ -113,7 +111,7 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs)
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
0);
regs->msr |= MSR_RI;
- regs->nip = entry->fixup;
+ regs->nip = extable_fixup(entry);
return 1;
}
}
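The EX_TABLE()/extable_fixup() pair used here (and in the tsi108_pci.c hunk below) is assumed to implement relative exception-table entries; a standalone sketch of that layout, not the kernel's actual header:

    /* Each entry stores 32-bit offsets from the entry itself instead of
     * absolute addresses. */
    struct exception_table_entry {
        int insn;    /* offset to the faulting instruction */
        int fixup;   /* offset to the fixup code */
    };

    static inline unsigned long extable_fixup(const struct exception_table_entry *x)
    {
        /* Convert the relative fixup offset back into an address. */
        return (unsigned long)&x->fixup + x->fixup;
    }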
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index d93056eedcb0..19101f9cfcfc 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -77,13 +77,10 @@ phys_addr_t get_immrbase(void)
EXPORT_SYMBOL(get_immrbase);
-static u32 sysfreq = -1;
-
u32 fsl_get_sys_freq(void)
{
+ static u32 sysfreq = -1;
struct device_node *soc;
- const u32 *prop;
- int size;
if (sysfreq != -1)
return sysfreq;
@@ -92,12 +89,9 @@ u32 fsl_get_sys_freq(void)
if (!soc)
return -1;
- prop = of_get_property(soc, "clock-frequency", &size);
- if (!prop || size != sizeof(*prop) || *prop == 0)
- prop = of_get_property(soc, "bus-frequency", &size);
-
- if (prop && size == sizeof(*prop))
- sysfreq = *prop;
+ of_property_read_u32(soc, "clock-frequency", &sysfreq);
+ if (sysfreq == -1 || !sysfreq)
+ of_property_read_u32(soc, "bus-frequency", &sysfreq);
of_node_put(soc);
return sysfreq;
@@ -106,23 +100,17 @@ EXPORT_SYMBOL(fsl_get_sys_freq);
#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
-static u32 brgfreq = -1;
-
u32 get_brgfreq(void)
{
+ static u32 brgfreq = -1;
struct device_node *node;
- const unsigned int *prop;
- int size;
if (brgfreq != -1)
return brgfreq;
node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
if (node) {
- prop = of_get_property(node, "clock-frequency", &size);
- if (prop && size == 4)
- brgfreq = *prop;
-
+ of_property_read_u32(node, "clock-frequency", &brgfreq);
of_node_put(node);
return brgfreq;
}
@@ -135,15 +123,11 @@ u32 get_brgfreq(void)
node = of_find_node_by_type(NULL, "qe");
if (node) {
- prop = of_get_property(node, "brg-frequency", &size);
- if (prop && size == 4)
- brgfreq = *prop;
-
- if (brgfreq == -1 || brgfreq == 0) {
- prop = of_get_property(node, "bus-frequency", &size);
- if (prop && size == 4)
- brgfreq = *prop / 2;
- }
+ of_property_read_u32(node, "brg-frequency", &brgfreq);
+ if (brgfreq == -1 || !brgfreq)
+ if (!of_property_read_u32(node, "bus-frequency",
+ &brgfreq))
+ brgfreq /= 2;
of_node_put(node);
}
@@ -152,10 +136,9 @@ u32 get_brgfreq(void)
EXPORT_SYMBOL(get_brgfreq);
-static u32 fs_baudrate = -1;
-
u32 get_baudrate(void)
{
+ static u32 fs_baudrate = -1;
struct device_node *node;
if (fs_baudrate != -1)
@@ -163,12 +146,7 @@ u32 get_baudrate(void)
node = of_find_node_by_type(NULL, "serial");
if (node) {
- int size;
- const unsigned int *prop = of_get_property(node,
- "current-speed", &size);
-
- if (prop)
- fs_baudrate = *prop;
+ of_property_read_u32(node, "current-speed", &fs_baudrate);
of_node_put(node);
}
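The conversions above lean on of_property_read_u32() leaving its output untouched on error; a minimal sketch of the sentinel-plus-fallback pattern (function name is illustrative):

    #include <linux/of.h>

    static u32 example_read_freq(struct device_node *np)
    {
        u32 freq = -1;    /* sentinel, preserved if neither property exists */

        /* of_property_read_u32() only writes *out on success, so a missing
         * or malformed "clock-frequency" leaves the sentinel in place. */
        of_property_read_u32(np, "clock-frequency", &freq);
        if (freq == -1 || !freq)
            of_property_read_u32(np, "bus-frequency", &freq);

        return freq;
    }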
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 57c971b7839c..53a16aa4d384 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -137,10 +137,8 @@ void tsi108_clear_pci_error(u32 pci_cfg_base)
".section .fixup,\"ax\"\n" \
"3: li %0,-1\n" \
" b 2b\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 2\n" \
- " .long 1b,3b\n" \
- ".text" \
+ ".previous\n" \
+ EX_TABLE(1b, 3b) \
: "=r"(x) : "r"(addr))
int
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 760545519a0b..9c0e17cf6886 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -10,6 +10,8 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>
@@ -225,6 +227,7 @@ Commands:\n\
#endif
"\
dr dump stream of raw bytes\n\
+ dt dump the tracing buffers (uses printk)\n\
e print exception information\n\
f flush cache\n\
la lookup symbol+offset of specified address\n\
@@ -2364,6 +2367,9 @@ dump(void)
dump_log_buf();
} else if (c == 'o') {
dump_opal_msglog();
+ } else if (c == 't') {
+ ftrace_dump(DUMP_ALL);
+ tracing_on();
} else if (c == 'r') {
scanhex(&ndump);
if (ndump == 0)
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 650830e39e3a..3741461c63a0 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -631,9 +631,9 @@ static int determine_backup_region(u64 start, u64 end, void *arg)
int crash_load_segments(struct kimage *image)
{
- unsigned long src_start, src_sz, elf_sz;
- void *elf_addr;
int ret;
+ struct kexec_buf kbuf = { .image = image, .buf_min = 0,
+ .buf_max = ULONG_MAX, .top_down = false };
/*
* Determine and load a segment for backup area. First 640K RAM
@@ -647,43 +647,44 @@ int crash_load_segments(struct kimage *image)
if (ret < 0)
return ret;
- src_start = image->arch.backup_src_start;
- src_sz = image->arch.backup_src_sz;
-
/* Add backup segment. */
- if (src_sz) {
+ if (image->arch.backup_src_sz) {
+ kbuf.buffer = &crash_zero_bytes;
+ kbuf.bufsz = sizeof(crash_zero_bytes);
+ kbuf.memsz = image->arch.backup_src_sz;
+ kbuf.buf_align = PAGE_SIZE;
/*
* Ideally there is no source for backup segment. This is
* copied in purgatory after crash. Just add a zero filled
* segment for now to make sure checksum logic works fine.
*/
- ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
- sizeof(crash_zero_bytes), src_sz,
- PAGE_SIZE, 0, -1, 0,
- &image->arch.backup_load_addr);
+ ret = kexec_add_buffer(&kbuf);
if (ret)
return ret;
+ image->arch.backup_load_addr = kbuf.mem;
pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
- image->arch.backup_load_addr, src_start, src_sz);
+ image->arch.backup_load_addr,
+ image->arch.backup_src_start, kbuf.memsz);
}
/* Prepare elf headers and add a segment */
- ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
+ ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
if (ret)
return ret;
- image->arch.elf_headers = elf_addr;
- image->arch.elf_headers_sz = elf_sz;
+ image->arch.elf_headers = kbuf.buffer;
+ image->arch.elf_headers_sz = kbuf.bufsz;
- ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
- ELF_CORE_HEADER_ALIGN, 0, -1, 0,
- &image->arch.elf_load_addr);
+ kbuf.memsz = kbuf.bufsz;
+ kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+ ret = kexec_add_buffer(&kbuf);
if (ret) {
vfree((void *)image->arch.elf_headers);
return ret;
}
+ image->arch.elf_load_addr = kbuf.mem;
pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- image->arch.elf_load_addr, elf_sz, elf_sz);
+ image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);
return ret;
}
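A minimal sketch of the struct kexec_buf interface that the crash.c conversion above (and the bzImage64 hunks below) move to; the field usage mirrors the hunks, error handling is elided, and the helper name is illustrative:

    #include <linux/kernel.h>
    #include <linux/kexec.h>

    static int example_load_blob(struct kimage *image, void *blob,
                                 unsigned long len, unsigned long *load_addr)
    {
        struct kexec_buf kbuf = {
            .image     = image,
            .buffer    = blob,
            .bufsz     = len,
            .memsz     = len,
            .buf_align = PAGE_SIZE,
            .buf_min   = 0,
            .buf_max   = ULONG_MAX,
            .top_down  = false,
        };
        int ret;

        ret = kexec_add_buffer(&kbuf);
        if (ret)
            return ret;

        /* The chosen load address comes back through kbuf.mem. */
        *load_addr = kbuf.mem;
        return 0;
    }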
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 3407b148c240..d0a814a9d96a 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -331,17 +331,17 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
struct setup_header *header;
int setup_sects, kern16_size, ret = 0;
- unsigned long setup_header_size, params_cmdline_sz, params_misc_sz;
+ unsigned long setup_header_size, params_cmdline_sz;
struct boot_params *params;
unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr;
unsigned long purgatory_load_addr;
- unsigned long kernel_bufsz, kernel_memsz, kernel_align;
- char *kernel_buf;
struct bzimage64_data *ldata;
struct kexec_entry64_regs regs64;
void *stack;
unsigned int setup_hdr_offset = offsetof(struct boot_params, hdr);
unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset;
+ struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX,
+ .top_down = true };
header = (struct setup_header *)(kernel + setup_hdr_offset);
setup_sects = header->setup_sects;
@@ -402,11 +402,11 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
MAX_ELFCOREHDR_STR_LEN;
params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
- params_misc_sz = params_cmdline_sz + efi_map_sz +
+ kbuf.bufsz = params_cmdline_sz + efi_map_sz +
sizeof(struct setup_data) +
sizeof(struct efi_setup_data);
- params = kzalloc(params_misc_sz, GFP_KERNEL);
+ params = kzalloc(kbuf.bufsz, GFP_KERNEL);
if (!params)
return ERR_PTR(-ENOMEM);
efi_map_offset = params_cmdline_sz;
@@ -418,37 +418,41 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
/* Is there a limit on setup header size? */
memcpy(&params->hdr, (kernel + setup_hdr_offset), setup_header_size);
- ret = kexec_add_buffer(image, (char *)params, params_misc_sz,
- params_misc_sz, 16, MIN_BOOTPARAM_ADDR,
- ULONG_MAX, 1, &bootparam_load_addr);
+ kbuf.buffer = params;
+ kbuf.memsz = kbuf.bufsz;
+ kbuf.buf_align = 16;
+ kbuf.buf_min = MIN_BOOTPARAM_ADDR;
+ ret = kexec_add_buffer(&kbuf);
if (ret)
goto out_free_params;
+ bootparam_load_addr = kbuf.mem;
pr_debug("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- bootparam_load_addr, params_misc_sz, params_misc_sz);
+ bootparam_load_addr, kbuf.bufsz, kbuf.bufsz);
/* Load kernel */
- kernel_buf = kernel + kern16_size;
- kernel_bufsz = kernel_len - kern16_size;
- kernel_memsz = PAGE_ALIGN(header->init_size);
- kernel_align = header->kernel_alignment;
-
- ret = kexec_add_buffer(image, kernel_buf,
- kernel_bufsz, kernel_memsz, kernel_align,
- MIN_KERNEL_LOAD_ADDR, ULONG_MAX, 1,
- &kernel_load_addr);
+ kbuf.buffer = kernel + kern16_size;
+ kbuf.bufsz = kernel_len - kern16_size;
+ kbuf.memsz = PAGE_ALIGN(header->init_size);
+ kbuf.buf_align = header->kernel_alignment;
+ kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+ ret = kexec_add_buffer(&kbuf);
if (ret)
goto out_free_params;
+ kernel_load_addr = kbuf.mem;
pr_debug("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- kernel_load_addr, kernel_memsz, kernel_memsz);
+ kernel_load_addr, kbuf.bufsz, kbuf.memsz);
/* Load initrd high */
if (initrd) {
- ret = kexec_add_buffer(image, initrd, initrd_len, initrd_len,
- PAGE_SIZE, MIN_INITRD_LOAD_ADDR,
- ULONG_MAX, 1, &initrd_load_addr);
+ kbuf.buffer = initrd;
+ kbuf.bufsz = kbuf.memsz = initrd_len;
+ kbuf.buf_align = PAGE_SIZE;
+ kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
+ ret = kexec_add_buffer(&kbuf);
if (ret)
goto out_free_params;
+ initrd_load_addr = kbuf.mem;
pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
initrd_load_addr, initrd_len, initrd_len);
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d28690f6e262..5d80810934df 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -102,7 +102,6 @@ config ADB_PMU_LED_DISK
bool "Use front LED as DISK LED by default"
depends on ADB_PMU_LED
depends on LEDS_CLASS
- depends on IDE_GD_ATA
select LEDS_TRIGGERS
select LEDS_TRIGGER_DISK
help
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2e5233b60971..1b35e33d2434 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -9,18 +9,119 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <misc/cxl.h>
-#include <linux/fs.h>
#include <asm/pnv-pci.h>
#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/mount.h>
#include "cxl.h"
+/*
+ * Since we want to track memory mappings to be able to force-unmap
+ * when the AFU is no longer reachable, we need an inode. For devices
+ * opened through the cxl user API, this is not a problem, but a
+ * userland process can also get a cxl fd through the cxl_get_fd()
+ * API, which is used by the cxlflash driver.
+ *
+ * Therefore we implement our own simple pseudo-filesystem and inode
+ * allocator. We don't use the anonymous inode, as we need the
+ * meta-data associated with it (address_space) and it is shared by
+ * other drivers/processes, so it could lead to cxl unmapping VMAs
+ * from random processes.
+ */
+
+#define CXL_PSEUDO_FS_MAGIC 0x1697697f
+
+static int cxl_fs_cnt;
+static struct vfsmount *cxl_vfs_mount;
+
+static const struct dentry_operations cxl_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static struct dentry *cxl_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type, "cxl:", NULL, &cxl_fs_dops,
+ CXL_PSEUDO_FS_MAGIC);
+}
+
+static struct file_system_type cxl_fs_type = {
+ .name = "cxl",
+ .owner = THIS_MODULE,
+ .mount = cxl_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+
+void cxl_release_mapping(struct cxl_context *ctx)
+{
+ if (ctx->kernelapi && ctx->mapping)
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+}
+
+static struct file *cxl_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags)
+{
+ struct qstr this;
+ struct path path;
+ struct file *file;
+ struct inode *inode = NULL;
+ int rc;
+
+ /* strongly inspired by anon_inode_getfile() */
+
+ if (fops->owner && !try_module_get(fops->owner))
+ return ERR_PTR(-ENOENT);
+
+ rc = simple_pin_fs(&cxl_fs_type, &cxl_vfs_mount, &cxl_fs_cnt);
+ if (rc < 0) {
+ pr_err("Cannot mount cxl pseudo filesystem: %d\n", rc);
+ file = ERR_PTR(rc);
+ goto err_module;
+ }
+
+ inode = alloc_anon_inode(cxl_vfs_mount->mnt_sb);
+ if (IS_ERR(inode)) {
+ file = ERR_CAST(inode);
+ goto err_fs;
+ }
+
+ file = ERR_PTR(-ENOMEM);
+ this.name = name;
+ this.len = strlen(name);
+ this.hash = 0;
+ path.dentry = d_alloc_pseudo(cxl_vfs_mount->mnt_sb, &this);
+ if (!path.dentry)
+ goto err_inode;
+
+ path.mnt = mntget(cxl_vfs_mount);
+ d_instantiate(path.dentry, inode);
+
+ file = alloc_file(&path, OPEN_FMODE(flags), fops);
+ if (IS_ERR(file))
+ goto err_dput;
+ file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
+ file->private_data = priv;
+
+ return file;
+
+err_dput:
+ path_put(&path);
+err_inode:
+ iput(inode);
+err_fs:
+ simple_release_fs(&cxl_vfs_mount, &cxl_fs_cnt);
+err_module:
+ module_put(fops->owner);
+ return file;
+}
+
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
{
- struct address_space *mapping;
struct cxl_afu *afu;
struct cxl_context *ctx;
int rc;
@@ -30,38 +131,20 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
return ERR_CAST(afu);
ctx = cxl_context_alloc();
- if (IS_ERR(ctx)) {
- rc = PTR_ERR(ctx);
- goto err_dev;
- }
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
ctx->kernelapi = true;
- /*
- * Make our own address space since we won't have one from the
- * filesystem like the user api has, and even if we do associate a file
- * with this context we don't want to use the global anonymous inode's
- * address space as that can invalidate unrelated users:
- */
- mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
- if (!mapping) {
- rc = -ENOMEM;
- goto err_ctx;
- }
- address_space_init_once(mapping);
-
/* Make it a slave context. We can promote it later? */
- rc = cxl_context_init(ctx, afu, false, mapping);
+ rc = cxl_context_init(ctx, afu, false);
if (rc)
- goto err_mapping;
+ goto err_ctx;
return ctx;
-err_mapping:
- kfree(mapping);
err_ctx:
kfree(ctx);
-err_dev:
return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -340,6 +423,11 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
{
struct file *file;
int rc, flags, fdtmp;
+ char *name = NULL;
+
+ /* only allow one per context */
+ if (ctx->mapping)
+ return ERR_PTR(-EEXIST);
flags = O_RDWR | O_CLOEXEC;
@@ -363,12 +451,13 @@ struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
} else /* use default ops */
fops = (struct file_operations *)&afu_fops;
- file = anon_inode_getfile("cxl", fops, ctx, flags);
+ name = kasprintf(GFP_KERNEL, "cxl:%d", ctx->pe);
+ file = cxl_getfile(name, fops, ctx, flags);
+ kfree(name);
if (IS_ERR(file))
goto err_fd;
- file->f_mapping = ctx->mapping;
-
+ cxl_context_set_mapping(ctx, file->f_mapping);
*fd = fdtmp;
return file;
@@ -541,7 +630,7 @@ int _cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (remaining > 0) {
new_ctx = cxl_dev_context_init(pdev);
- if (!new_ctx) {
+ if (IS_ERR(new_ctx)) {
pr_warn("%s: Failed to allocate enough contexts for MSIs\n", pci_name(pdev));
return -ENOSPC;
}
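The IS_ERR() conversions in the pci.c and phb.c hunks below exist because cxl_dev_context_init() reports failure through ERR_PTR(), never NULL; a short sketch of the expected caller pattern (helper name is illustrative):

    #include <linux/err.h>
    #include <linux/pci.h>
    #include <misc/cxl.h>

    static int example_init_ctx(struct pci_dev *dev)
    {
        struct cxl_context *ctx = cxl_dev_context_init(dev);

        /* Failure is ERR_PTR(-ENOMEM) etc., so a NULL check would miss it. */
        if (IS_ERR(ctx))
            return PTR_ERR(ctx);

        /* ... use ctx, then release it when done ... */
        return 0;
    }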
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 5d36dcc7f47e..3907387b6d15 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -34,8 +34,7 @@ struct cxl_context *cxl_context_alloc(void)
/*
* Initialises a CXL context.
*/
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
int i;
@@ -44,7 +43,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
ctx->master = master;
ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
mutex_init(&ctx->mapping_lock);
- ctx->mapping = mapping;
+ ctx->mapping = NULL;
/*
* Allocate the segment table before we put it in the IDR so that we
@@ -114,6 +113,14 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping)
+{
+ mutex_lock(&ctx->mapping_lock);
+ ctx->mapping = mapping;
+ mutex_unlock(&ctx->mapping_lock);
+}
+
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct cxl_context *ctx = vma->vm_file->private_data;
@@ -299,8 +306,6 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->ff_page)
__free_page(ctx->ff_page);
ctx->sstp = NULL;
- if (ctx->kernelapi)
- kfree(ctx->mapping);
kfree(ctx->irq_bitmap);
@@ -312,6 +317,8 @@ static void reclaim_ctx(struct rcu_head *rcu)
void cxl_context_free(struct cxl_context *ctx)
{
+ if (ctx->kernelapi && ctx->mapping)
+ cxl_release_mapping(ctx);
mutex_lock(&ctx->afu->contexts_lock);
idr_remove(&ctx->afu->contexts_idr, ctx->pe);
mutex_unlock(&ctx->afu->contexts_lock);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a144073593fa..b24d76723fb0 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -817,8 +817,9 @@ void cxl_dump_debug_buffer(void *addr, size_t size);
void init_cxl_native(void);
struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
- struct address_space *mapping);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+void cxl_context_set_mapping(struct cxl_context *ctx,
+ struct address_space *mapping);
void cxl_context_free(struct cxl_context *ctx);
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
@@ -877,6 +878,7 @@ void cxl_native_err_irq_dump_regs(struct cxl *adapter);
void cxl_stop_trace(struct cxl *cxl);
int cxl_pci_vphb_add(struct cxl_afu *afu);
void cxl_pci_vphb_remove(struct cxl_afu *afu);
+void cxl_release_mapping(struct cxl_context *ctx);
extern struct pci_driver cxl_pci_driver;
extern struct platform_driver cxl_of_driver;
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index ec7b8a017439..9c06ac8fa5ac 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -43,12 +43,14 @@ static int debugfs_io_u64_set(void *data, u64 val)
out_be64((u64 __iomem *)data, val);
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set, "0x%016llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_io_x64, debugfs_io_u64_get, debugfs_io_u64_set,
+ "0x%016llx\n");
static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
struct dentry *parent, u64 __iomem *value)
{
- return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
+ return debugfs_create_file_unsafe(name, mode, parent,
+ (void __force *)value, &fops_io_x64);
}
void cxl_debugfs_add_adapter_psl_regs(struct cxl *adapter, struct dentry *dir)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 77080cc5fa0a..859959f19f10 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -86,9 +86,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
goto err_put_afu;
}
- if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
+ rc = cxl_context_init(ctx, afu, master);
+ if (rc)
goto err_put_afu;
+ cxl_context_set_mapping(ctx, inode->i_mapping);
+
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
cxl_ctx_get();
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3e102cd6ed91..e04bc4ddfd74 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -887,7 +887,7 @@ static void afu_handle_errstate(struct work_struct *work)
afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
return;
- if (afu_guest->handle_err == true)
+ if (afu_guest->handle_err)
schedule_delayed_work(&afu_guest->work_err,
msecs_to_jiffies(3000));
}
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index dec60f58a767..1a402bbed687 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -104,7 +104,7 @@ irqreturn_t cxl_irq(int irq, struct cxl_context *ctx, struct cxl_irq_info *irq_i
} else {
spin_lock(&ctx->lock);
ctx->afu_err = irq_info->afu_err;
- ctx->pending_afu_err = 1;
+ ctx->pending_afu_err = true;
spin_unlock(&ctx->lock);
wake_up_all(&ctx->wq);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index a217a74ccc98..09505f432eda 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -10,7 +10,6 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -54,7 +53,7 @@ static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
AFU_Cntl | command);
cpu_relax();
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
- };
+ }
if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
/*
@@ -167,7 +166,7 @@ int cxl_psl_purge(struct cxl_afu *afu)
cpu_relax();
}
PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
- };
+ }
end = local_clock();
pr_devel("PSL purged in %lld ns\n", end - start);
@@ -931,9 +930,18 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
struct cxl_afu *afu = data;
struct cxl_context *ctx;
struct cxl_irq_info irq_info;
- int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
- int ret;
-
+ u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
+ int ph, ret;
+
+ /* check if eeh kicked in while the interrupt was in flight */
+ if (unlikely(phreg == ~0ULL)) {
+ dev_warn(&afu->dev,
+ "Ignoring slice interrupt(%d) due to fenced card",
+ irq);
+ return IRQ_HANDLED;
+ }
+ /* Mask the pe-handle from register value */
+ ph = phreg & 0xffff;
if ((ret = native_get_irq_info(afu, &irq_info))) {
WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
return fail_psl_irq(afu, &irq_info);
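
The new check in native_irq_multiplexed() relies on a fenced (EEH-isolated) card returning all-ones for MMIO reads, so a PE-handle register value of ~0ULL means the interrupt should be dropped rather than decoded. A rough sketch of that defensive read, with a hypothetical handle_slice_irq() standing in for the cxl handler:

	#include <linux/device.h>
	#include <linux/irqreturn.h>
	#include <linux/kernel.h>

	/*
	 * 'reg' would be the raw PE-handle register value. A fenced PCI
	 * function returns all-ones for every MMIO read, so a register that
	 * can never legitimately be ~0 doubles as a liveness check.
	 */
	static irqreturn_t handle_slice_irq(struct device *dev, int irq, u64 reg)
	{
		int pe_handle;

		if (unlikely(reg == ~0ULL)) {
			dev_warn(dev, "ignoring slice interrupt %d: card is fenced\n",
				 irq);
			return IRQ_HANDLED;
		}

		/* Only the low 16 bits carry the PE handle. */
		pe_handle = reg & 0xffff;
		/* ... dispatch to the context owning pe_handle ... */
		(void)pe_handle;

		return IRQ_HANDLED;
	}
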
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index e96be9ca4e60..80a87ab25b83 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1921,7 +1921,7 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
goto err;
ctx = cxl_dev_context_init(afu_dev);
- if (!ctx)
+ if (IS_ERR(ctx))
goto err;
afu_dev->dev.archdata.cxl_ctx = ctx;
diff --git a/drivers/misc/cxl/phb.c b/drivers/misc/cxl/phb.c
index 0935d44c1770..6ec69ada19f4 100644
--- a/drivers/misc/cxl/phb.c
+++ b/drivers/misc/cxl/phb.c
@@ -20,7 +20,7 @@ bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu
* in the virtual phb, we'll need a default context to attach them to.
*/
ctx = cxl_dev_context_init(dev);
- if (!ctx)
+ if (IS_ERR(ctx))
return false;
dev->dev.archdata.cxl_ctx = ctx;
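
The two IS_ERR() fixes above matter because cxl_dev_context_init() reports failure as an ERR_PTR()-encoded pointer, which a plain NULL test lets through. A generic sketch of the ERR_PTR()/IS_ERR() convention, using a hypothetical my_ctx_create() rather than the cxl API:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct my_ctx {
		int id;
	};

	/* Constructor that encodes the errno in the returned pointer. */
	static struct my_ctx *my_ctx_create(void)
	{
		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return ERR_PTR(-ENOMEM);
		return ctx;
	}

	static int my_ctx_user(void)
	{
		struct my_ctx *ctx = my_ctx_create();

		/* IS_ERR(), not a NULL check, catches ERR_PTR() returns. */
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);

		kfree(ctx);
		return 0;
	}
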
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index dc67f39779ec..c614ff7c3bc3 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -257,8 +257,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
{
- if (vio_find_node(dn))
+ struct vio_dev *vio_dev;
+
+ vio_dev = vio_find_node(dn);
+ if (vio_dev) {
+ put_device(&vio_dev->dev);
return -EINVAL;
+ }
if (!vio_register_device_node(dn)) {
printk(KERN_ERR
@@ -334,6 +339,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
return -EINVAL;
vio_unregister_device(vio_dev);
+
+ put_device(&vio_dev->dev);
+
return 0;
}
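
The rpadlpar changes balance the reference that vio_find_node() takes on the device it returns; the old code leaked it on the -EINVAL path and after vio_unregister_device(). A small sketch of the same lookup-then-put discipline using device_find_child(), which likewise hands back its result with a reference held:

	#include <linux/device.h>
	#include <linux/errno.h>

	static int match_any(struct device *dev, void *data)
	{
		return 1;
	}

	/*
	 * Every exit path that received a non-NULL device must drop the
	 * reference the lookup helper took, via put_device().
	 */
	static int use_first_child(struct device *parent)
	{
		struct device *child = device_find_child(parent, NULL, match_any);

		if (!child)
			return -ENODEV;

		/* ... operate on child ... */

		put_device(child);
		return 0;
	}
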
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
index ffa48fdbb1a9..a3d6d7cfa929 100644
--- a/drivers/soc/fsl/qbman/bman.c
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -167,12 +167,12 @@ struct bm_portal {
/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -188,7 +188,7 @@ static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
struct bman_portal {
@@ -391,7 +391,7 @@ static void bm_rcr_finish(struct bm_portal *portal)
i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr_ptr2idx(rcr->cursor))
- pr_crit("losing uncommited RCR entries\n");
+ pr_crit("losing uncommitted RCR entries\n");
i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
if (i != rcr->ci)
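
The bman accessor hunks wrap __raw_readl()/__raw_writel() in be32_to_cpu()/cpu_to_be32() because the QBMan portal registers are big-endian on the bus while the raw accessors do no byte swapping, so the unswapped code only behaved on big-endian CPUs. A condensed sketch of such accessors over a hypothetical portal mapping (portal_in/portal_out are illustrative names):

	#include <linux/io.h>
	#include <asm/byteorder.h>

	struct my_portal {
		void __iomem *ci;	/* cache-inhibited register window */
	};

	/* Registers are big-endian; swap explicitly around the raw I/O. */
	static inline u32 portal_in(struct my_portal *p, u32 offset)
	{
		return be32_to_cpu(__raw_readl(p->ci + offset));
	}

	static inline void portal_out(struct my_portal *p, u32 offset, u32 val)
	{
		__raw_writel(cpu_to_be32(val), p->ci + offset);
	}
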
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 9deb0524543f..a8e8389a6894 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -181,8 +181,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
node->full_name);
return -ENXIO;
}
- bm_ccsr_start = devm_ioremap(dev, res->start,
- res->end - res->start + 1);
+ bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
if (!bm_ccsr_start)
return -ENXIO;
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 986f64690e6e..8354d4dabdad 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -126,15 +126,19 @@ static int bman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -150,8 +154,10 @@ static int bman_portal_probe(struct platform_device *pdev)
spin_unlock(&bman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -159,10 +165,11 @@ static int bman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
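
The bman_portal probe rework adds an err_portal_init label so that an init_pcfg() failure unmaps both portal windows; the old code jumped to err_ioremap2 and leaked the CI mapping. A compact sketch of the reverse-order unwind, with hypothetical helpers (my_setup() standing in for init_pcfg()):

	#include <linux/io.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Hypothetical stand-in for init_pcfg(). */
	static int my_setup(void __iomem *ce, void __iomem *ci)
	{
		return 0;
	}

	static int my_probe(phys_addr_t ce_phys, phys_addr_t ci_phys, size_t sz)
	{
		void __iomem *ce, *ci;

		ce = ioremap(ce_phys, sz);
		if (!ce)
			goto err_ce;

		ci = ioremap(ci_phys, sz);
		if (!ci)
			goto err_ci;

		if (my_setup(ce, ci))
			goto err_setup;

		return 0;

		/* Unwind in reverse order of acquisition. */
	err_setup:
		iounmap(ci);
	err_ci:
		iounmap(ce);
	err_ce:
		return -ENXIO;
	}
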
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index b63fd72295c6..2eaf3184f61d 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -38,6 +38,7 @@
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/prefetch.h>
#include <linux/genalloc.h>
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 119054bc922b..6f509f68085e 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -140,10 +140,10 @@ enum qm_mr_cmode { /* matches QCSP_CFG::MM */
struct qm_eqcr_entry {
u8 _ncw_verb; /* writes to this are non-coherent */
u8 dca;
- u16 seqnum;
- u32 orp; /* 24-bit */
- u32 fqid; /* 24-bit */
- u32 tag;
+ __be16 seqnum;
+ u8 __reserved[4];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved3[32];
} __packed;
@@ -183,41 +183,22 @@ struct qm_mr {
};
/* MC (Management Command) command */
-/* "Query FQ" */
-struct qm_mcc_queryfq {
+/* "FQ" command layout */
+struct qm_mcc_fq {
u8 _ncw_verb;
u8 __reserved1[3];
- u32 fqid; /* 24-bit */
+ __be32 fqid; /* 24-bit */
u8 __reserved2[56];
} __packed;
-/* "Alter FQ State Commands " */
-struct qm_mcc_alterfq {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2;
- u8 count; /* number of consecutive FQID */
- u8 __reserved3[10];
- u32 context_b; /* frame queue context b */
- u8 __reserved4[40];
-} __packed;
-/* "Query CGR" */
-struct qm_mcc_querycgr {
+/* "CGR" command layout */
+struct qm_mcc_cgr {
u8 _ncw_verb;
u8 __reserved1[30];
u8 cgid;
u8 __reserved2[32];
};
-struct qm_mcc_querywq {
- u8 _ncw_verb;
- u8 __reserved;
- /* select channel if verb != QUERYWQ_DEDICATED */
- u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
- u8 __reserved2[60];
-} __packed;
-
#define QM_MCC_VERB_VBIT 0x80
#define QM_MCC_VERB_MASK 0x7f /* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED 0x40
@@ -243,12 +224,9 @@ union qm_mc_command {
u8 __reserved[63];
};
struct qm_mcc_initfq initfq;
- struct qm_mcc_queryfq queryfq;
- struct qm_mcc_alterfq alterfq;
struct qm_mcc_initcgr initcgr;
- struct qm_mcc_querycgr querycgr;
- struct qm_mcc_querywq querywq;
- struct qm_mcc_queryfq_np queryfq_np;
+ struct qm_mcc_fq fq;
+ struct qm_mcc_cgr cgr;
};
/* MC (Management Command) result */
@@ -343,12 +321,12 @@ struct qm_portal {
/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ci + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
- __raw_writel(val, p->addr.ci + offset);
+ __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
/* Cache Enabled Portal Access */
@@ -364,7 +342,7 @@ static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
- return __raw_readl(p->addr.ce + offset);
+ return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
/* --- EQCR API --- */
@@ -443,7 +421,7 @@ static inline void qm_eqcr_finish(struct qm_portal *portal)
DPAA_ASSERT(!eqcr->busy);
if (pi != eqcr_ptr2idx(eqcr->cursor))
- pr_crit("losing uncommited EQCR entries\n");
+ pr_crit("losing uncommitted EQCR entries\n");
if (ci != eqcr->ci)
pr_crit("missing existing EQCR completions\n");
if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
@@ -492,8 +470,7 @@ static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
DPAA_ASSERT(eqcr->busy);
- DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
- DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+ DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
DPAA_ASSERT(eqcr->available >= 1);
}
@@ -962,8 +939,6 @@ struct qman_portal {
u32 sdqcr;
/* probing time config params for cpu-affine portals */
const struct qm_portal_config *config;
- /* needed for providing a non-NULL device to dma_map_***() */
- struct platform_device *pdev;
/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
struct qman_cgrs *cgrs;
/* linked-list of CSCN handlers. */
@@ -1133,7 +1108,6 @@ static int qman_create_portal(struct qman_portal *portal,
const struct qman_cgrs *cgrs)
{
struct qm_portal *p;
- char buf[16];
int ret;
u32 isdr;
@@ -1196,15 +1170,6 @@ static int qman_create_portal(struct qman_portal *portal,
portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
- sprintf(buf, "qportal-%d", c->channel);
- portal->pdev = platform_device_alloc(buf, -1);
- if (!portal->pdev)
- goto fail_devalloc;
- if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
- goto fail_devadd;
- ret = platform_device_add(portal->pdev);
- if (ret)
- goto fail_devadd;
isdr = 0xffffffff;
qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0;
@@ -1239,8 +1204,8 @@ static int qman_create_portal(struct qman_portal *portal,
/* special handling, drain just in case it's a few FQRNIs */
const union qm_mr_entry *e = qm_mr_current(p);
- dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x\n, addr 0x%x",
- e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
+ e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
goto fail_dqrr_mr_empty;
}
/* Success */
@@ -1256,10 +1221,6 @@ fail_eqcr_empty:
fail_affinity:
free_irq(c->irq, portal);
fail_irq:
- platform_device_del(portal->pdev);
-fail_devadd:
- platform_device_put(portal->pdev);
-fail_devalloc:
kfree(portal->cgrs);
fail_cgrs:
qm_mc_finish(p);
@@ -1321,9 +1282,6 @@ static void qman_destroy_portal(struct qman_portal *qm)
qm_dqrr_finish(&qm->p);
qm_eqcr_finish(&qm->p);
- platform_device_del(qm->pdev);
- platform_device_put(qm->pdev);
-
qm->config = NULL;
}
@@ -1428,7 +1386,7 @@ static void qm_mr_process_task(struct work_struct *work)
case QM_MR_VERB_FQRN:
case QM_MR_VERB_FQRL:
/* Lookup in the retirement table */
- fq = fqid_to_fq(msg->fq.fqid);
+ fq = fqid_to_fq(qm_fqid_get(&msg->fq));
if (WARN_ON(!fq))
break;
fq_state_change(p, fq, msg, verb);
@@ -1437,7 +1395,7 @@ static void qm_mr_process_task(struct work_struct *work)
break;
case QM_MR_VERB_FQPN:
/* Parked */
- fq = tag_to_fq(msg->fq.contextB);
+ fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
fq_state_change(p, fq, msg, verb);
if (fq->cb.fqs)
fq->cb.fqs(p, fq, msg);
@@ -1451,7 +1409,7 @@ static void qm_mr_process_task(struct work_struct *work)
}
} else {
/* Its a software ERN */
- fq = tag_to_fq(msg->ern.tag);
+ fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
fq->cb.ern(p, fq, msg);
}
num++;
@@ -1536,7 +1494,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
/*
- * VDQCR: don't trust contextB as the FQ may have
+ * VDQCR: don't trust context_b as the FQ may have
* been configured for h/w consumption and we're
* draining it post-retirement.
*/
@@ -1562,8 +1520,8 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
clear_vdqcr(p, fq);
} else {
- /* SDQCR: contextB points to the FQ */
- fq = tag_to_fq(dq->contextB);
+ /* SDQCR: context_b points to the FQ */
+ fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq);
/*
@@ -1780,9 +1738,9 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
return -EINVAL;
#endif
- if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
/* And can't be set at the same time as TDTHRESH */
- if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
return -EINVAL;
}
/* Issue an INITFQ_[PARKED|SCHED] management command */
@@ -1796,37 +1754,49 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
mcc = qm_mc_start(&p->p);
if (opts)
mcc->initfq = *opts;
- mcc->initfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
mcc->initfq.count = 0;
/*
- * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
* demux pointer. Otherwise, the caller-provided value is allowed to
* stand, don't overwrite it.
*/
if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
dma_addr_t phys_fq;
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
- mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
+ mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
/*
* and the physical address - NB, if the user wasn't trying to
* set CONTEXTA, clear the stashing settings.
*/
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
memset(&mcc->initfq.fqd.context_a, 0,
sizeof(mcc->initfq.fqd.context_a));
} else {
- phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
- DMA_TO_DEVICE);
+ struct qman_portal *p = qman_dma_portal;
+
+ phys_fq = dma_map_single(p->config->dev, fq,
+ sizeof(*fq), DMA_TO_DEVICE);
+ if (dma_mapping_error(p->config->dev, phys_fq)) {
+ dev_err(p->config->dev, "dma_mapping failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
}
}
if (flags & QMAN_INITFQ_FLAG_LOCAL) {
int wq = 0;
- if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
- mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ if (!(be16_to_cpu(mcc->initfq.we_mask) &
+ QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |=
+ cpu_to_be16(QM_INITFQ_WE_DESTWQ);
wq = 4;
}
qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
@@ -1845,13 +1815,13 @@ int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
goto out;
}
if (opts) {
- if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
- if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
+ if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
fq_set(fq, QMAN_FQ_STATE_CGR_EN);
else
fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
}
- if (opts->we_mask & QM_INITFQ_WE_CGID)
+ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
fq->cgr_groupid = opts->fqd.cgid;
}
fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
@@ -1884,7 +1854,7 @@ int qman_schedule_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(p->config->dev, "ALTER_SCHED timeout\n");
@@ -1927,7 +1897,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
@@ -1970,8 +1940,8 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
msg.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
- msg.fq.fqid = fq->fqid;
- msg.fq.contextB = fq_to_tag(fq);
+ qm_fqid_set(&msg.fq, fq->fqid);
+ msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
fq->cb.fqs(p, fq, &msg);
}
} else if (res == QM_MCR_RESULT_PENDING) {
@@ -2006,7 +1976,7 @@ int qman_oos_fq(struct qman_fq *fq)
goto out;
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2032,7 +2002,7 @@ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2058,7 +2028,7 @@ static int qman_query_fq_np(struct qman_fq *fq,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fq->fqid;
+ qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2086,7 +2056,7 @@ static int qman_query_cgr(struct qman_cgr *cgr,
int ret = 0;
mcc = qm_mc_start(&p->p);
- mcc->querycgr.cgid = cgr->cgrid;
+ mcc->cgr.cgid = cgr->cgrid;
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2239,8 +2209,8 @@ int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
if (unlikely(!eq))
goto out;
- eq->fqid = fq->fqid;
- eq->tag = fq_to_tag(fq);
+ qm_fqid_set(eq, fq->fqid);
+ eq->tag = cpu_to_be32(fq_to_tag(fq));
eq->fd = *fd;
qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
@@ -2282,7 +2252,24 @@ out:
}
#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
-#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
+
+/* congestion state change notification target update control */
+static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
+ else
+ cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
+}
+
+static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
+{
+ if (qman_ip_rev >= QMAN_REV30)
+ cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
+ else
+ cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
+}
static u8 qman_cgr_cpus[CGR_NUM];
@@ -2305,7 +2292,6 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
struct qm_mcc_initcgr *opts)
{
struct qm_mcr_querycgr cgr_state;
- struct qm_mcc_initcgr local_opts = {};
int ret;
struct qman_portal *p;
@@ -2327,22 +2313,18 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
spin_lock(&p->cgr_lock);
if (opts) {
+ struct qm_mcc_initcgr local_opts = *opts;
+
ret = qman_query_cgr(cgr, &cgr_state);
if (ret)
goto out;
- if (opts)
- local_opts = *opts;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl =
- QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
- else
- /* Overwrite TARG */
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
- TARG_MASK(p);
- local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+ local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
/* send init if flags indicate so */
- if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
&local_opts);
else
@@ -2405,13 +2387,11 @@ int qman_delete_cgr(struct qman_cgr *cgr)
list_add(&cgr->node, &p->cgr_cbs);
goto release_lock;
}
- /* Overwrite TARG */
- local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
- if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
- local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
- else
- local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
- ~(TARG_MASK(p));
+
+ local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
+ qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
+ be32_to_cpu(cgr_state.cgr.cscn_targ));
+
ret = qm_modify_cgr(cgr, 0, &local_opts);
if (ret)
/* add back to the list */
@@ -2501,7 +2481,7 @@ static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
} while (wait && !dqrr);
while (dqrr) {
- if (dqrr->fqid == fqid && (dqrr->stat & s))
+ if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
found = 1;
qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
qm_dqrr_pvb_update(p);
@@ -2537,7 +2517,7 @@ static int qman_shutdown_fq(u32 fqid)
dev = p->config->dev;
/* Determine the state of the FQID */
mcc = qm_mc_start(&p->p);
- mcc->queryfq_np.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2552,7 +2532,7 @@ static int qman_shutdown_fq(u32 fqid)
/* Query which channel the FQ is using */
mcc = qm_mc_start(&p->p);
- mcc->queryfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ timeout\n");
@@ -2572,7 +2552,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_PARKED:
orl_empty = 0;
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_err(dev, "QUERYFQ_NP timeout\n");
@@ -2667,7 +2647,7 @@ static int qman_shutdown_fq(u32 fqid)
cpu_relax();
}
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2687,7 +2667,7 @@ static int qman_shutdown_fq(u32 fqid)
case QM_MCR_NP_STATE_RETIRED:
/* Send OOS Command */
mcc = qm_mc_start(&p->p);
- mcc->alterfq.fqid = fqid;
+ qm_fqid_set(&mcc->fq, fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT;
@@ -2722,6 +2702,7 @@ const struct qm_portal_config *qman_get_qm_portal_config(
{
return portal->config;
}
+EXPORT_SYMBOL(qman_get_qm_portal_config);
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
@@ -2789,15 +2770,18 @@ static int qpool_cleanup(u32 qp)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
+ return err;
if (qm_fqd_get_chan(&fqd) == qp) {
/* The channel is the FQ's target, clean it */
err = qman_shutdown_fq(fq.fqid);
@@ -2836,7 +2820,7 @@ static int cgr_cleanup(u32 cgrid)
* error, looking for non-OOS FQDs whose CGR is the CGR being released
*/
struct qman_fq fq = {
- .fqid = 1
+ .fqid = QM_FQID_RANGE_START
};
int err;
@@ -2844,16 +2828,19 @@ static int cgr_cleanup(u32 cgrid)
struct qm_mcr_queryfq_np np;
err = qman_query_fq_np(&fq, &np);
- if (err)
+ if (err == -ERANGE)
/* FQID range exceeded, found no problems */
return 0;
+ else if (WARN_ON(err))
+ return err;
+
if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
struct qm_fqd fqd;
err = qman_query_fq(&fq, &fqd);
if (WARN_ON(err))
- return 0;
- if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ return err;
+ if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
fqd.cgid == cgrid) {
pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
cgrid, fq.fqid);
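
Most of the qman.c churn replaces open-coded fqid stores and loads with qm_fqid_set()/qm_fqid_get(), which (per the include/soc/fsl/qman.h hunk later in this diff) fold the 24-bit mask and the big-endian conversion into one place. A standalone sketch of the same semantics over a hypothetical descriptor:

	#include <linux/bitops.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* 'fqid' is a 24-bit field inside a big-endian hardware descriptor. */
	#define MY_FQID_MASK	GENMASK(23, 0)

	struct my_descriptor {
		__be32 fqid;
	};

	static inline void my_fqid_set(struct my_descriptor *d, u32 fqid)
	{
		d->fqid = cpu_to_be32(fqid & MY_FQID_MASK);
	}

	static inline u32 my_fqid_get(const struct my_descriptor *d)
	{
		return be32_to_cpu(d->fqid) & MY_FQID_MASK;
	}
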
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 0cace9e0077e..f4e6e70de259 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -444,6 +444,9 @@ static int zero_priv_mem(struct device *dev, struct device_node *node,
/* map as cacheable, non-guarded */
void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+ if (!tmpp)
+ return -ENOMEM;
+
memset_io(tmpp, 0, sz);
flush_dcache_range((unsigned long)tmpp,
(unsigned long)tmpp + sz);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index d068e4820f49..adbaa30d3c5a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -30,6 +30,9 @@
#include "qman_priv.h"
+struct qman_portal *qman_dma_portal;
+EXPORT_SYMBOL(qman_dma_portal);
+
/* Enable portal interupts (as opposed to polling mode) */
#define CONFIG_FSL_DPA_PIRQ_SLOW 1
#define CONFIG_FSL_DPA_PIRQ_FAST 1
@@ -150,6 +153,10 @@ static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
/* all assigned portals are initialized now */
qman_init_cgr_all();
}
+
+ if (!qman_dma_portal)
+ qman_dma_portal = p;
+
spin_unlock(&qman_lock);
dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
@@ -217,9 +224,9 @@ static int qman_portal_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
struct qm_portal_config *pcfg;
struct resource *addr_phys[2];
- const u32 *channel;
void __iomem *va;
- int irq, len, cpu;
+ int irq, cpu, err;
+ u32 val;
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
@@ -243,13 +250,13 @@ static int qman_portal_probe(struct platform_device *pdev)
return -ENXIO;
}
- channel = of_get_property(node, "cell-index", &len);
- if (!channel || (len != 4)) {
+ err = of_property_read_u32(node, "cell-index", &val);
+ if (err) {
dev_err(dev, "Can't get %s property 'cell-index'\n",
node->full_name);
- return -ENXIO;
+ return err;
}
- pcfg->channel = *channel;
+ pcfg->channel = val;
pcfg->cpu = -1;
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
@@ -259,15 +266,19 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->irq = irq;
va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CE failed\n");
goto err_ioremap1;
+ }
pcfg->addr_virt[DPAA_PORTAL_CE] = va;
va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
_PAGE_GUARDED | _PAGE_NO_CACHE);
- if (!va)
+ if (!va) {
+ dev_err(dev, "ioremap::CI failed\n");
goto err_ioremap2;
+ }
pcfg->addr_virt[DPAA_PORTAL_CI] = va;
@@ -285,8 +296,15 @@ static int qman_portal_probe(struct platform_device *pdev)
spin_unlock(&qman_lock);
pcfg->cpu = cpu;
- if (!init_pcfg(pcfg))
- goto err_ioremap2;
+ if (dma_set_mask(dev, DMA_BIT_MASK(40))) {
+ dev_err(dev, "dma_set_mask() failed\n");
+ goto err_portal_init;
+ }
+
+ if (!init_pcfg(pcfg)) {
+ dev_err(dev, "portal init failed\n");
+ goto err_portal_init;
+ }
/* clear irq affinity if assigned cpu is offline */
if (!cpu_online(cpu))
@@ -294,10 +312,11 @@ static int qman_portal_probe(struct platform_device *pdev)
return 0;
+err_portal_init:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CI]);
err_ioremap2:
iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
err_ioremap1:
- dev_err(dev, "ioremap failed\n");
return -ENXIO;
}
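
Switching the qman_portal probe from of_get_property() to of_property_read_u32() drops the manual length check and returns the cell already converted to CPU endianness. A short sketch of reading such a property, assuming a node that carries a "cell-index" cell as in the hunk above (read_channel() is a hypothetical helper):

	#include <linux/of.h>

	static int read_channel(struct device_node *node, u32 *channel)
	{
		u32 val;
		int err;

		/* Returns 0 on success; handles length and endianness itself. */
		err = of_property_read_u32(node, "cell-index", &val);
		if (err)
			return err;

		*channel = val;
		return 0;
	}
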
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 5cf821e623a9..53685b59718e 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -73,29 +73,23 @@ struct qm_mcr_querycgr {
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[6];
u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
- u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 i_bcnt_lo; /* low 32-bits of 40-bit */
u8 __reserved3[3];
u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
- u32 a_bcnt_lo; /* low 32-bits of 40-bit */
- u32 cscn_targ_swp[4];
+ __be32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ __be32 cscn_targ_swp[4];
} __packed;
static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+ return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}
static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
- return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+ return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
/* "Query FQ Non-Programmable Fields" */
-struct qm_mcc_queryfq_np {
- u8 _ncw_verb;
- u8 __reserved1[3];
- u32 fqid; /* 24-bit */
- u8 __reserved2[56];
-} __packed;
struct qm_mcr_queryfq_np {
u8 verb;
@@ -367,5 +361,6 @@ int qman_alloc_fq_table(u32 num_fqids);
#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
extern struct qman_portal *affine_portals[NR_CPUS];
+extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 6880ff17f45e..2895d062cf51 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -65,7 +65,7 @@ static void fd_init(struct qm_fd *fd)
{
qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
qm_fd_set_contig_big(fd, 0x0000ffff);
- fd->cmd = 0xfeedf00d;
+ fd->cmd = cpu_to_be32(0xfeedf00d);
}
static void fd_inc(struct qm_fd *fd)
@@ -86,26 +86,19 @@ static void fd_inc(struct qm_fd *fd)
len--;
qm_fd_set_param(fd, fmt, off, len);
- fd->cmd++;
+ fd->cmd = cpu_to_be32(be32_to_cpu(fd->cmd) + 1);
}
/* The only part of the 'fd' we can't memcmp() is the ppid */
-static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+static bool fd_neq(const struct qm_fd *a, const struct qm_fd *b)
{
- int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+ bool neq = qm_fd_addr_get64(a) != qm_fd_addr_get64(b);
- if (!r) {
- enum qm_fd_format fmt_a, fmt_b;
+ neq |= qm_fd_get_format(a) != qm_fd_get_format(b);
+ neq |= a->cfg != b->cfg;
+ neq |= a->cmd != b->cmd;
- fmt_a = qm_fd_get_format(a);
- fmt_b = qm_fd_get_format(b);
- r = fmt_a - fmt_b;
- }
- if (!r)
- r = a->cfg - b->cfg;
- if (!r)
- r = a->cmd - b->cmd;
- return r;
+ return neq;
}
/* test */
@@ -217,12 +210,12 @@ static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
const struct qm_dqrr_entry *dq)
{
- if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+ if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
return qman_cb_dqrr_consume;
}
fd_inc(&fd_dq);
- if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_neq(&fd_dq, &fd)) {
sdqcr_complete = 1;
wake_up(&waitqueue);
}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index 43cf66ba42f5..e87b65403b67 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -175,7 +175,7 @@ static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
-static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+static DEFINE_SPINLOCK(hp_lock);
static unsigned int hp_cpu_list_length;
@@ -191,6 +191,9 @@ static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;
+/* needed for dma_map*() */
+static const struct qm_portal_config *pcfg;
+
/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
@@ -210,16 +213,14 @@ static int allocate_frame_data(void)
{
u32 lfsr = HP_FIRST_WORD;
int loop;
- struct platform_device *pdev = platform_device_alloc("foobar", -1);
- if (!pdev) {
- pr_crit("platform_device_alloc() failed");
- return -EIO;
- }
- if (platform_device_add(pdev)) {
- pr_crit("platform_device_add() failed");
+ if (!qman_dma_portal) {
+ pr_crit("portal not available\n");
return -EIO;
}
+
+ pcfg = qman_get_qm_portal_config(qman_dma_portal);
+
__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
if (!__frame_ptr)
return -ENOMEM;
@@ -229,15 +230,22 @@ static int allocate_frame_data(void)
frame_ptr[loop] = lfsr;
lfsr = do_lfsr(lfsr);
}
- frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+
+ frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
DMA_BIDIRECTIONAL);
- platform_device_del(pdev);
- platform_device_put(pdev);
+ if (dma_mapping_error(pcfg->dev, frame_dma)) {
+ pr_crit("dma mapping failure\n");
+ kfree(__frame_ptr);
+ return -EIO;
+ }
+
return 0;
}
static void deallocate_frame_data(void)
{
+ dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
kfree(__frame_ptr);
}
@@ -249,7 +257,8 @@ static inline int process_frame_data(struct hp_handler *handler,
int loop;
if (qm_fd_addr_get64(fd) != handler->addr) {
- pr_crit("bad frame address");
+ pr_crit("bad frame address, [%llX != %llX]\n",
+ qm_fd_addr_get64(fd), handler->addr);
return -EIO;
}
for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
@@ -397,8 +406,9 @@ static int init_handler(void *h)
goto failed;
}
memset(&opts, 0, sizeof(opts));
- opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
- opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA);
+ opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
QMAN_INITFQ_FLAG_LOCAL, &opts);
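
The stash test now maps its frame buffer against the portal's own device and, unlike the old code, validates the handle with dma_mapping_error() before use. A minimal sketch of that map/check/unmap cycle against a hypothetical struct device:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int map_frame(struct device *dev, size_t len)
	{
		void *buf;
		dma_addr_t handle;

		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, handle)) {
			kfree(buf);
			return -EIO;
		}

		/* ... hand 'handle' to the hardware, wait for completion ... */

		dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
		kfree(buf);
		return 0;
	}
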
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 2707a827261b..ade168f5328e 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -717,9 +717,5 @@ static struct platform_driver qe_driver = {
.resume = qe_resume,
};
-static int __init qe_drv_init(void)
-{
- return platform_driver_register(&qe_driver);
-}
-device_initcall(qe_drv_init);
+builtin_platform_driver(qe_driver);
#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
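
builtin_platform_driver() expands to the boilerplate the removed qe_drv_init() spelled out by hand: an initcall that registers the driver. A sketch with a hypothetical driver:

	#include <linux/platform_device.h>

	static int my_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver my_driver = {
		.driver = {
			.name = "my-builtin-dev",
		},
		.probe = my_probe,
	};

	/*
	 * Equivalent to an explicit initcall that just calls
	 * platform_driver_register(&my_driver).
	 */
	builtin_platform_driver(my_driver);
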
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 80378ddadc5c..c8823578a1b2 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
static void tce_iommu_detach_group(void *iommu_data,
struct iommu_group *iommu_group);
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
long ret = 0, locked, lock_limit;
- if (!current || !current->mm)
- return -ESRCH; /* process exited */
+ if (WARN_ON_ONCE(!mm))
+ return -EPERM;
if (!npages)
return 0;
- down_write(&current->mm->mmap_sem);
- locked = current->mm->locked_vm + npages;
+ down_write(&mm->mmap_sem);
+ locked = mm->locked_vm + npages;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
ret = -ENOMEM;
else
- current->mm->locked_vm += npages;
+ mm->locked_vm += npages;
pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK),
ret ? " - exceeded" : "");
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
return ret;
}
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
- if (!current || !current->mm || !npages)
- return; /* process exited */
+ if (!mm || !npages)
+ return;
- down_write(&current->mm->mmap_sem);
- if (WARN_ON_ONCE(npages > current->mm->locked_vm))
- npages = current->mm->locked_vm;
- current->mm->locked_vm -= npages;
+ down_write(&mm->mmap_sem);
+ if (WARN_ON_ONCE(npages > mm->locked_vm))
+ npages = mm->locked_vm;
+ mm->locked_vm -= npages;
pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
npages << PAGE_SHIFT,
- current->mm->locked_vm << PAGE_SHIFT,
+ mm->locked_vm << PAGE_SHIFT,
rlimit(RLIMIT_MEMLOCK));
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
}
/*
@@ -89,6 +89,15 @@ struct tce_iommu_group {
};
/*
+ * A container needs to remember which preregistered region it has
+ * referenced to do proper cleanup at the userspace process exit.
+ */
+struct tce_iommu_prereg {
+ struct list_head next;
+ struct mm_iommu_table_group_mem_t *mem;
+};
+
+/*
* The container descriptor supports only a single group per container.
* Required by the API as the container is not supplied with the IOMMU group
* at the moment of initialization.
@@ -97,24 +106,68 @@ struct tce_container {
struct mutex lock;
bool enabled;
bool v2;
+ bool def_window_pending;
unsigned long locked_pages;
+ struct mm_struct *mm;
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
struct list_head group_list;
+ struct list_head prereg_list;
};
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+ if (container->mm) {
+ if (container->mm == current->mm)
+ return 0;
+ return -EPERM;
+ }
+ BUG_ON(!current->mm);
+ container->mm = current->mm;
+ atomic_inc(&container->mm->mm_count);
+
+ return 0;
+}
+
+static long tce_iommu_prereg_free(struct tce_container *container,
+ struct tce_iommu_prereg *tcemem)
+{
+ long ret;
+
+ ret = mm_iommu_put(container->mm, tcemem->mem);
+ if (ret)
+ return ret;
+
+ list_del(&tcemem->next);
+ kfree(tcemem);
+
+ return 0;
+}
+
static long tce_iommu_unregister_pages(struct tce_container *container,
__u64 vaddr, __u64 size)
{
struct mm_iommu_table_group_mem_t *mem;
+ struct tce_iommu_prereg *tcemem;
+ bool found = false;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
return -EINVAL;
- mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+ mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
if (!mem)
return -ENOENT;
- return mm_iommu_put(mem);
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ return tce_iommu_prereg_free(container, tcemem);
}
static long tce_iommu_register_pages(struct tce_container *container,
@@ -122,22 +175,36 @@ static long tce_iommu_register_pages(struct tce_container *container,
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem = NULL;
+ struct tce_iommu_prereg *tcemem;
unsigned long entries = size >> PAGE_SHIFT;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
((vaddr + size) < vaddr))
return -EINVAL;
- ret = mm_iommu_get(vaddr, entries, &mem);
+ mem = mm_iommu_find(container->mm, vaddr, entries);
+ if (mem) {
+ list_for_each_entry(tcemem, &container->prereg_list, next) {
+ if (tcemem->mem == mem)
+ return -EBUSY;
+ }
+ }
+
+ ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
if (ret)
return ret;
+ tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+ tcemem->mem = mem;
+ list_add(&tcemem->next, &container->prereg_list);
+
container->enabled = true;
return 0;
}
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -146,13 +213,13 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
BUG_ON(tbl->it_userspace);
- ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
if (ret)
return ret;
uas = vzalloc(cb);
if (!uas) {
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
return -ENOMEM;
}
tbl->it_userspace = uas;
@@ -160,7 +227,8 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
return 0;
}
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+ struct mm_struct *mm)
{
unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
tbl->it_size, PAGE_SIZE);
@@ -170,7 +238,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
vfree(tbl->it_userspace);
tbl->it_userspace = NULL;
- decrement_locked_vm(cb >> PAGE_SHIFT);
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -230,9 +298,6 @@ static int tce_iommu_enable(struct tce_container *container)
struct iommu_table_group *table_group;
struct tce_iommu_group *tcegrp;
- if (!current->mm)
- return -ESRCH; /* process exited */
-
if (container->enabled)
return -EBUSY;
@@ -277,8 +342,12 @@ static int tce_iommu_enable(struct tce_container *container)
if (!table_group->tce32_size)
return -EPERM;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
locked = table_group->tce32_size >> PAGE_SHIFT;
- ret = try_increment_locked_vm(locked);
+ ret = try_increment_locked_vm(container->mm, locked);
if (ret)
return ret;
@@ -296,10 +365,8 @@ static void tce_iommu_disable(struct tce_container *container)
container->enabled = false;
- if (!current->mm)
- return;
-
- decrement_locked_vm(container->locked_pages);
+ BUG_ON(!container->mm);
+ decrement_locked_vm(container->mm, container->locked_pages);
}
static void *tce_iommu_open(unsigned long arg)
@@ -317,6 +384,7 @@ static void *tce_iommu_open(unsigned long arg)
mutex_init(&container->lock);
INIT_LIST_HEAD_RCU(&container->group_list);
+ INIT_LIST_HEAD_RCU(&container->prereg_list);
container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
@@ -326,7 +394,8 @@ static void *tce_iommu_open(unsigned long arg)
static int tce_iommu_clear(struct tce_container *container,
struct iommu_table *tbl,
unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
@@ -351,10 +420,20 @@ static void tce_iommu_release(void *iommu_data)
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
+ }
+
+ while (!list_empty(&container->prereg_list)) {
+ struct tce_iommu_prereg *tcemem;
+
+ tcemem = list_first_entry(&container->prereg_list,
+ struct tce_iommu_prereg, next);
+ WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
}
tce_iommu_disable(container);
+ if (container->mm)
+ mmdrop(container->mm);
mutex_destroy(&container->lock);
kfree(container);
@@ -369,13 +448,14 @@ static void tce_iommu_unuse_page(struct tce_container *container,
put_page(page);
}
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+ unsigned long tce, unsigned long size,
unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem;
- mem = mm_iommu_lookup(tce, size);
+ mem = mm_iommu_lookup(container->mm, tce, size);
if (!mem)
return -EINVAL;
@@ -388,18 +468,18 @@ static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
return 0;
}
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
- unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+ struct iommu_table *tbl, unsigned long entry)
{
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
- if (!pua || !current || !current->mm)
+ if (!pua)
return;
- ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
&hpa, &mem);
if (ret)
pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -429,7 +509,7 @@ static int tce_iommu_clear(struct tce_container *container,
continue;
if (container->v2) {
- tce_iommu_unuse_page_v2(tbl, entry);
+ tce_iommu_unuse_page_v2(container, tbl, entry);
continue;
}
@@ -509,13 +589,19 @@ static long tce_iommu_build_v2(struct tce_container *container,
unsigned long hpa;
enum dma_data_direction dirtmp;
+ if (!tbl->it_userspace) {
+ ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
entry + i);
- ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
- &hpa, &mem);
+ ret = tce_iommu_prereg_ua_to_hpa(container,
+ tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
if (ret)
break;
@@ -536,7 +622,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
if (ret) {
/* dirtmp cannot be DMA_NONE here */
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);
@@ -544,7 +630,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
}
if (dirtmp != DMA_NONE)
- tce_iommu_unuse_page_v2(tbl, entry + i);
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
*pua = tce;
@@ -572,7 +658,7 @@ static long tce_iommu_create_table(struct tce_container *container,
if (!table_size)
return -EINVAL;
- ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+ ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
if (ret)
return ret;
@@ -582,25 +668,17 @@ static long tce_iommu_create_table(struct tce_container *container,
WARN_ON(!ret && !(*ptbl)->it_ops->free);
WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
- if (!ret && container->v2) {
- ret = tce_iommu_userspace_view_alloc(*ptbl);
- if (ret)
- (*ptbl)->it_ops->free(*ptbl);
- }
-
- if (ret)
- decrement_locked_vm(table_size >> PAGE_SHIFT);
-
return ret;
}
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+ struct iommu_table *tbl)
{
unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
tbl->it_ops->free(tbl);
- decrement_locked_vm(pages);
+ decrement_locked_vm(container->mm, pages);
}
static long tce_iommu_create_window(struct tce_container *container,
@@ -663,7 +741,7 @@ unset_exit:
table_group = iommu_group_get_iommudata(tcegrp->grp);
table_group->ops->unset_window(table_group, num);
}
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
return ret;
}
@@ -701,12 +779,41 @@ static long tce_iommu_remove_window(struct tce_container *container,
/* Free table */
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_free_table(tbl);
+ tce_iommu_free_table(container, tbl);
container->tables[num] = NULL;
return 0;
}
+static long tce_iommu_create_default_window(struct tce_container *container)
+{
+ long ret;
+ __u64 start_addr = 0;
+ struct tce_iommu_group *tcegrp;
+ struct iommu_table_group *table_group;
+
+ if (!container->def_window_pending)
+ return 0;
+
+ if (!tce_groups_attached(container))
+ return -ENODEV;
+
+ tcegrp = list_first_entry(&container->group_list,
+ struct tce_iommu_group, next);
+ table_group = iommu_group_get_iommudata(tcegrp->grp);
+ if (!table_group)
+ return -ENODEV;
+
+ ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
+ table_group->tce32_size, 1, &start_addr);
+ WARN_ON_ONCE(!ret && start_addr);
+
+ if (!ret)
+ container->def_window_pending = false;
+
+ return ret;
+}
+
static long tce_iommu_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -727,7 +834,17 @@ static long tce_iommu_ioctl(void *iommu_data,
}
return (ret < 0) ? 0 : ret;
+ }
+
+ /*
+ * Sanity check to prevent one userspace from manipulating
+ * another userspace mm.
+ */
+ BUG_ON(!container);
+ if (container->mm && container->mm != current->mm)
+ return -EPERM;
+ switch (cmd) {
case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
struct vfio_iommu_spapr_tce_info info;
struct tce_iommu_group *tcegrp;
@@ -797,6 +914,10 @@ static long tce_iommu_ioctl(void *iommu_data,
VFIO_DMA_MAP_FLAG_WRITE))
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -860,6 +981,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (param.flags)
return -EINVAL;
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
num = tce_iommu_find_table(container, param.iova, &tbl);
if (num < 0)
return -ENXIO;
@@ -888,6 +1013,10 @@ static long tce_iommu_ioctl(void *iommu_data,
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (copy_from_user(&param, (void __user *)arg, minsz))
return -EFAULT;
@@ -911,6 +1040,9 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ if (!container->mm)
+ return -EPERM;
+
minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
size);
@@ -969,6 +1101,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -986,6 +1122,10 @@ static long tce_iommu_ioctl(void *iommu_data,
mutex_lock(&container->lock);
+ ret = tce_iommu_create_default_window(container);
+ if (ret)
+ return ret;
+
ret = tce_iommu_create_window(container, create.page_shift,
create.window_size, create.levels,
&create.start_addr);
@@ -1003,6 +1143,10 @@ static long tce_iommu_ioctl(void *iommu_data,
if (!container->v2)
break;
+ ret = tce_iommu_mm_set(container);
+ if (ret)
+ return ret;
+
if (!tce_groups_attached(container))
return -ENXIO;
@@ -1018,6 +1162,11 @@ static long tce_iommu_ioctl(void *iommu_data,
if (remove.flags)
return -EINVAL;
+ if (container->def_window_pending && !remove.start_addr) {
+ container->def_window_pending = false;
+ return 0;
+ }
+
mutex_lock(&container->lock);
ret = tce_iommu_remove_window(container, remove.start_addr);
@@ -1043,7 +1192,7 @@ static void tce_iommu_release_ownership(struct tce_container *container,
continue;
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
- tce_iommu_userspace_view_free(tbl);
+ tce_iommu_userspace_view_free(tbl, container->mm);
if (tbl->it_map)
iommu_release_ownership(tbl);
@@ -1062,10 +1211,7 @@ static int tce_iommu_take_ownership(struct tce_container *container,
if (!tbl || !tbl->it_map)
continue;
- rc = tce_iommu_userspace_view_alloc(tbl);
- if (!rc)
- rc = iommu_take_ownership(tbl);
-
+ rc = iommu_take_ownership(tbl);
if (rc) {
for (j = 0; j < i; ++j)
iommu_release_ownership(
@@ -1100,9 +1246,6 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container,
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
- long i, ret = 0;
- struct iommu_table *tbl = NULL;
-
if (!table_group->ops->create_table || !table_group->ops->set_window ||
!table_group->ops->release_ownership) {
WARN_ON_ONCE(1);
@@ -1111,47 +1254,7 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container,
table_group->ops->take_ownership(table_group);
- /*
- * If it the first group attached, check if there is
- * a default DMA window and create one if none as
- * the userspace expects it to exist.
- */
- if (!tce_groups_attached(container) && !container->tables[0]) {
- ret = tce_iommu_create_table(container,
- table_group,
- 0, /* window number */
- IOMMU_PAGE_SHIFT_4K,
- table_group->tce32_size,
- 1, /* default levels */
- &tbl);
- if (ret)
- goto release_exit;
- else
- container->tables[0] = tbl;
- }
-
- /* Set all windows to the new group */
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
- tbl = container->tables[i];
-
- if (!tbl)
- continue;
-
- /* Set the default window to a new group */
- ret = table_group->ops->set_window(table_group, i, tbl);
- if (ret)
- goto release_exit;
- }
-
return 0;
-
-release_exit:
- for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
- table_group->ops->unset_window(table_group, i);
-
- table_group->ops->release_ownership(table_group);
-
- return ret;
}
static int tce_iommu_attach_group(void *iommu_data,
@@ -1203,10 +1306,13 @@ static int tce_iommu_attach_group(void *iommu_data,
}
if (!table_group->ops || !table_group->ops->take_ownership ||
- !table_group->ops->release_ownership)
+ !table_group->ops->release_ownership) {
ret = tce_iommu_take_ownership(container, table_group);
- else
+ } else {
ret = tce_iommu_take_ownership_ddw(container, table_group);
+ if (!tce_groups_attached(container) && !container->tables[0])
+ container->def_window_pending = true;
+ }
if (!ret) {
tcegrp->grp = iommu_group;
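
The core of the spapr TCE changes is that the container latches the owning mm_struct on first privileged use (tce_iommu_mm_set()) and charges all locked_vm against that mm, so cleanup no longer depends on current->mm still being alive at release time. A reduced sketch of that ownership pattern, with hypothetical container and helper names:

	#include <linux/sched.h>
	#include <linux/mm_types.h>
	#include <linux/errno.h>

	struct my_container {
		struct mm_struct *mm;	/* owner, pinned via mm_count */
	};

	/* Latch current->mm on first use; reject other processes afterwards. */
	static int my_container_mm_set(struct my_container *c)
	{
		if (c->mm)
			return c->mm == current->mm ? 0 : -EPERM;

		c->mm = current->mm;
		atomic_inc(&c->mm->mm_count);	/* paired with mmdrop() below */
		return 0;
	}

	static void my_container_release(struct my_container *c)
	{
		if (c->mm)
			mmdrop(c->mm);
	}
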
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d7437777baaa..d419d0e51fe5 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -148,7 +148,36 @@ struct kexec_file_ops {
kexec_verify_sig_t *verify_sig;
#endif
};
-#endif
+
+/**
+ * struct kexec_buf - parameters for finding a place for a buffer in memory
+ * @image: kexec image in which memory to search.
+ * @buffer: Contents which will be copied to the allocated memory.
+ * @bufsz: Size of @buffer.
+ * @mem: On return will have address of the buffer in memory.
+ * @memsz: Size for the buffer in memory.
+ * @buf_align: Minimum alignment needed.
+ * @buf_min: The buffer can't be placed below this address.
+ * @buf_max: The buffer can't be placed above this address.
+ * @top_down: Allocate from top of memory.
+ */
+struct kexec_buf {
+ struct kimage *image;
+ void *buffer;
+ unsigned long bufsz;
+ unsigned long mem;
+ unsigned long memsz;
+ unsigned long buf_align;
+ unsigned long buf_min;
+ unsigned long buf_max;
+ bool top_down;
+};
+
+int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
+ int (*func)(u64, u64, void *));
+extern int kexec_add_buffer(struct kexec_buf *kbuf);
+int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+#endif /* CONFIG_KEXEC_FILE */
struct kimage {
kimage_entry_t head;
@@ -212,11 +241,6 @@ extern asmlinkage long sys_kexec_load(unsigned long entry,
struct kexec_segment __user *segments,
unsigned long flags);
extern int kernel_kexec(void);
-extern int kexec_add_buffer(struct kimage *image, char *buffer,
- unsigned long bufsz, unsigned long memsz,
- unsigned long buf_align, unsigned long buf_min,
- unsigned long buf_max, bool top_down,
- unsigned long *load_addr);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
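
kexec_add_buffer() moves from a long positional argument list to a caller-filled struct kexec_buf, with the chosen load address returned in kbuf.mem. A hedged sketch of the new calling convention, assuming CONFIG_KEXEC_FILE, an already-constructed struct kimage, and a buffer to place:

	#include <linux/kexec.h>
	#include <linux/kernel.h>
	#include <linux/mm.h>

	/* Place 'buf' somewhere in the image; on success *load_addr is set. */
	static int place_buffer(struct kimage *image, void *buf, unsigned long size,
				unsigned long *load_addr)
	{
		struct kexec_buf kbuf = {
			.image     = image,
			.buffer    = buf,
			.bufsz     = size,
			.memsz     = size,
			.buf_align = PAGE_SIZE,
			.buf_min   = 0,
			.buf_max   = ULONG_MAX,
			.top_down  = false,
		};
		int ret;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;

		*load_addr = kbuf.mem;	/* address chosen by the allocator */
		return 0;
	}
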
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 37f3eb001a16..3d4df74a96de 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -244,11 +244,11 @@ static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
struct qm_dqrr_entry {
u8 verb;
u8 stat;
- u16 seqnum; /* 15-bit */
+ __be16 seqnum; /* 15-bit */
u8 tok;
u8 __reserved2[3];
- u32 fqid; /* 24-bit */
- u32 contextB;
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
struct qm_fd fd;
u8 __reserved4[32];
} __packed;
@@ -262,6 +262,11 @@ struct qm_dqrr_entry {
#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+/* 'fqid' is a 24-bit field in every h/w descriptor */
+#define QM_FQID_MASK GENMASK(23, 0)
+#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
+#define qm_fqid_get(p) (be32_to_cpu((p)->fqid) & QM_FQID_MASK)
+
/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
@@ -272,12 +277,11 @@ union qm_mr_entry {
struct {
u8 verb;
u8 dca;
- u16 seqnum;
+ __be16 seqnum;
u8 rc; /* Rej Code: 8-bit */
- u8 orp_hi; /* ORP: 24-bit */
- u16 orp_lo;
- u32 fqid; /* 24-bit */
- u32 tag;
+ u8 __reserved[3];
+ __be32 fqid; /* 24-bit */
+ __be32 tag;
struct qm_fd fd;
u8 __reserved1[32];
} __packed ern;
@@ -285,8 +289,8 @@ union qm_mr_entry {
u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
- u32 fqid; /* 24-bit */
- u32 contextB;
+ __be32 fqid; /* 24-bit */
+ __be32 context_b;
u8 __reserved2[48];
} __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
};
@@ -405,13 +409,13 @@ static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
- fqd->context_a.context_hi = upper_32_bits(addr);
- fqd->context_a.context_lo = lower_32_bits(addr);
+ fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}
static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
- fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}
@@ -521,7 +525,7 @@ static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
*/
struct qm_cgr_wr_parm {
/* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
- u32 word;
+ __be32 word;
};
/*
* This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
@@ -532,7 +536,7 @@ struct qm_cgr_wr_parm {
*/
struct qm_cgr_cs_thres {
/* _res[13-15], TA[5-12], Tn[0-4] */
- u16 word;
+ __be16 word;
};
/*
* This identical structure of CGR fields is present in the "Init/Modify CGR"
@@ -549,10 +553,10 @@ struct __qm_mc_cgr {
u8 cscn_en; /* boolean, use QM_CGR_EN */
union {
struct {
- u16 cscn_targ_upd_ctrl; /* use QM_CSCN_TARG_UDP_ */
- u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ __be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
+ __be16 cscn_targ_dcp_low;
};
- u32 cscn_targ; /* use QM_CGR_TARG_* */
+ __be32 cscn_targ; /* use QM_CGR_TARG_* */
};
u8 cstd_en; /* boolean, use QM_CGR_EN */
u8 cs; /* boolean, only used in query response */
@@ -568,7 +572,9 @@ struct __qm_mc_cgr {
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
- return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+ int thres = be16_to_cpu(th->word);
+
+ return ((thres >> 5) & 0xff) << (thres & 0x1f);
}
static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
@@ -584,23 +590,23 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
if (roundup && oddbit)
val++;
}
- th->word = ((val & 0xff) << 5) | (e & 0x1f);
+ th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
return 0;
}
/* "Initialize FQ" */
struct qm_mcc_initfq {
u8 __reserved1[2];
- u16 we_mask; /* Write Enable Mask */
- u32 fqid; /* 24-bit */
- u16 count; /* Initialises 'count+1' FQDs */
+ __be16 we_mask; /* Write Enable Mask */
+ __be32 fqid; /* 24-bit */
+ __be16 count; /* Initialises 'count+1' FQDs */
struct qm_fqd fqd; /* the FQD fields go here */
u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
u8 __reserve1[2];
- u16 we_mask; /* Write Enable Mask */
+ __be16 we_mask; /* Write Enable Mask */
struct __qm_mc_cgr cgr; /* CGR fields */
u8 __reserved2[2];
u8 cgid;
@@ -654,7 +660,7 @@ struct qman_cgr;
/*
* This enum, and the callback type that returns it, are used when handling
* dequeued frames via DQRR. Note that for "null" callbacks registered with the
- * portal object (for handling dequeues that do not demux because contextB is
+ * portal object (for handling dequeues that do not demux because context_b is
* NULL), the return value *MUST* be qman_cb_dqrr_consume.
*/
enum qman_cb_dqrr_result {
@@ -859,11 +865,11 @@ void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
* qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
* pre-existing frame-queues that aren't to be otherwise interfered with, it
* prevents all other modifications to the frame queue. The TO_DCPORTAL flag
- * causes the driver to honour any contextB modifications requested in the
+ * causes the driver to honour any context_b modifications requested in the
* qm_init_fq() API, as this indicates the frame queue will be consumed by a
* direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
- * software portals, the contextB field is controlled by the driver and can't be
- * modified by the caller.
+ * software portals, the context_b field is controlled by the driver and can't
+ * be modified by the caller.
*/
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
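Because the descriptor fields above are now declared __be16/__be32, drivers are expected to go through the new qm_fqid_set()/qm_fqid_get() helpers rather than touching ->fqid directly. A small sketch; the function name and FQID value are invented for illustration.

#include <linux/printk.h>
#include <soc/fsl/qman.h>

/* Sketch: the helpers hide the endian swap and the 24-bit mask. */
static void example_fqid_roundtrip(void)
{
	struct qm_mcc_initfq initfq = {};

	qm_fqid_set(&initfq, 0x123456);	/* stores cpu_to_be32(v & QM_FQID_MASK) */
	pr_debug("fqid = 0x%x\n", qm_fqid_get(&initfq));	/* reads back CPU-endian */
}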
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 037c321c5618..0c2df7f73792 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -428,25 +428,65 @@ static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
return locate_mem_hole_bottom_up(start, end, kbuf);
}
-/*
- * Helper function for placing a buffer in a kexec segment. This assumes
- * that kexec_mutex is held.
+/**
+ * arch_kexec_walk_mem - call func(data) on free memory regions
+ * @kbuf: Context info for the search. Also passed to @func.
+ * @func: Function to call for each memory region.
+ *
+ * Return: The memory walk will stop when func returns a non-zero value
+ * and that value will be returned. If all free regions are visited without
+ * func returning non-zero, then zero will be returned.
+ */
+int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
+ int (*func)(u64, u64, void *))
+{
+ if (kbuf->image->type == KEXEC_TYPE_CRASH)
+ return walk_iomem_res_desc(crashk_res.desc,
+ IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
+ crashk_res.start, crashk_res.end,
+ kbuf, func);
+ else
+ return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
+}
+
+/**
+ * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
+ * @kbuf: Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
*/
-int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
- unsigned long memsz, unsigned long buf_align,
- unsigned long buf_min, unsigned long buf_max,
- bool top_down, unsigned long *load_addr)
+int kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+ int ret;
+
+ ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);
+
+ return ret == 1 ? 0 : -EADDRNOTAVAIL;
+}
+
+/**
+ * kexec_add_buffer - place a buffer in a kexec segment
+ * @kbuf: Buffer contents and memory parameters.
+ *
+ * This function assumes that kexec_mutex is held.
+ * On successful return, @kbuf->mem will have the physical address of
+ * the buffer in memory.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int kexec_add_buffer(struct kexec_buf *kbuf)
{
struct kexec_segment *ksegment;
- struct kexec_buf buf, *kbuf;
int ret;
/* Currently adding segment this way is allowed only in file mode */
- if (!image->file_mode)
+ if (!kbuf->image->file_mode)
return -EINVAL;
- if (image->nr_segments >= KEXEC_SEGMENT_MAX)
+ if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
return -EINVAL;
/*
@@ -456,45 +496,27 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
* logic goes through list of segments to make sure there are
* no destination overlaps.
*/
- if (!list_empty(&image->control_pages)) {
+ if (!list_empty(&kbuf->image->control_pages)) {
WARN_ON(1);
return -EINVAL;
}
- memset(&buf, 0, sizeof(struct kexec_buf));
- kbuf = &buf;
- kbuf->image = image;
- kbuf->buffer = buffer;
- kbuf->bufsz = bufsz;
-
- kbuf->memsz = ALIGN(memsz, PAGE_SIZE);
- kbuf->buf_align = max(buf_align, PAGE_SIZE);
- kbuf->buf_min = buf_min;
- kbuf->buf_max = buf_max;
- kbuf->top_down = top_down;
+ /* Ensure minimum alignment needed for segments. */
+ kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
+ kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);
/* Walk the RAM ranges and allocate a suitable range for the buffer */
- if (image->type == KEXEC_TYPE_CRASH)
- ret = walk_iomem_res_desc(crashk_res.desc,
- IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
- crashk_res.start, crashk_res.end, kbuf,
- locate_mem_hole_callback);
- else
- ret = walk_system_ram_res(0, -1, kbuf,
- locate_mem_hole_callback);
- if (ret != 1) {
- /* A suitable memory range could not be found for buffer */
- return -EADDRNOTAVAIL;
- }
+ ret = kexec_locate_mem_hole(kbuf);
+ if (ret)
+ return ret;
/* Found a suitable memory range */
- ksegment = &image->segment[image->nr_segments];
+ ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
ksegment->kbuf = kbuf->buffer;
ksegment->bufsz = kbuf->bufsz;
ksegment->mem = kbuf->mem;
ksegment->memsz = kbuf->memsz;
- image->nr_segments++;
- *load_addr = ksegment->mem;
+ kbuf->image->nr_segments++;
return 0;
}
@@ -616,13 +638,15 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
unsigned long max, int top_down)
{
struct purgatory_info *pi = &image->purgatory_info;
- unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad;
- unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset;
+ unsigned long align, bss_align, bss_sz, bss_pad;
+ unsigned long entry, load_addr, curr_load_addr, bss_addr, offset;
unsigned char *buf_addr, *src;
int i, ret = 0, entry_sidx = -1;
const Elf_Shdr *sechdrs_c;
Elf_Shdr *sechdrs = NULL;
- void *purgatory_buf = NULL;
+ struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1,
+ .buf_min = min, .buf_max = max,
+ .top_down = top_down };
/*
* sechdrs_c points to section headers in purgatory and are read
@@ -688,9 +712,7 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
}
/* Determine how much memory is needed to load relocatable object. */
- buf_align = 1;
bss_align = 1;
- buf_sz = 0;
bss_sz = 0;
for (i = 0; i < pi->ehdr->e_shnum; i++) {
@@ -699,10 +721,10 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
align = sechdrs[i].sh_addralign;
if (sechdrs[i].sh_type != SHT_NOBITS) {
- if (buf_align < align)
- buf_align = align;
- buf_sz = ALIGN(buf_sz, align);
- buf_sz += sechdrs[i].sh_size;
+ if (kbuf.buf_align < align)
+ kbuf.buf_align = align;
+ kbuf.bufsz = ALIGN(kbuf.bufsz, align);
+ kbuf.bufsz += sechdrs[i].sh_size;
} else {
/* bss section */
if (bss_align < align)
@@ -714,32 +736,31 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
/* Determine the bss padding required to align bss properly */
bss_pad = 0;
- if (buf_sz & (bss_align - 1))
- bss_pad = bss_align - (buf_sz & (bss_align - 1));
+ if (kbuf.bufsz & (bss_align - 1))
+ bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1));
- memsz = buf_sz + bss_pad + bss_sz;
+ kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz;
/* Allocate buffer for purgatory */
- purgatory_buf = vzalloc(buf_sz);
- if (!purgatory_buf) {
+ kbuf.buffer = vzalloc(kbuf.bufsz);
+ if (!kbuf.buffer) {
ret = -ENOMEM;
goto out;
}
- if (buf_align < bss_align)
- buf_align = bss_align;
+ if (kbuf.buf_align < bss_align)
+ kbuf.buf_align = bss_align;
/* Add buffer to segment list */
- ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz,
- buf_align, min, max, top_down,
- &pi->purgatory_load_addr);
+ ret = kexec_add_buffer(&kbuf);
if (ret)
goto out;
+ pi->purgatory_load_addr = kbuf.mem;
/* Load SHF_ALLOC sections */
- buf_addr = purgatory_buf;
+ buf_addr = kbuf.buffer;
load_addr = curr_load_addr = pi->purgatory_load_addr;
- bss_addr = load_addr + buf_sz + bss_pad;
+ bss_addr = load_addr + kbuf.bufsz + bss_pad;
for (i = 0; i < pi->ehdr->e_shnum; i++) {
if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -785,11 +806,11 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min,
* Used later to identify which section is purgatory and skip it
* from checksumming.
*/
- pi->purgatory_buf = purgatory_buf;
+ pi->purgatory_buf = kbuf.buffer;
return ret;
out:
vfree(sechdrs);
- vfree(purgatory_buf);
+ vfree(kbuf.buffer);
return ret;
}
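The __weak arch_kexec_walk_mem() introduced above is the hook an architecture overrides when its usable memory is not well described by the generic resource walk (the powerpc patches elsewhere in this pull provide such an override). The sketch below only illustrates the contract and is not the powerpc code; the memblock-based body is an assumption.

#include <linux/kexec.h>
#include <linux/memblock.h>

/* Sketch of an arch override: call func(start, end, kbuf) for each
 * candidate region and stop as soon as func returns non-zero. */
int arch_kexec_walk_mem(struct kexec_buf *kbuf,
			int (*func)(u64, u64, void *))
{
	phys_addr_t start, end;
	u64 i;
	int ret = 0;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL) {
		/* kexec callbacks take an inclusive end; memblock's is exclusive */
		ret = func(start, end - 1, kbuf);
		if (ret)
			break;
	}

	return ret;
}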
diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h
index 0a52315d9c62..4cef7e4706b0 100644
--- a/kernel/kexec_internal.h
+++ b/kernel/kexec_internal.h
@@ -20,22 +20,6 @@ struct kexec_sha_region {
unsigned long len;
};
-/*
- * Keeps track of buffer parameters as provided by caller for requesting
- * memory placement of buffer.
- */
-struct kexec_buf {
- struct kimage *image;
- char *buffer;
- unsigned long bufsz;
- unsigned long mem;
- unsigned long memsz;
- unsigned long buf_align;
- unsigned long buf_min;
- unsigned long buf_max;
- bool top_down; /* allocate from top of memory hole */
-};
-
void kimage_file_post_load_cleanup(struct kimage *image);
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index f453b7ce99d6..365a907f98b3 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -316,6 +316,8 @@ do_file(char const *const fname)
case EM_S390:
case EM_AARCH64:
case EM_PARISC:
+ case EM_PPC:
+ case EM_PPC64:
custom_sort = sort_relative_table;
break;
case EM_ARCOMPACT:
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index db54a33f850f..c2c4211ba58b 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -8,7 +8,7 @@ ifeq ($(ARCH),powerpc)
GIT_VERSION = $(shell git describe --always --long --dirty || echo "unknown")
-CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR) $(CFLAGS)
+CFLAGS := -std=gnu99 -Wall -O2 -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CURDIR)/include $(CFLAGS)
export CFLAGS
@@ -26,7 +26,8 @@ SUB_DIRS = alignment \
syscalls \
tm \
vphn \
- math
+ math \
+ ptrace
endif
diff --git a/tools/testing/selftests/powerpc/benchmarks/.gitignore b/tools/testing/selftests/powerpc/benchmarks/.gitignore
index bce49ebd869e..04dc1e6ef2ce 100644
--- a/tools/testing/selftests/powerpc/benchmarks/.gitignore
+++ b/tools/testing/selftests/powerpc/benchmarks/.gitignore
@@ -1,4 +1,5 @@
gettimeofday
context_switch
mmap_bench
-futex_bench
\ No newline at end of file
+futex_bench
+null_syscall
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index a9adfb7de78f..545077f98f72 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -1,4 +1,4 @@
-TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench
+TEST_PROGS := gettimeofday context_switch mmap_bench futex_bench null_syscall
CFLAGS += -O2
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index a36883ad48a4..778f5fbfd784 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -28,7 +28,7 @@
#ifdef __powerpc__
#include <altivec.h>
#endif
-#include "../utils.h"
+#include "utils.h"
static unsigned int timeout = 30;
diff --git a/tools/testing/selftests/powerpc/benchmarks/null_syscall.c b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
new file mode 100644
index 000000000000..ecc14d68e101
--- /dev/null
+++ b/tools/testing/selftests/powerpc/benchmarks/null_syscall.c
@@ -0,0 +1,157 @@
+/*
+ * Test null syscall performance
+ *
+ * Copyright (C) 2009-2015 Anton Blanchard, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define NR_LOOPS 10000000
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <signal.h>
+
+static volatile int soak_done;
+unsigned long long clock_frequency;
+unsigned long long timebase_frequency;
+double timebase_multiplier;
+
+static inline unsigned long long mftb(void)
+{
+ unsigned long low;
+
+ asm volatile("mftb %0" : "=r" (low));
+
+ return low;
+}
+
+static void sigalrm_handler(int unused)
+{
+ soak_done = 1;
+}
+
+/*
+ * Use a timer instead of busy looping on clock_gettime() so we don't
+ * pollute profiles with glibc and VDSO hits.
+ */
+static void cpu_soak_usecs(unsigned long usecs)
+{
+ struct itimerval val;
+
+ memset(&val, 0, sizeof(val));
+ val.it_value.tv_usec = usecs;
+
+ signal(SIGALRM, sigalrm_handler);
+ setitimer(ITIMER_REAL, &val, NULL);
+
+ while (1) {
+ if (soak_done)
+ break;
+ }
+
+ signal(SIGALRM, SIG_DFL);
+}
+
+/*
+ * This only works with recent kernels where cpufreq modifies
+ * /proc/cpuinfo dynamically.
+ */
+static void get_proc_frequency(void)
+{
+ FILE *f;
+ char line[128];
+ char *p, *end;
+ unsigned long v;
+ double d;
+ char *override;
+
+ /* Try to get out of low power/low frequency mode */
+ cpu_soak_usecs(0.25 * 1000000);
+
+ f = fopen("/proc/cpuinfo", "r");
+ if (f == NULL)
+ return;
+
+ timebase_frequency = 0;
+
+ while (fgets(line, sizeof(line), f) != NULL) {
+ if (strncmp(line, "timebase", 8) == 0) {
+ p = strchr(line, ':');
+ if (p != NULL) {
+ v = strtoull(p + 1, &end, 0);
+ if (end != p + 1)
+ timebase_frequency = v;
+ }
+ }
+
+ if (((strncmp(line, "clock", 5) == 0) ||
+ (strncmp(line, "cpu MHz", 7) == 0))) {
+ p = strchr(line, ':');
+ if (p != NULL) {
+ d = strtod(p + 1, &end);
+ if (end != p + 1) {
+ /* Find fastest clock frequency */
+ if ((d * 1000000ULL) > clock_frequency)
+ clock_frequency = d * 1000000ULL;
+ }
+ }
+ }
+ }
+
+ fclose(f);
+
+ override = getenv("FREQUENCY");
+ if (override)
+ clock_frequency = strtoull(override, NULL, 10);
+
+ if (timebase_frequency)
+ timebase_multiplier = (double)clock_frequency
+ / timebase_frequency;
+ else
+ timebase_multiplier = 1;
+}
+
+static void do_null_syscall(unsigned long nr)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr; i++)
+ getppid();
+}
+
+#define TIME(A, STR) \
+
+int main(void)
+{
+ unsigned long tb_start, tb_now;
+ struct timespec tv_start, tv_now;
+ unsigned long long elapsed_ns, elapsed_tb;
+
+ get_proc_frequency();
+
+ clock_gettime(CLOCK_MONOTONIC, &tv_start);
+ tb_start = mftb();
+
+ do_null_syscall(NR_LOOPS);
+
+ clock_gettime(CLOCK_MONOTONIC, &tv_now);
+ tb_now = mftb();
+
+ elapsed_ns = (tv_now.tv_sec - tv_start.tv_sec) * 1000000000ULL +
+ (tv_now.tv_nsec - tv_start.tv_nsec);
+ elapsed_tb = tb_now - tb_start;
+
+ printf("%10.2f ns %10.2f cycles\n", (float)elapsed_ns / NR_LOOPS,
+ (float)elapsed_tb * timebase_multiplier / NR_LOOPS);
+
+ return 0;
+}
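To make the output arithmetic of this benchmark concrete (core clock and tick counts invented for illustration): with the usual 512 MHz POWER timebase and a 3.5 GHz core clock, timebase_multiplier = 3.5e9 / 512e6 ≈ 6.84; if the 10,000,000 getppid() calls span 1.0e9 timebase ticks, that is 100 ticks per call, reported as roughly 100 * 6.84 ≈ 684 cycles per null syscall.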
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
index 50ae7d2091ce..80d34a9ffff4 100644
--- a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
+++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h
@@ -25,6 +25,8 @@
#define PPC_MTOCRF(A, B) mtocrf A, B
+#define EX_TABLE(x, y)
+
FUNC_START(enter_vmx_usercopy)
li r3,1
blr
diff --git a/tools/testing/selftests/powerpc/copyloops/validate.c b/tools/testing/selftests/powerpc/copyloops/validate.c
index 1750ff57ee58..7fb436f82d16 100644
--- a/tools/testing/selftests/powerpc/copyloops/validate.c
+++ b/tools/testing/selftests/powerpc/copyloops/validate.c
@@ -3,7 +3,7 @@
#include <stdlib.h>
#include <stdbool.h>
-#include "../utils.h"
+#include "utils.h"
#define MAX_LEN 8192
#define MAX_OFFSET 16
diff --git a/tools/testing/selftests/powerpc/dscr/dscr.h b/tools/testing/selftests/powerpc/dscr/dscr.h
index a36af1b2c2bb..18ea223bd398 100644
--- a/tools/testing/selftests/powerpc/dscr/dscr.h
+++ b/tools/testing/selftests/powerpc/dscr/dscr.h
@@ -28,8 +28,6 @@
#include "utils.h"
-#define SPRN_DSCR 0x11 /* Privilege state SPR */
-#define SPRN_DSCR_USR 0x03 /* Problem state SPR */
#define THREADS 100 /* Max threads */
#define COUNT 100 /* Max iterations */
#define DSCR_MAX 16 /* Max DSCR value */
@@ -48,14 +46,14 @@ inline unsigned long get_dscr(void)
{
unsigned long ret;
- asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR));
+ asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR_PRIV));
return ret;
}
inline void set_dscr(unsigned long val)
{
- asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+ asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_PRIV));
}
/* Problem state DSCR access */
@@ -63,14 +61,14 @@ inline unsigned long get_dscr_usr(void)
{
unsigned long ret;
- asm volatile("mfspr %0,%1" : "=r" (ret): "i" (SPRN_DSCR_USR));
+ asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_DSCR));
return ret;
}
inline void set_dscr_usr(unsigned long val)
{
- asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR_USR));
+ asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
}
/* Default DSCR access */
diff --git a/tools/testing/selftests/powerpc/basic_asm.h b/tools/testing/selftests/powerpc/include/basic_asm.h
index 3349a0704d1a..12eaddf72e66 100644
--- a/tools/testing/selftests/powerpc/basic_asm.h
+++ b/tools/testing/selftests/powerpc/include/basic_asm.h
@@ -4,12 +4,12 @@
#include <ppc-asm.h>
#include <asm/unistd.h>
-#define LOAD_REG_IMMEDIATE(reg,expr) \
- lis reg,(expr)@highest; \
- ori reg,reg,(expr)@higher; \
- rldicr reg,reg,32,31; \
- oris reg,reg,(expr)@high; \
- ori reg,reg,(expr)@l;
+#define LOAD_REG_IMMEDIATE(reg, expr) \
+ lis reg, (expr)@highest; \
+ ori reg, reg, (expr)@higher; \
+ rldicr reg, reg, 32, 31; \
+ oris reg, reg, (expr)@high; \
+ ori reg, reg, (expr)@l;
/*
* Note: These macros assume that variables being stored on the stack are
@@ -20,7 +20,8 @@
#define STACK_FRAME_MIN_SIZE 32
#define STACK_FRAME_TOC_POS 24
#define __STACK_FRAME_PARAM(_param) (32 + ((_param)*8))
-#define __STACK_FRAME_LOCAL(_num_params,_var_num) ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8))
+#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
+ ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8))
#else
#define STACK_FRAME_MIN_SIZE 112
#define STACK_FRAME_TOC_POS 40
@@ -30,14 +31,16 @@
* Caveat: if a function passed more than 8 doublewords, the caller will have
* made more space... which would render the 112 incorrect.
*/
-#define __STACK_FRAME_LOCAL(_num_params,_var_num) (112 + ((_var_num)*8))
+#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
+ (112 + ((_var_num)*8))
#endif
/* Parameter x saved to the stack */
#define STACK_FRAME_PARAM(var) __STACK_FRAME_PARAM(var)
/* Local variable x saved to the stack after x parameters */
-#define STACK_FRAME_LOCAL(num_params,var) __STACK_FRAME_LOCAL(num_params,var)
+#define STACK_FRAME_LOCAL(num_params, var) \
+ __STACK_FRAME_LOCAL(num_params, var)
#define STACK_FRAME_LR_POS 16
#define STACK_FRAME_CR_POS 8
@@ -53,18 +56,18 @@
*/
#define PUSH_BASIC_STACK(_extra) \
mflr r0; \
- std r0,STACK_FRAME_LR_POS(%r1); \
- stdu %r1,-(_extra + STACK_FRAME_MIN_SIZE)(%r1); \
+ std r0, STACK_FRAME_LR_POS(%r1); \
+ stdu %r1, -(_extra + STACK_FRAME_MIN_SIZE)(%r1); \
mfcr r0; \
- stw r0,STACK_FRAME_CR_POS(%r1); \
- std %r2,STACK_FRAME_TOC_POS(%r1);
+ stw r0, STACK_FRAME_CR_POS(%r1); \
+ std %r2, STACK_FRAME_TOC_POS(%r1);
#define POP_BASIC_STACK(_extra) \
- ld %r2,STACK_FRAME_TOC_POS(%r1); \
- lwz r0,STACK_FRAME_CR_POS(%r1); \
+ ld %r2, STACK_FRAME_TOC_POS(%r1); \
+ lwz r0, STACK_FRAME_CR_POS(%r1); \
mtcr r0; \
- addi %r1,%r1,(_extra + STACK_FRAME_MIN_SIZE); \
- ld r0,STACK_FRAME_LR_POS(%r1); \
+ addi %r1, %r1, (_extra + STACK_FRAME_MIN_SIZE); \
+ ld r0, STACK_FRAME_LR_POS(%r1); \
mtlr r0;
#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */
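As a worked example of the ELFv2 macros above, assuming a frame with two parameter save slots: STACK_FRAME_LOCAL(2, 0) expands to STACK_FRAME_PARAM(2) + 0*8 = (32 + 2*8) + 0 = 48, so the first local sits 48 bytes above the stack pointer, directly after the two parameter slots at offsets 32 and 40.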
diff --git a/tools/testing/selftests/powerpc/fpu_asm.h b/tools/testing/selftests/powerpc/include/fpu_asm.h
index 6a387d255e27..6a387d255e27 100644
--- a/tools/testing/selftests/powerpc/fpu_asm.h
+++ b/tools/testing/selftests/powerpc/include/fpu_asm.h
diff --git a/tools/testing/selftests/powerpc/gpr_asm.h b/tools/testing/selftests/powerpc/include/gpr_asm.h
index f6f38852d3a0..f6f38852d3a0 100644
--- a/tools/testing/selftests/powerpc/gpr_asm.h
+++ b/tools/testing/selftests/powerpc/include/gpr_asm.h
diff --git a/tools/testing/selftests/powerpc/instructions.h b/tools/testing/selftests/powerpc/include/instructions.h
index 0fb0bd3b28c9..0fb0bd3b28c9 100644
--- a/tools/testing/selftests/powerpc/instructions.h
+++ b/tools/testing/selftests/powerpc/include/instructions.h
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
new file mode 100644
index 000000000000..4afdebcce4cd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2014, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_REG_H
+#define _SELFTESTS_POWERPC_REG_H
+
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#define mfspr(rn) ({unsigned long rval; \
+	asm volatile("mfspr %0," __stringify(rn) \
+		: "=r" (rval)); rval; })
+#define mtspr(rn, v)	asm volatile("mtspr " __stringify(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+
+#define mb() asm volatile("sync" : : : "memory");
+
+#define SPRN_MMCR2 769
+#define SPRN_MMCRA 770
+#define SPRN_MMCR0 779
+#define MMCR0_PMAO 0x00000080
+#define MMCR0_PMAE 0x04000000
+#define MMCR0_FC 0x80000000
+#define SPRN_EBBHR 804
+#define SPRN_EBBRR 805
+#define SPRN_BESCR 806 /* Branch event status & control register */
+#define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */
+#define SPRN_BESCRSU 801 /* Branch event status & control set upper */
+#define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */
+#define SPRN_BESCRRU 803 /* Branch event status & control REset upper */
+
+#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */
+#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */
+
+#define SPRN_PMC1 771
+#define SPRN_PMC2 772
+#define SPRN_PMC3 773
+#define SPRN_PMC4 774
+#define SPRN_PMC5 775
+#define SPRN_PMC6 776
+
+#define SPRN_SIAR 780
+#define SPRN_SDAR 781
+#define SPRN_SIER 768
+
+#define SPRN_TEXASR 0x82 /* Transaction Exception and Status Register */
+#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
+#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+#define SPRN_TAR 0x32f /* Target Address Register */
+
+#define SPRN_DSCR_PRIV 0x11 /* Privilege State DSCR */
+#define SPRN_DSCR 0x03 /* Data Stream Control Register */
+#define SPRN_PPR 896 /* Program Priority Register */
+
+/* TEXASR register bits */
+#define TEXASR_FC 0xFE00000000000000
+#define TEXASR_FP 0x0100000000000000
+#define TEXASR_DA 0x0080000000000000
+#define TEXASR_NO 0x0040000000000000
+#define TEXASR_FO 0x0020000000000000
+#define TEXASR_SIC 0x0010000000000000
+#define TEXASR_NTC 0x0008000000000000
+#define TEXASR_TC 0x0004000000000000
+#define TEXASR_TIC 0x0002000000000000
+#define TEXASR_IC 0x0001000000000000
+#define TEXASR_IFC 0x0000800000000000
+#define TEXASR_ABT 0x0000000100000000
+#define TEXASR_SPD 0x0000000080000000
+#define TEXASR_HV 0x0000000020000000
+#define TEXASR_PR 0x0000000010000000
+#define TEXASR_FS 0x0000000008000000
+#define TEXASR_TE 0x0000000004000000
+#define TEXASR_ROT 0x0000000002000000
+
+/* Vector Instructions */
+#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \
+ ((rb) << 11) | (((xs) >> 5)))
+#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
+#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
+
+#define ASM_LOAD_GPR_IMMED(_asm_symbol_name_immed) \
+ "li 14, %[" #_asm_symbol_name_immed "];" \
+ "li 15, %[" #_asm_symbol_name_immed "];" \
+ "li 16, %[" #_asm_symbol_name_immed "];" \
+ "li 17, %[" #_asm_symbol_name_immed "];" \
+ "li 18, %[" #_asm_symbol_name_immed "];" \
+ "li 19, %[" #_asm_symbol_name_immed "];" \
+ "li 20, %[" #_asm_symbol_name_immed "];" \
+ "li 21, %[" #_asm_symbol_name_immed "];" \
+ "li 22, %[" #_asm_symbol_name_immed "];" \
+ "li 23, %[" #_asm_symbol_name_immed "];" \
+ "li 24, %[" #_asm_symbol_name_immed "];" \
+ "li 25, %[" #_asm_symbol_name_immed "];" \
+ "li 26, %[" #_asm_symbol_name_immed "];" \
+ "li 27, %[" #_asm_symbol_name_immed "];" \
+ "li 28, %[" #_asm_symbol_name_immed "];" \
+ "li 29, %[" #_asm_symbol_name_immed "];" \
+ "li 30, %[" #_asm_symbol_name_immed "];" \
+ "li 31, %[" #_asm_symbol_name_immed "];"
+
+#define ASM_LOAD_FPR_SINGLE_PRECISION(_asm_symbol_name_addr) \
+ "lfs 0, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 1, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 2, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 3, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 4, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 5, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 6, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 7, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 8, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 9, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 10, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 11, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 12, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 13, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 14, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 15, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 16, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 17, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 18, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 19, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 20, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 21, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 22, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 23, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 24, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 25, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 26, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 27, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 28, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 29, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 30, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfs 31, 0(%[" #_asm_symbol_name_addr "]);"
+
+#ifndef __ASSEMBLER__
+void store_gpr(unsigned long *addr);
+void load_gpr(unsigned long *addr);
+void load_fpr_single_precision(float *addr);
+void store_fpr_single_precision(float *addr);
+#endif /* end of __ASSEMBLER__ */
+
+#endif /* _SELFTESTS_POWERPC_REG_H */
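The mfspr()/mtspr() wrappers above take the SPR number as a compile-time constant and stringify it into the instruction. A short sketch of their use from a selftest; the function name and the DSCR value 16 are arbitrary examples.

#include <stdio.h>
#include "reg.h"

/* Sketch: read and write the problem-state DSCR through the wrappers. */
static void example_dscr(void)
{
	unsigned long old = mfspr(SPRN_DSCR);

	mtspr(SPRN_DSCR, 16);
	printf("DSCR was %lu, now %lu\n", old, mfspr(SPRN_DSCR));
}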
diff --git a/tools/testing/selftests/powerpc/subunit.h b/tools/testing/selftests/powerpc/include/subunit.h
index 9c6c4e901ab6..9c6c4e901ab6 100644
--- a/tools/testing/selftests/powerpc/subunit.h
+++ b/tools/testing/selftests/powerpc/include/subunit.h
diff --git a/tools/testing/selftests/powerpc/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index 53405e8a52ab..53405e8a52ab 100644
--- a/tools/testing/selftests/powerpc/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
diff --git a/tools/testing/selftests/powerpc/vmx_asm.h b/tools/testing/selftests/powerpc/include/vmx_asm.h
index 2eaaeca9cf1d..2eaaeca9cf1d 100644
--- a/tools/testing/selftests/powerpc/vmx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vmx_asm.h
diff --git a/tools/testing/selftests/powerpc/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
index d828bfb6ef2d..d828bfb6ef2d 100644
--- a/tools/testing/selftests/powerpc/vsx_asm.h
+++ b/tools/testing/selftests/powerpc/include/vsx_asm.h
diff --git a/tools/testing/selftests/powerpc/lib/reg.S b/tools/testing/selftests/powerpc/lib/reg.S
new file mode 100644
index 000000000000..0dc44f0da065
--- /dev/null
+++ b/tools/testing/selftests/powerpc/lib/reg.S
@@ -0,0 +1,397 @@
+/*
+ * test helper assembly functions
+ *
+ * Copyright (C) 2016 Simon Guo, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <ppc-asm.h>
+#include "reg.h"
+
+
+/* Non volatile GPR - unsigned long buf[18] */
+FUNC_START(load_gpr)
+ ld 14, 0*8(3)
+ ld 15, 1*8(3)
+ ld 16, 2*8(3)
+ ld 17, 3*8(3)
+ ld 18, 4*8(3)
+ ld 19, 5*8(3)
+ ld 20, 6*8(3)
+ ld 21, 7*8(3)
+ ld 22, 8*8(3)
+ ld 23, 9*8(3)
+ ld 24, 10*8(3)
+ ld 25, 11*8(3)
+ ld 26, 12*8(3)
+ ld 27, 13*8(3)
+ ld 28, 14*8(3)
+ ld 29, 15*8(3)
+ ld 30, 16*8(3)
+ ld 31, 17*8(3)
+ blr
+FUNC_END(load_gpr)
+
+FUNC_START(store_gpr)
+ std 14, 0*8(3)
+ std 15, 1*8(3)
+ std 16, 2*8(3)
+ std 17, 3*8(3)
+ std 18, 4*8(3)
+ std 19, 5*8(3)
+ std 20, 6*8(3)
+ std 21, 7*8(3)
+ std 22, 8*8(3)
+ std 23, 9*8(3)
+ std 24, 10*8(3)
+ std 25, 11*8(3)
+ std 26, 12*8(3)
+ std 27, 13*8(3)
+ std 28, 14*8(3)
+ std 29, 15*8(3)
+ std 30, 16*8(3)
+ std 31, 17*8(3)
+ blr
+FUNC_END(store_gpr)
+
+/* Single Precision Float - float buf[32] */
+FUNC_START(load_fpr_single_precision)
+ lfs 0, 0*4(3)
+ lfs 1, 1*4(3)
+ lfs 2, 2*4(3)
+ lfs 3, 3*4(3)
+ lfs 4, 4*4(3)
+ lfs 5, 5*4(3)
+ lfs 6, 6*4(3)
+ lfs 7, 7*4(3)
+ lfs 8, 8*4(3)
+ lfs 9, 9*4(3)
+ lfs 10, 10*4(3)
+ lfs 11, 11*4(3)
+ lfs 12, 12*4(3)
+ lfs 13, 13*4(3)
+ lfs 14, 14*4(3)
+ lfs 15, 15*4(3)
+ lfs 16, 16*4(3)
+ lfs 17, 17*4(3)
+ lfs 18, 18*4(3)
+ lfs 19, 19*4(3)
+ lfs 20, 20*4(3)
+ lfs 21, 21*4(3)
+ lfs 22, 22*4(3)
+ lfs 23, 23*4(3)
+ lfs 24, 24*4(3)
+ lfs 25, 25*4(3)
+ lfs 26, 26*4(3)
+ lfs 27, 27*4(3)
+ lfs 28, 28*4(3)
+ lfs 29, 29*4(3)
+ lfs 30, 30*4(3)
+ lfs 31, 31*4(3)
+ blr
+FUNC_END(load_fpr_single_precision)
+
+/* Single Precision Float - float buf[32] */
+FUNC_START(store_fpr_single_precision)
+ stfs 0, 0*4(3)
+ stfs 1, 1*4(3)
+ stfs 2, 2*4(3)
+ stfs 3, 3*4(3)
+ stfs 4, 4*4(3)
+ stfs 5, 5*4(3)
+ stfs 6, 6*4(3)
+ stfs 7, 7*4(3)
+ stfs 8, 8*4(3)
+ stfs 9, 9*4(3)
+ stfs 10, 10*4(3)
+ stfs 11, 11*4(3)
+ stfs 12, 12*4(3)
+ stfs 13, 13*4(3)
+ stfs 14, 14*4(3)
+ stfs 15, 15*4(3)
+ stfs 16, 16*4(3)
+ stfs 17, 17*4(3)
+ stfs 18, 18*4(3)
+ stfs 19, 19*4(3)
+ stfs 20, 20*4(3)
+ stfs 21, 21*4(3)
+ stfs 22, 22*4(3)
+ stfs 23, 23*4(3)
+ stfs 24, 24*4(3)
+ stfs 25, 25*4(3)
+ stfs 26, 26*4(3)
+ stfs 27, 27*4(3)
+ stfs 28, 28*4(3)
+ stfs 29, 29*4(3)
+ stfs 30, 30*4(3)
+ stfs 31, 31*4(3)
+ blr
+FUNC_END(store_fpr_single_precision)
+
+/* VMX/VSX registers - unsigned long buf[128] */
+FUNC_START(loadvsx)
+ lis 4, 0
+ LXVD2X (0,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (1,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (2,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (3,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (4,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (5,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (6,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (7,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (8,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (9,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (10,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (11,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (12,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (13,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (14,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (15,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (16,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (17,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (18,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (19,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (20,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (21,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (22,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (23,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (24,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (25,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (26,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (27,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (28,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (29,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (30,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (31,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (32,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (33,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (34,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (35,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (36,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (37,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (38,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (39,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (40,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (41,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (42,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (43,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (44,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (45,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (46,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (47,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (48,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (49,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (50,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (51,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (52,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (53,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (54,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (55,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (56,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (57,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (58,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (59,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (60,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (61,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (62,(4),(3))
+ addi 4, 4, 16
+ LXVD2X (63,(4),(3))
+ blr
+FUNC_END(loadvsx)
+
+FUNC_START(storevsx)
+ lis 4, 0
+ STXVD2X (0,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (1,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (2,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (3,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (4,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (5,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (6,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (7,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (8,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (9,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (10,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (11,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (12,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (13,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (14,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (15,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (16,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (17,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (18,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (19,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (20,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (21,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (22,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (23,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (24,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (25,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (26,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (27,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (28,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (29,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (30,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (31,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (32,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (33,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (34,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (35,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (36,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (37,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (38,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (39,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (40,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (41,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (42,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (43,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (44,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (45,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (46,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (47,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (48,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (49,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (50,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (51,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (52,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (53,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (54,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (55,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (56,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (57,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (58,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (59,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (60,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (61,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (62,(4),(3))
+ addi 4, 4, 16
+ STXVD2X (63,(4),(3))
+ blr
+FUNC_END(storevsx)
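From C these helpers are called with plain arrays sized as the comments in reg.S state: 18 unsigned longs for the non-volatile GPRs and 32 floats for the FPRs. A sketch, with a hypothetical function name:

#include "reg.h"

/* Sketch: capture the register state into caller-provided buffers. */
static void example_capture(void)
{
	unsigned long gprs[18];	/* non-volatile r14..r31 */
	float fprs[32];		/* f0..f31 stored as single precision */

	store_gpr(gprs);
	store_fpr_single_precision(fprs);
}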
diff --git a/tools/testing/selftests/powerpc/math/fpu_asm.S b/tools/testing/selftests/powerpc/math/fpu_asm.S
index 241f067a510f..8a04bb117b69 100644
--- a/tools/testing/selftests/powerpc/math/fpu_asm.S
+++ b/tools/testing/selftests/powerpc/math/fpu_asm.S
@@ -7,8 +7,8 @@
* 2 of the License, or (at your option) any later version.
*/
-#include "../basic_asm.h"
-#include "../fpu_asm.h"
+#include "basic_asm.h"
+#include "fpu_asm.h"
FUNC_START(check_fpu)
mr r4,r3
diff --git a/tools/testing/selftests/powerpc/math/vmx_asm.S b/tools/testing/selftests/powerpc/math/vmx_asm.S
index fd74da488625..cb1e5ae1be99 100644
--- a/tools/testing/selftests/powerpc/math/vmx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vmx_asm.S
@@ -7,8 +7,8 @@
* 2 of the License, or (at your option) any later version.
*/
-#include "../basic_asm.h"
-#include "../vmx_asm.h"
+#include "basic_asm.h"
+#include "vmx_asm.h"
# Should be safe from C, only touches r4, r5 and v0,v1,v2
FUNC_START(check_vmx)
diff --git a/tools/testing/selftests/powerpc/math/vsx_asm.S b/tools/testing/selftests/powerpc/math/vsx_asm.S
index a110dd882d5e..8f431f6abc49 100644
--- a/tools/testing/selftests/powerpc/math/vsx_asm.S
+++ b/tools/testing/selftests/powerpc/math/vsx_asm.S
@@ -7,8 +7,8 @@
* 2 of the License, or (at your option) any later version.
*/
-#include "../basic_asm.h"
-#include "../vsx_asm.h"
+#include "basic_asm.h"
+#include "vsx_asm.h"
#long check_vsx(vector int *r3);
#This function wraps storing VSX regs to the end of an array and a
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
index 44b7df14a936..42bddbed8b64 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
+++ b/tools/testing/selftests/powerpc/pmu/ebb/.gitignore
@@ -20,5 +20,3 @@ back_to_back_ebbs_test
lost_exception_test
no_handler_test
cycles_with_mmcr2_test
-ebb_lmr
-ebb_lmr_regs
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index 6b0453e60d53..8d2279c4bb4b 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -14,7 +14,7 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test \
fork_cleanup_test ebb_on_child_test \
ebb_on_willing_child_test back_to_back_ebbs_test \
lost_exception_test no_handler_test \
- cycles_with_mmcr2_test ebb_lmr ebb_lmr_regs
+ cycles_with_mmcr2_test
all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
deleted file mode 100644
index c47ebd55ba4d..000000000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright 2016, Jack Miller, IBM Corp.
- * Licensed under GPLv2.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "ebb.h"
-#include "ebb_lmr.h"
-
-#define SIZE (32 * 1024 * 1024) /* 32M */
-#define LM_SIZE 0 /* Smallest encoding, 32M */
-
-#define SECTIONS 64 /* 1 per bit in LMSER */
-#define SECTION_SIZE (SIZE / SECTIONS)
-#define SECTION_LONGS (SECTION_SIZE / sizeof(long))
-
-static unsigned long *test_mem;
-
-static int lmr_count = 0;
-
-void ebb_lmr_handler(void)
-{
- lmr_count++;
-}
-
-void ldmx_full_section(unsigned long *mem, int section)
-{
- unsigned long *ptr;
- int i;
-
- for (i = 0; i < SECTION_LONGS; i++) {
- ptr = &mem[(SECTION_LONGS * section) + i];
- ldmx((unsigned long) &ptr);
- ebb_lmr_reset();
- }
-}
-
-unsigned long section_masks[] = {
- 0x8000000000000000,
- 0xFF00000000000000,
- 0x0000000F70000000,
- 0x8000000000000001,
- 0xF0F0F0F0F0F0F0F0,
- 0x0F0F0F0F0F0F0F0F,
- 0x0
-};
-
-int ebb_lmr_section_test(unsigned long *mem)
-{
- unsigned long *mask = section_masks;
- int i;
-
- for (; *mask; mask++) {
- mtspr(SPRN_LMSER, *mask);
- printf("Testing mask 0x%016lx\n", mfspr(SPRN_LMSER));
-
- for (i = 0; i < 64; i++) {
- lmr_count = 0;
- ldmx_full_section(mem, i);
- if (*mask & (1UL << (63 - i)))
- FAIL_IF(lmr_count != SECTION_LONGS);
- else
- FAIL_IF(lmr_count);
- }
- }
-
- return 0;
-}
-
-int ebb_lmr(void)
-{
- int i;
-
- SKIP_IF(!lmr_is_supported());
-
- setup_ebb_handler(ebb_lmr_handler);
-
- ebb_global_enable();
-
- FAIL_IF(posix_memalign((void **)&test_mem, SIZE, SIZE) != 0);
-
- mtspr(SPRN_LMSER, 0);
-
- FAIL_IF(mfspr(SPRN_LMSER) != 0);
-
- mtspr(SPRN_LMRR, ((unsigned long)test_mem | LM_SIZE));
-
- FAIL_IF(mfspr(SPRN_LMRR) != ((unsigned long)test_mem | LM_SIZE));
-
- /* Read every single byte to ensure we get no false positives */
- for (i = 0; i < SECTIONS; i++)
- ldmx_full_section(test_mem, i);
-
- FAIL_IF(lmr_count != 0);
-
- /* Turn on the first section */
-
- mtspr(SPRN_LMSER, (1UL << 63));
- FAIL_IF(mfspr(SPRN_LMSER) != (1UL << 63));
-
- /* Enable LM (BESCR) */
-
- mtspr(SPRN_BESCR, mfspr(SPRN_BESCR) | BESCR_LME);
- FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LME));
-
- ldmx((unsigned long)&test_mem);
-
- FAIL_IF(lmr_count != 1); // exactly one exception
- FAIL_IF(mfspr(SPRN_BESCR) & BESCR_LME); // LM now disabled
- FAIL_IF(!(mfspr(SPRN_BESCR) & BESCR_LMEO)); // occurred bit set
-
- printf("Simple LMR EBB OK\n");
-
- /* This shouldn't cause an EBB since it's been disabled */
- ldmx((unsigned long)&test_mem);
- FAIL_IF(lmr_count != 1);
-
- printf("LMR disable on EBB OK\n");
-
- ebb_lmr_reset();
-
- /* This should cause an EBB or reset is broken */
- ldmx((unsigned long)&test_mem);
- FAIL_IF(lmr_count != 2);
-
- printf("LMR reset EBB OK\n");
-
- ebb_lmr_reset();
-
- return ebb_lmr_section_test(test_mem);
-}
-
-int main(void)
-{
- int ret = test_harness(ebb_lmr, "ebb_lmr");
-
- if (test_mem)
- free(test_mem);
-
- return ret;
-}
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
deleted file mode 100644
index ef50abd557cd..000000000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _SELFTESTS_POWERPC_PMU_EBB_LMR_H
-#define _SELFTESTS_POWERPC_PMU_EBB_LMR_H
-
-#include "reg.h"
-
-#ifndef PPC_FEATURE2_ARCH_3_00
-#define PPC_FEATURE2_ARCH_3_00 0x00800000
-#endif
-
-#define lmr_is_supported() have_hwcap2(PPC_FEATURE2_ARCH_3_00)
-
-static inline void ebb_lmr_reset(void)
-{
- unsigned long bescr = mfspr(SPRN_BESCR);
- bescr &= ~(BESCR_LMEO);
- bescr |= BESCR_LME;
- mtspr(SPRN_BESCR, bescr);
-}
-
-#define LDMX(t, a, b)\
- (0x7c00026a | \
- (((t) & 0x1f) << 21) | \
- (((a) & 0x1f) << 16) | \
- (((b) & 0x1f) << 11))
-
-static inline unsigned long ldmx(unsigned long address)
-{
- unsigned long ret;
-
- asm volatile ("mr 9, %1\r\n"
- ".long " __stringify(LDMX(9, 0, 9)) "\r\n"
- "mr %0, 9\r\n":"=r"(ret)
- :"r"(address)
- :"r9");
-
- return ret;
-}
-
-#endif
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
deleted file mode 100644
index aff4241fd88a..000000000000
--- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_lmr_regs.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2016, Jack Miller, IBM Corp.
- * Licensed under GPLv2.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-
-#include "ebb.h"
-#include "ebb_lmr.h"
-
-#define CHECKS 10000
-
-int ebb_lmr_regs(void)
-{
- int i;
-
- SKIP_IF(!lmr_is_supported());
-
- ebb_global_enable();
-
- for (i = 0; i < CHECKS; i++) {
- mtspr(SPRN_LMRR, i << 25); // skip size and rsvd bits
- mtspr(SPRN_LMSER, i);
-
- FAIL_IF(mfspr(SPRN_LMRR) != (i << 25));
- FAIL_IF(mfspr(SPRN_LMSER) != i);
- }
-
- return 0;
-}
-
-int main(void)
-{
- return test_harness(ebb_lmr_regs, "ebb_lmr_regs");
-}
diff --git a/tools/testing/selftests/powerpc/pmu/lib.c b/tools/testing/selftests/powerpc/pmu/lib.c
index 8b992fa5b478..5bf5dd40822b 100644
--- a/tools/testing/selftests/powerpc/pmu/lib.c
+++ b/tools/testing/selftests/powerpc/pmu/lib.c
@@ -193,9 +193,9 @@ bool require_paranoia_below(int level)
long current;
char *end, buf[16];
FILE *f;
- int rc;
+ bool rc;
- rc = -1;
+ rc = false;
f = fopen(PARANOID_PATH, "r");
if (!f) {
@@ -218,7 +218,7 @@ bool require_paranoia_below(int level)
if (current >= level)
goto out_close;
- rc = 0;
+ rc = true;
out_close:
fclose(f);
out:
diff --git a/tools/testing/selftests/powerpc/primitives/asm/firmware.h b/tools/testing/selftests/powerpc/primitives/asm/firmware.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/firmware.h
diff --git a/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h b/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h
new file mode 120000
index 000000000000..66c8193224e9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/ppc_asm.h
@@ -0,0 +1 @@
+../../../../../../arch/powerpc/include/asm/ppc_asm.h
\ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/primitives/asm/processor.h b/tools/testing/selftests/powerpc/primitives/asm/processor.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/asm/processor.h
diff --git a/tools/testing/selftests/powerpc/primitives/linux/stringify.h b/tools/testing/selftests/powerpc/primitives/linux/stringify.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/primitives/linux/stringify.h
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
index 6cae06117b55..ed3239bbfae2 100644
--- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
+++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
@@ -73,20 +73,23 @@ extern char __stop___ex_table[];
#error implement UCONTEXT_NIA
#endif
-static int segv_error;
+struct extbl_entry {
+ int insn;
+ int fixup;
+};
static void segv_handler(int signr, siginfo_t *info, void *ptr)
{
ucontext_t *uc = (ucontext_t *)ptr;
unsigned long addr = (unsigned long)info->si_addr;
unsigned long *ip = &UCONTEXT_NIA(uc);
- unsigned long *ex_p = (unsigned long *)__start___ex_table;
+ struct extbl_entry *entry = (struct extbl_entry *)__start___ex_table;
- while (ex_p < (unsigned long *)__stop___ex_table) {
+ while (entry < (struct extbl_entry *)__stop___ex_table) {
unsigned long insn, fixup;
- insn = *ex_p++;
- fixup = *ex_p++;
+ insn = (unsigned long)&entry->insn + entry->insn;
+ fixup = (unsigned long)&entry->fixup + entry->fixup;
if (insn == *ip) {
*ip = fixup;
@@ -95,7 +98,7 @@ static void segv_handler(int signr, siginfo_t *info, void *ptr)
}
printf("No exception table match for NIA %lx ADDR %lx\n", *ip, addr);
- segv_error++;
+ abort();
}
static void setup_segv_handler(void)
@@ -119,8 +122,10 @@ static int do_one_test(char *p, int page_offset)
got = load_unaligned_zeropad(p);
- if (should != got)
+ if (should != got) {
printf("offset %u load_unaligned_zeropad returned 0x%lx, should be 0x%lx\n", page_offset, got, should);
+ return 1;
+ }
return 0;
}
@@ -145,8 +150,6 @@ static int test_body(void)
for (i = 0; i < page_size; i++)
FAIL_IF(do_one_test(mem_region+i, i));
- FAIL_IF(segv_error);
-
return 0;
}
diff --git a/tools/testing/selftests/powerpc/ptrace/.gitignore b/tools/testing/selftests/powerpc/ptrace/.gitignore
new file mode 100644
index 000000000000..349acfafc95b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/.gitignore
@@ -0,0 +1,10 @@
+ptrace-gpr
+ptrace-tm-gpr
+ptrace-tm-spd-gpr
+ptrace-tar
+ptrace-tm-tar
+ptrace-tm-spd-tar
+ptrace-vsx
+ptrace-tm-vsx
+ptrace-tm-spd-vsx
+ptrace-tm-spr
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
new file mode 100644
index 000000000000..fe6bc60dfc60
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -0,0 +1,14 @@
+TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
+ ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
+ ptrace-tm-spd-vsx ptrace-tm-spr
+
+include ../../lib.mk
+
+all: $(TEST_PROGS)
+
+CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm
+
+$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
+
+clean:
+ rm -f $(TEST_PROGS) *.o
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
new file mode 100644
index 000000000000..0b4ebcc2f485
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
@@ -0,0 +1,123 @@
+/*
+ * Ptrace test for GPR/FPR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+#include "reg.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr, *pptr;
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+
+void gpr(void)
+{
+ unsigned long gpr_buf[18];
+ float fpr_buf[32];
+
+ cptr = (int *)shmat(shm_id, NULL, 0);
+
+ asm __volatile__(
+ ASM_LOAD_GPR_IMMED(gpr_1)
+ ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+ :
+ : [gpr_1]"i"(GPR_1), [flt_1] "r" (&a)
+ : "memory", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15", "r16", "r17",
+ "r18", "r19", "r20", "r21", "r22", "r23", "r24",
+ "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+ );
+
+ cptr[1] = 1;
+
+ while (!cptr[0])
+ asm volatile("" : : : "memory");
+
+ shmdt((void *)cptr);
+ store_gpr(gpr_buf);
+ store_fpr_single_precision(fpr_buf);
+
+ if (validate_gpr(gpr_buf, GPR_3))
+ exit(1);
+
+ if (validate_fpr_float(fpr_buf, c))
+ exit(1);
+
+ exit(0);
+}
+
+int trace_gpr(pid_t child)
+{
+ unsigned long gpr[18];
+ unsigned long fpr[32];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_gpr(child, gpr));
+ FAIL_IF(validate_gpr(gpr, GPR_1));
+ FAIL_IF(show_fpr(child, fpr));
+ FAIL_IF(validate_fpr(fpr, FPR_1_REP));
+ FAIL_IF(write_gpr(child, GPR_3));
+ FAIL_IF(write_fpr(child, FPR_3_REP));
+ FAIL_IF(stop_trace(child));
+
+ return TEST_PASS;
+}
+
+int ptrace_gpr(void)
+{
+ pid_t pid;
+ int ret, status;
+
+ shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+ if (pid == 0)
+ gpr();
+
+ if (pid) {
+ pptr = (int *)shmat(shm_id, NULL, 0);
+ while (!pptr[1])
+ asm volatile("" : : : "memory");
+
+ ret = trace_gpr(pid);
+ if (ret) {
+ kill(pid, SIGTERM);
+ shmdt((void *)pptr);
+ shmctl(shm_id, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+
+ pptr[0] = 1;
+ shmdt((void *)pptr);
+
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_gpr, "ptrace_gpr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
new file mode 100644
index 000000000000..e30fef63824c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define GPR_1 1
+#define GPR_2 2
+#define GPR_3 3
+#define GPR_4 4
+
+#define FPR_1 0.001
+#define FPR_2 0.002
+#define FPR_3 0.003
+#define FPR_4 0.004
+
+#define FPR_1_REP 0x3f50624de0000000
+#define FPR_2_REP 0x3f60624de0000000
+#define FPR_3_REP 0x3f689374c0000000
+#define FPR_4_REP 0x3f70624de0000000
+
+/* Buffer must have 18 elements */
+int validate_gpr(unsigned long *gpr, unsigned long val)
+{
+ int i, found = 1;
+
+ for (i = 0; i < 18; i++) {
+ if (gpr[i] != val) {
+ printf("GPR[%d]: %lx Expected: %lx\n",
+ i+14, gpr[i], val);
+ found = 0;
+ }
+ }
+
+ if (!found)
+ return TEST_FAIL;
+ return TEST_PASS;
+}
+
+/* Buffer must have 32 elements */
+int validate_fpr(unsigned long *fpr, unsigned long val)
+{
+ int i, found = 1;
+
+ for (i = 0; i < 32; i++) {
+ if (fpr[i] != val) {
+ printf("FPR[%d]: %lx Expected: %lx\n", i, fpr[i], val);
+ found = 0;
+ }
+ }
+
+ if (!found)
+ return TEST_FAIL;
+ return TEST_PASS;
+}
+
+/* Buffer must have 32 elements */
+int validate_fpr_float(float *fpr, float val)
+{
+ int i, found = 1;
+
+ for (i = 0; i < 32; i++) {
+ if (fpr[i] != val) {
+ printf("FPR[%d]: %f Expected: %f\n", i, fpr[i], val);
+ found = 0;
+ }
+ }
+
+ if (!found)
+ return TEST_FAIL;
+ return TEST_PASS;
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
new file mode 100644
index 000000000000..f9b5069db89b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.c
@@ -0,0 +1,135 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-tar.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr;
+int *pptr;
+
+void tar(void)
+{
+ unsigned long reg[3];
+ int ret;
+
+ cptr = (int *)shmat(shm_id, NULL, 0);
+ printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+ user_write, TAR_1, PPR_1, DSCR_1);
+
+ mtspr(SPRN_TAR, TAR_1);
+ mtspr(SPRN_PPR, PPR_1);
+ mtspr(SPRN_DSCR, DSCR_1);
+
+ cptr[2] = 1;
+
+ /* Wait on parent */
+ while (!cptr[0])
+ asm volatile("" : : : "memory");
+
+ reg[0] = mfspr(SPRN_TAR);
+ reg[1] = mfspr(SPRN_PPR);
+ reg[2] = mfspr(SPRN_DSCR);
+
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ user_read, reg[0], reg[1], reg[2]);
+
+ /* Unblock the parent now */
+ cptr[1] = 1;
+ shmdt((int *)cptr);
+
+ ret = validate_tar_registers(reg, TAR_2, PPR_2, DSCR_2);
+ if (ret)
+ exit(1);
+ exit(0);
+}
+
+int trace_tar(pid_t child)
+{
+ unsigned long reg[3];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_tar_registers(child, reg));
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ ptrace_read_running, reg[0], reg[1], reg[2]);
+
+ FAIL_IF(validate_tar_registers(reg, TAR_1, PPR_1, DSCR_1));
+ FAIL_IF(stop_trace(child));
+ return TEST_PASS;
+}
+
+int trace_tar_write(pid_t child)
+{
+ FAIL_IF(start_trace(child));
+ FAIL_IF(write_tar_registers(child, TAR_2, PPR_2, DSCR_2));
+ printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+ ptrace_write_running, TAR_2, PPR_2, DSCR_2);
+
+ FAIL_IF(stop_trace(child));
+ return TEST_PASS;
+}
+
+int ptrace_tar(void)
+{
+ pid_t pid;
+ int ret, status;
+
+ shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+
+ if (pid == 0)
+ tar();
+
+ if (pid) {
+ pptr = (int *)shmat(shm_id, NULL, 0);
+ pptr[0] = 0;
+ pptr[1] = 0;
+
+ while (!pptr[2])
+ asm volatile("" : : : "memory");
+ ret = trace_tar(pid);
+ if (ret)
+ return ret;
+
+ ret = trace_tar_write(pid);
+ if (ret)
+ return ret;
+
+ /* Unblock the child now */
+ pptr[0] = 1;
+
+ /* Wait on child */
+ while (!pptr[1])
+ asm volatile("" : : : "memory");
+
+ shmdt((int *)pptr);
+
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+			return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tar, "ptrace_tar");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h
new file mode 100644
index 000000000000..aed0aac716d2
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tar.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define TAR_1 10
+#define TAR_2 20
+#define TAR_3 30
+#define TAR_4 40
+#define TAR_5 50
+
+#define DSCR_1 100
+#define DSCR_2 200
+#define DSCR_3 300
+#define DSCR_4 400
+#define DSCR_5 500
+
+#define PPR_1 0x4000000000000 /* or 31,31,31*/
+#define PPR_2 0x8000000000000 /* or 1,1,1 */
+#define PPR_3 0xc000000000000 /* or 6,6,6 */
+#define PPR_4 0x10000000000000 /* or 2,2,2 */
+
+char *user_read = "[User Read (Running)]";
+char *user_write = "[User Write (Running)]";
+char *ptrace_read_running = "[Ptrace Read (Running)]";
+char *ptrace_write_running = "[Ptrace Write (Running)]";
+char *ptrace_read_ckpt = "[Ptrace Read (Checkpointed)]";
+char *ptrace_write_ckpt = "[Ptrace Write (Checkpointed)]";
+
+int validate_tar_registers(unsigned long *reg, unsigned long tar,
+ unsigned long ppr, unsigned long dscr)
+{
+ int match = 1;
+
+ if (reg[0] != tar)
+ match = 0;
+
+ if (reg[1] != ppr)
+ match = 0;
+
+ if (reg[2] != dscr)
+ match = 0;
+
+ if (!match)
+ return TEST_FAIL;
+ return TEST_PASS;
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
new file mode 100644
index 000000000000..59206b96e98a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
@@ -0,0 +1,158 @@
+/*
+ * Ptrace test for GPR/FPR registers in TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+#include "tm.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+unsigned long *cptr, *pptr;
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+
+void tm_gpr(void)
+{
+ unsigned long gpr_buf[18];
+ unsigned long result, texasr;
+ float fpr_buf[32];
+
+ printf("Starting the child\n");
+ cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+ cptr[1] = 0;
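+	/*
+	 * GPR_1/FPR_1 loaded before tbegin. become the checkpointed state,
+	 * GPR_2/FPR_2 are loaded transactionally.  The child then suspends,
+	 * flags the parent through cptr[1] and spins; the tracer's writes to
+	 * the checkpointed state abort the transaction, so after rollback
+	 * the registers hold the values the tracer wrote (GPR_3/FPR_3).
+	 */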
+ asm __volatile__(
+ ASM_LOAD_GPR_IMMED(gpr_1)
+ ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+ "1: ;"
+ "tbegin.;"
+ "beq 2f;"
+ ASM_LOAD_GPR_IMMED(gpr_2)
+ ASM_LOAD_FPR_SINGLE_PRECISION(flt_2)
+ "tsuspend.;"
+ "li 7, 1;"
+ "stw 7, 0(%[cptr1]);"
+ "tresume.;"
+ "b .;"
+
+ "tend.;"
+ "li 0, 0;"
+ "ori %[res], 0, 0;"
+ "b 3f;"
+
+ /* Transaction abort handler */
+ "2: ;"
+ "li 0, 1;"
+ "ori %[res], 0, 0;"
+ "mfspr %[texasr], %[sprn_texasr];"
+
+ "3: ;"
+ : [res] "=r" (result), [texasr] "=r" (texasr)
+ : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2),
+ [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
+ [flt_2] "r" (&b), [cptr1] "r" (&cptr[1])
+ : "memory", "r7", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15", "r16",
+ "r17", "r18", "r19", "r20", "r21", "r22",
+ "r23", "r24", "r25", "r26", "r27", "r28",
+ "r29", "r30", "r31"
+ );
+
+ if (result) {
+ if (!cptr[0])
+ goto trans;
+
+ shmdt((void *)cptr);
+ store_gpr(gpr_buf);
+ store_fpr_single_precision(fpr_buf);
+
+ if (validate_gpr(gpr_buf, GPR_3))
+ exit(1);
+
+ if (validate_fpr_float(fpr_buf, c))
+ exit(1);
+
+ exit(0);
+ }
+ shmdt((void *)cptr);
+ exit(1);
+}
+
+int trace_tm_gpr(pid_t child)
+{
+ unsigned long gpr[18];
+ unsigned long fpr[32];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_gpr(child, gpr));
+ FAIL_IF(validate_gpr(gpr, GPR_2));
+ FAIL_IF(show_fpr(child, fpr));
+ FAIL_IF(validate_fpr(fpr, FPR_2_REP));
+ FAIL_IF(show_ckpt_fpr(child, fpr));
+ FAIL_IF(validate_fpr(fpr, FPR_1_REP));
+ FAIL_IF(show_ckpt_gpr(child, gpr));
+ FAIL_IF(validate_gpr(gpr, GPR_1));
+ FAIL_IF(write_ckpt_gpr(child, GPR_3));
+ FAIL_IF(write_ckpt_fpr(child, FPR_3_REP));
+
+ pptr[0] = 1;
+ FAIL_IF(stop_trace(child));
+
+ return TEST_PASS;
+}
+
+int ptrace_tm_gpr(void)
+{
+ pid_t pid;
+ int ret, status;
+
+ SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(unsigned long) * 2, 0777|IPC_CREAT);
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+ if (pid == 0)
+ tm_gpr();
+
+ if (pid) {
+ pptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+ while (!pptr[1])
+ asm volatile("" : : : "memory");
+ ret = trace_tm_gpr(pid);
+ if (ret) {
+ kill(pid, SIGTERM);
+ return TEST_FAIL;
+ }
+
+ shmdt((void *)pptr);
+
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tm_gpr, "ptrace_tm_gpr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
new file mode 100644
index 000000000000..327fa943c7f3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
@@ -0,0 +1,169 @@
+/*
+ * Ptrace test for GPR/FPR registers in TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-gpr.h"
+#include "tm.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr, *pptr;
+
+float a = FPR_1;
+float b = FPR_2;
+float c = FPR_3;
+float d = FPR_4;
+
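+/* Referenced only from the inline asm below via "bl wait_parent" */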
+__attribute__((used)) void wait_parent(void)
+{
+ cptr[2] = 1;
+ while (!cptr[1])
+ asm volatile("" : : : "memory");
+}
+
+void tm_spd_gpr(void)
+{
+ unsigned long gpr_buf[18];
+ unsigned long result, texasr;
+ float fpr_buf[32];
+
+ cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+ cptr[2] = 0;
+ asm __volatile__(
+ ASM_LOAD_GPR_IMMED(gpr_1)
+ ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
+
+ "1: ;"
+ "tbegin.;"
+ "beq 2f;"
+
+ ASM_LOAD_GPR_IMMED(gpr_2)
+ "tsuspend.;"
+ ASM_LOAD_GPR_IMMED(gpr_4)
+ ASM_LOAD_FPR_SINGLE_PRECISION(flt_4)
+
+ "bl wait_parent;"
+ "tresume.;"
+ "tend.;"
+ "li 0, 0;"
+ "ori %[res], 0, 0;"
+ "b 3f;"
+
+ /* Transaction abort handler */
+ "2: ;"
+ "li 0, 1;"
+ "ori %[res], 0, 0;"
+ "mfspr %[texasr], %[sprn_texasr];"
+
+ "3: ;"
+ : [res] "=r" (result), [texasr] "=r" (texasr)
+ : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
+ [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
+ [flt_2] "r" (&b), [flt_4] "r" (&d)
+ : "memory", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+ );
+
+ if (result) {
+ if (!cptr[0])
+ goto trans;
+
+ shmdt((void *)cptr);
+ store_gpr(gpr_buf);
+ store_fpr_single_precision(fpr_buf);
+
+ if (validate_gpr(gpr_buf, GPR_3))
+ exit(1);
+
+ if (validate_fpr_float(fpr_buf, c))
+ exit(1);
+ exit(0);
+ }
+ shmdt((void *)cptr);
+ exit(1);
+}
+
+int trace_tm_spd_gpr(pid_t child)
+{
+ unsigned long gpr[18];
+ unsigned long fpr[32];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_gpr(child, gpr));
+ FAIL_IF(validate_gpr(gpr, GPR_4));
+ FAIL_IF(show_fpr(child, fpr));
+ FAIL_IF(validate_fpr(fpr, FPR_4_REP));
+ FAIL_IF(show_ckpt_fpr(child, fpr));
+ FAIL_IF(validate_fpr(fpr, FPR_1_REP));
+ FAIL_IF(show_ckpt_gpr(child, gpr));
+ FAIL_IF(validate_gpr(gpr, GPR_1));
+ FAIL_IF(write_ckpt_gpr(child, GPR_3));
+ FAIL_IF(write_ckpt_fpr(child, FPR_3_REP));
+
+ pptr[0] = 1;
+ pptr[1] = 1;
+ FAIL_IF(stop_trace(child));
+ return TEST_PASS;
+}
+
+int ptrace_tm_spd_gpr(void)
+{
+ pid_t pid;
+ int ret, status;
+
+ SKIP_IF(!have_htm());
+ shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+
+ if (pid == 0)
+ tm_spd_gpr();
+
+ if (pid) {
+ pptr = (int *)shmat(shm_id, NULL, 0);
+ pptr[0] = 0;
+ pptr[1] = 0;
+
+ while (!pptr[2])
+ asm volatile("" : : : "memory");
+ ret = trace_tm_spd_gpr(pid);
+ if (ret) {
+ kill(pid, SIGTERM);
+ shmdt((void *)pptr);
+ shmctl(shm_id, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+
+ shmdt((void *)pptr);
+
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tm_spd_gpr, "ptrace_tm_spd_gpr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
new file mode 100644
index 000000000000..b3c061dc9512
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
@@ -0,0 +1,174 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers in the TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-tar.h"
+
+int shm_id;
+int *cptr, *pptr;
+
+__attribute__((used)) void wait_parent(void)
+{
+ cptr[2] = 1;
+ while (!cptr[1])
+ asm volatile("" : : : "memory");
+}
+
+void tm_spd_tar(void)
+{
+ unsigned long result, texasr;
+ unsigned long regs[3];
+ int ret;
+
+ cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+ cptr[2] = 0;
+ asm __volatile__(
+ "li 4, %[tar_1];"
+ "mtspr %[sprn_tar], 4;" /* TAR_1 */
+ "li 4, %[dscr_1];"
+ "mtspr %[sprn_dscr], 4;" /* DSCR_1 */
+		"or 31,31,31;"	/* PPR_1 */
+
+ "1: ;"
+ "tbegin.;"
+ "beq 2f;"
+
+ "li 4, %[tar_2];"
+ "mtspr %[sprn_tar], 4;" /* TAR_2 */
+ "li 4, %[dscr_2];"
+ "mtspr %[sprn_dscr], 4;" /* DSCR_2 */
+ "or 1,1,1;" /* PPR_2 */
+
+ "tsuspend.;"
+ "li 4, %[tar_3];"
+ "mtspr %[sprn_tar], 4;" /* TAR_3 */
+ "li 4, %[dscr_3];"
+ "mtspr %[sprn_dscr], 4;" /* DSCR_3 */
+ "or 6,6,6;" /* PPR_3 */
+ "bl wait_parent;"
+ "tresume.;"
+
+ "tend.;"
+ "li 0, 0;"
+ "ori %[res], 0, 0;"
+ "b 3f;"
+
+ /* Transaction abort handler */
+ "2: ;"
+ "li 0, 1;"
+ "ori %[res], 0, 0;"
+ "mfspr %[texasr], %[sprn_texasr];"
+
+ "3: ;"
+
+ : [res] "=r" (result), [texasr] "=r" (texasr)
+ : [val] "r" (cptr[1]), [sprn_dscr]"i"(SPRN_DSCR),
+ [sprn_tar]"i"(SPRN_TAR), [sprn_ppr]"i"(SPRN_PPR),
+ [sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
+ [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
+ [tar_3]"i"(TAR_3), [dscr_3]"i"(DSCR_3)
+ : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+ );
+
+ /* TM failed, analyse */
+ if (result) {
+ if (!cptr[0])
+ goto trans;
+
+ regs[0] = mfspr(SPRN_TAR);
+ regs[1] = mfspr(SPRN_PPR);
+ regs[2] = mfspr(SPRN_DSCR);
+
+		shmdt((void *)cptr);
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ user_read, regs[0], regs[1], regs[2]);
+
+ ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
+ if (ret)
+ exit(1);
+ exit(0);
+ }
+	shmdt((void *)cptr);
+ exit(1);
+}
+
+int trace_tm_spd_tar(pid_t child)
+{
+ unsigned long regs[3];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_tar_registers(child, regs));
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ ptrace_read_running, regs[0], regs[1], regs[2]);
+
+ FAIL_IF(validate_tar_registers(regs, TAR_3, PPR_3, DSCR_3));
+ FAIL_IF(show_tm_checkpointed_state(child, regs));
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ ptrace_read_ckpt, regs[0], regs[1], regs[2]);
+
+ FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1));
+ FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4));
+ printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+ ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4);
+
+ pptr[0] = 1;
+ pptr[1] = 1;
+ FAIL_IF(stop_trace(child));
+ return TEST_PASS;
+}
+
+int ptrace_tm_spd_tar(void)
+{
+ pid_t pid;
+ int ret, status;
+
+ SKIP_IF(!have_htm());
+ shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		tm_spd_tar();
+
+ pptr = (int *)shmat(shm_id, NULL, 0);
+ pptr[0] = 0;
+ pptr[1] = 0;
+
+ if (pid) {
+ while (!pptr[2])
+ asm volatile("" : : : "memory");
+ ret = trace_tm_spd_tar(pid);
+ if (ret) {
+ kill(pid, SIGTERM);
+			shmdt((void *)pptr);
+ shmctl(shm_id, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+
+		shmdt((void *)pptr);
+
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tm_spd_tar, "ptrace_tm_spd_tar");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
new file mode 100644
index 000000000000..0df3c23b7888
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -0,0 +1,185 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM Suspend context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+int *cptr, *pptr;
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_load_new[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
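+/* These helpers are called from the inline asm below, hence the used attribute */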
+__attribute__((used)) void load_vsx(void)
+{
+ loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_new(void)
+{
+ loadvsx(fp_load_new, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+ loadvsx(fp_load_ckpt, 0);
+}
+
+__attribute__((used)) void wait_parent(void)
+{
+ cptr[2] = 1;
+ while (!cptr[1])
+ asm volatile("" : : : "memory");
+}
+
+void tm_spd_vsx(void)
+{
+ unsigned long result, texasr;
+ int ret;
+
+ cptr = (int *)shmat(shm_id, NULL, 0);
+
+trans:
+ cptr[2] = 0;
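+	/*
+	 * fp_load_ckpt, loaded before tbegin., is the checkpointed state;
+	 * fp_load_new is loaded transactionally and fp_load while suspended.
+	 * Once the tracer rewrites the checkpointed state the transaction
+	 * aborts and the registers roll back to fp_load_ckpt_new.
+	 */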
+ asm __volatile__(
+ "bl load_vsx_ckpt;"
+
+ "1: ;"
+ "tbegin.;"
+ "beq 2f;"
+
+ "bl load_vsx_new;"
+ "tsuspend.;"
+ "bl load_vsx;"
+ "bl wait_parent;"
+ "tresume.;"
+
+ "tend.;"
+ "li 0, 0;"
+ "ori %[res], 0, 0;"
+ "b 3f;"
+
+ "2: ;"
+ "li 0, 1;"
+ "ori %[res], 0, 0;"
+ "mfspr %[texasr], %[sprn_texasr];"
+
+ "3: ;"
+ : [res] "=r" (result), [texasr] "=r" (texasr)
+ : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+ [sprn_texasr] "i" (SPRN_TEXASR)
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r8", "r9", "r10", "r11"
+ );
+
+ if (result) {
+ if (!cptr[0])
+ goto trans;
+ shmdt((void *)cptr);
+
+ storevsx(fp_store, 0);
+ ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+ if (ret)
+ exit(1);
+ exit(0);
+ }
+ shmdt((void *)cptr);
+ exit(1);
+}
+
+int trace_tm_spd_vsx(pid_t child)
+{
+ unsigned long vsx[VSX_MAX];
+ unsigned long vmx[VMX_MAX + 2][2];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_vsx(child, vsx));
+ FAIL_IF(validate_vsx(vsx, fp_load));
+ FAIL_IF(show_vmx(child, vmx));
+ FAIL_IF(validate_vmx(vmx, fp_load));
+ FAIL_IF(show_vsx_ckpt(child, vsx));
+ FAIL_IF(validate_vsx(vsx, fp_load_ckpt));
+ FAIL_IF(show_vmx_ckpt(child, vmx));
+ FAIL_IF(validate_vmx(vmx, fp_load_ckpt));
+
+ memset(vsx, 0, sizeof(vsx));
+ memset(vmx, 0, sizeof(vmx));
+
+ load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
+
+ FAIL_IF(write_vsx_ckpt(child, vsx));
+ FAIL_IF(write_vmx_ckpt(child, vmx));
+
+ pptr[0] = 1;
+ pptr[1] = 1;
+ FAIL_IF(stop_trace(child));
+
+ return TEST_PASS;
+}
+
+int ptrace_tm_spd_vsx(void)
+{
+ pid_t pid;
+ int ret, status, i;
+
+ SKIP_IF(!have_htm());
+ shm_id = shmget(IPC_PRIVATE, sizeof(int) * 3, 0777|IPC_CREAT);
+
+ for (i = 0; i < 128; i++) {
+ fp_load[i] = 1 + rand();
+ fp_load_new[i] = 1 + 2 * rand();
+ fp_load_ckpt[i] = 1 + 3 * rand();
+ fp_load_ckpt_new[i] = 1 + 4 * rand();
+ }
+
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+
+ if (pid == 0)
+ tm_spd_vsx();
+
+ if (pid) {
+ pptr = (int *)shmat(shm_id, NULL, 0);
+ while (!pptr[2])
+ asm volatile("" : : : "memory");
+
+ ret = trace_tm_spd_vsx(pid);
+ if (ret) {
+ kill(pid, SIGKILL);
+ shmdt((void *)pptr);
+ shmctl(shm_id, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+
+ shmdt((void *)pptr);
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tm_spd_vsx, "ptrace_tm_spd_vsx");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
new file mode 100644
index 000000000000..94e57cb89769
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -0,0 +1,168 @@
+/*
+ * Ptrace test TM SPR registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+
+/* Tracee and tracer shared data */
+struct shared {
+ int flag;
+ struct tm_spr_regs regs;
+};
+unsigned long tfhar;
+
+int shm_id;
+struct shared *cptr, *pptr;
+
+int shm_id1;
+int *cptr1, *pptr1;
+
+#define TM_KVM_SCHED 0xe0000001ac000001
+int validate_tm_spr(struct tm_spr_regs *regs)
+{
+ FAIL_IF(regs->tm_tfhar != tfhar);
+ FAIL_IF((regs->tm_texasr == TM_KVM_SCHED) && (regs->tm_tfiar != 0));
+
+ return TEST_PASS;
+}
+
+void tm_spr(void)
+{
+ unsigned long result, texasr;
+ int ret;
+
+ cptr = (struct shared *)shmat(shm_id, NULL, 0);
+ cptr1 = (int *)shmat(shm_id1, NULL, 0);
+
+trans:
+ cptr1[0] = 0;
+ asm __volatile__(
+ "1: ;"
+ /* TM failover handler should follow "tbegin.;" */
+ "mflr 31;"
+ "bl 4f;" /* $ = TFHAR - 12 */
+ "4: ;"
+ "mflr %[tfhar];"
+ "mtlr 31;"
+
+ "tbegin.;"
+ "beq 2f;"
+
+ "tsuspend.;"
+ "li 8, 1;"
+ "sth 8, 0(%[cptr1]);"
+ "tresume.;"
+ "b .;"
+
+ "tend.;"
+ "li 0, 0;"
+ "ori %[res], 0, 0;"
+ "b 3f;"
+
+ "2: ;"
+
+ "li 0, 1;"
+ "ori %[res], 0, 0;"
+ "mfspr %[texasr], %[sprn_texasr];"
+
+ "3: ;"
+	: [tfhar] "=r" (tfhar), [res] "=r" (result),
+	[texasr] "=r" (texasr)
+	: [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "r" (cptr1)
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r8", "r9", "r10", "r11", "r31"
+ );
+
+	/*
+	 * TFHAR records the address following tbegin., which is three
+	 * instructions (12 bytes) past the mflr captured above.
+	 */
+ tfhar += 12;
+
+ if (result) {
+ if (!cptr->flag)
+ goto trans;
+
+ ret = validate_tm_spr((struct tm_spr_regs *)&cptr->regs);
+ shmdt((void *)cptr);
+ shmdt((void *)cptr1);
+ if (ret)
+ exit(1);
+ exit(0);
+ }
+ shmdt((void *)cptr);
+ shmdt((void *)cptr1);
+ exit(1);
+}
+
+int trace_tm_spr(pid_t child)
+{
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_tm_spr(child, (struct tm_spr_regs *)&pptr->regs));
+
+ printf("TFHAR: %lx TEXASR: %lx TFIAR: %lx\n", pptr->regs.tm_tfhar,
+ pptr->regs.tm_texasr, pptr->regs.tm_tfiar);
+
+ pptr->flag = 1;
+ FAIL_IF(stop_trace(child));
+
+ return TEST_PASS;
+}
+
+int ptrace_tm_spr(void)
+{
+ pid_t pid;
+ int ret, status;
+
+ SKIP_IF(!have_htm());
+ shm_id = shmget(IPC_PRIVATE, sizeof(struct shared), 0777|IPC_CREAT);
+ shm_id1 = shmget(IPC_PRIVATE, sizeof(int), 0777|IPC_CREAT);
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+
+ if (pid == 0)
+ tm_spr();
+
+ if (pid) {
+ pptr = (struct shared *)shmat(shm_id, NULL, 0);
+ pptr1 = (int *)shmat(shm_id1, NULL, 0);
+
+ while (!pptr1[0])
+ asm volatile("" : : : "memory");
+ ret = trace_tm_spr(pid);
+ if (ret) {
+ kill(pid, SIGKILL);
+ shmdt((void *)pptr);
+ shmdt((void *)pptr1);
+ shmctl(shm_id, IPC_RMID, NULL);
+ shmctl(shm_id1, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+
+ shmdt((void *)pptr);
+ shmdt((void *)pptr1);
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ shmctl(shm_id1, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tm_spr, "ptrace_tm_spr");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
new file mode 100644
index 000000000000..48b462f75023
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
@@ -0,0 +1,160 @@
+/*
+ * Ptrace test for TAR, PPR, DSCR registers in the TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-tar.h"
+
+int shm_id;
+unsigned long *cptr, *pptr;
+
+
+void tm_tar(void)
+{
+ unsigned long result, texasr;
+ unsigned long regs[3];
+ int ret;
+
+ cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+ cptr[1] = 0;
+ asm __volatile__(
+ "li 4, %[tar_1];"
+ "mtspr %[sprn_tar], 4;" /* TAR_1 */
+ "li 4, %[dscr_1];"
+ "mtspr %[sprn_dscr], 4;" /* DSCR_1 */
+		"or 31,31,31;"	/* PPR_1 */
+
+ "1: ;"
+ "tbegin.;"
+ "beq 2f;"
+
+ "li 4, %[tar_2];"
+ "mtspr %[sprn_tar], 4;" /* TAR_2 */
+ "li 4, %[dscr_2];"
+ "mtspr %[sprn_dscr], 4;" /* DSCR_2 */
+ "or 1,1,1;" /* PPR_2 */
+ "tsuspend.;"
+ "li 0, 1;"
+ "stw 0, 0(%[cptr1]);"
+ "tresume.;"
+ "b .;"
+
+ "tend.;"
+ "li 0, 0;"
+ "ori %[res], 0, 0;"
+ "b 3f;"
+
+ /* Transaction abort handler */
+ "2: ;"
+ "li 0, 1;"
+ "ori %[res], 0, 0;"
+ "mfspr %[texasr], %[sprn_texasr];"
+
+ "3: ;"
+
+ : [res] "=r" (result), [texasr] "=r" (texasr)
+ : [sprn_dscr]"i"(SPRN_DSCR), [sprn_tar]"i"(SPRN_TAR),
+ [sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
+ [tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
+ [dscr_2]"i"(DSCR_2), [cptr1] "r" (&cptr[1])
+ : "memory", "r0", "r1", "r3", "r4", "r5", "r6"
+ );
+
+ /* TM failed, analyse */
+ if (result) {
+ if (!cptr[0])
+ goto trans;
+
+ regs[0] = mfspr(SPRN_TAR);
+ regs[1] = mfspr(SPRN_PPR);
+ regs[2] = mfspr(SPRN_DSCR);
+
+		shmdt((void *)cptr);
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ user_read, regs[0], regs[1], regs[2]);
+
+ ret = validate_tar_registers(regs, TAR_4, PPR_4, DSCR_4);
+ if (ret)
+ exit(1);
+ exit(0);
+ }
+	shmdt((void *)cptr);
+ exit(1);
+}
+
+int trace_tm_tar(pid_t child)
+{
+ unsigned long regs[3];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_tar_registers(child, regs));
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ ptrace_read_running, regs[0], regs[1], regs[2]);
+
+ FAIL_IF(validate_tar_registers(regs, TAR_2, PPR_2, DSCR_2));
+ FAIL_IF(show_tm_checkpointed_state(child, regs));
+ printf("%-30s TAR: %lu PPR: %lx DSCR: %lu\n",
+ ptrace_read_ckpt, regs[0], regs[1], regs[2]);
+
+ FAIL_IF(validate_tar_registers(regs, TAR_1, PPR_1, DSCR_1));
+ FAIL_IF(write_ckpt_tar_registers(child, TAR_4, PPR_4, DSCR_4));
+ printf("%-30s TAR: %u PPR: %lx DSCR: %u\n",
+ ptrace_write_ckpt, TAR_4, PPR_4, DSCR_4);
+
+ pptr[0] = 1;
+ FAIL_IF(stop_trace(child));
+ return TEST_PASS;
+}
+
+int ptrace_tm_tar(void)
+{
+ pid_t pid;
+ int ret, status;
+
+ SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(unsigned long) * 2, 0777|IPC_CREAT);
+	pid = fork();
+	if (pid < 0) {
+		perror("fork() failed");
+		return TEST_FAIL;
+	}
+
+	if (pid == 0)
+		tm_tar();
+
+ pptr = (unsigned long *)shmat(shm_id, NULL, 0);
+ pptr[0] = 0;
+
+ if (pid) {
+ while (!pptr[1])
+ asm volatile("" : : : "memory");
+ ret = trace_tm_tar(pid);
+ if (ret) {
+ kill(pid, SIGTERM);
+			shmdt((void *)pptr);
+ shmctl(shm_id, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+		shmdt((void *)pptr);
+
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tm_tar, "ptrace_tm_tar");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
new file mode 100644
index 000000000000..b4081e2b22d5
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -0,0 +1,168 @@
+/*
+ * Ptrace test for VMX/VSX registers in the TM context
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "tm.h"
+#include "ptrace-vsx.h"
+
+int shm_id;
+unsigned long *cptr, *pptr;
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+unsigned long fp_load_ckpt[VEC_MAX];
+unsigned long fp_load_ckpt_new[VEC_MAX];
+
+__attribute__((used)) void load_vsx(void)
+{
+ loadvsx(fp_load, 0);
+}
+
+__attribute__((used)) void load_vsx_ckpt(void)
+{
+ loadvsx(fp_load_ckpt, 0);
+}
+
+void tm_vsx(void)
+{
+ unsigned long result, texasr;
+ int ret;
+
+ cptr = (unsigned long *)shmat(shm_id, NULL, 0);
+
+trans:
+ cptr[1] = 0;
+ asm __volatile__(
+ "bl load_vsx_ckpt;"
+
+ "1: ;"
+ "tbegin.;"
+ "beq 2f;"
+
+ "bl load_vsx;"
+ "tsuspend.;"
+ "li 7, 1;"
+ "stw 7, 0(%[cptr1]);"
+ "tresume.;"
+ "b .;"
+
+ "tend.;"
+ "li 0, 0;"
+ "ori %[res], 0, 0;"
+ "b 3f;"
+
+ "2: ;"
+ "li 0, 1;"
+ "ori %[res], 0, 0;"
+ "mfspr %[texasr], %[sprn_texasr];"
+
+ "3: ;"
+ : [res] "=r" (result), [texasr] "=r" (texasr)
+ : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
+ [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "r" (&cptr[1])
+ : "memory", "r0", "r1", "r2", "r3", "r4",
+ "r7", "r8", "r9", "r10", "r11"
+ );
+
+ if (result) {
+ if (!cptr[0])
+ goto trans;
+
+ shmdt((void *)cptr);
+ storevsx(fp_store, 0);
+ ret = compare_vsx_vmx(fp_store, fp_load_ckpt_new);
+ if (ret)
+ exit(1);
+ exit(0);
+ }
+ shmdt((void *)cptr);
+ exit(1);
+}
+
+int trace_tm_vsx(pid_t child)
+{
+ unsigned long vsx[VSX_MAX];
+ unsigned long vmx[VMX_MAX + 2][2];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_vsx(child, vsx));
+ FAIL_IF(validate_vsx(vsx, fp_load));
+ FAIL_IF(show_vmx(child, vmx));
+ FAIL_IF(validate_vmx(vmx, fp_load));
+ FAIL_IF(show_vsx_ckpt(child, vsx));
+ FAIL_IF(validate_vsx(vsx, fp_load_ckpt));
+ FAIL_IF(show_vmx_ckpt(child, vmx));
+ FAIL_IF(validate_vmx(vmx, fp_load_ckpt));
+ memset(vsx, 0, sizeof(vsx));
+ memset(vmx, 0, sizeof(vmx));
+
+ load_vsx_vmx(fp_load_ckpt_new, vsx, vmx);
+
+ FAIL_IF(write_vsx_ckpt(child, vsx));
+ FAIL_IF(write_vmx_ckpt(child, vmx));
+ pptr[0] = 1;
+ FAIL_IF(stop_trace(child));
+ return TEST_PASS;
+}
+
+int ptrace_tm_vsx(void)
+{
+ pid_t pid;
+ int ret, status, i;
+
+ SKIP_IF(!have_htm());
+	shm_id = shmget(IPC_PRIVATE, sizeof(unsigned long) * 2, 0777|IPC_CREAT);
+
+ for (i = 0; i < 128; i++) {
+ fp_load[i] = 1 + rand();
+ fp_load_ckpt[i] = 1 + 2 * rand();
+ fp_load_ckpt_new[i] = 1 + 3 * rand();
+ }
+
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+
+ if (pid == 0)
+ tm_vsx();
+
+ if (pid) {
+ pptr = (unsigned long *)shmat(shm_id, NULL, 0);
+ while (!pptr[1])
+ asm volatile("" : : : "memory");
+
+ ret = trace_tm_vsx(pid);
+ if (ret) {
+ kill(pid, SIGKILL);
+ shmdt((void *)pptr);
+ shmctl(shm_id, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+
+ shmdt((void *)pptr);
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_tm_vsx, "ptrace_tm_vsx");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
new file mode 100644
index 000000000000..04084ee7d27b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c
@@ -0,0 +1,117 @@
+/*
+ * Ptrace test for VMX/VSX registers
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include "ptrace.h"
+#include "ptrace-vsx.h"
+
+/* Tracer and Tracee Shared Data */
+int shm_id;
+int *cptr, *pptr;
+
+unsigned long fp_load[VEC_MAX];
+unsigned long fp_load_new[VEC_MAX];
+unsigned long fp_store[VEC_MAX];
+
+void vsx(void)
+{
+ int ret;
+
+ cptr = (int *)shmat(shm_id, NULL, 0);
+ loadvsx(fp_load, 0);
+ cptr[1] = 1;
+
+ while (!cptr[0])
+ asm volatile("" : : : "memory");
+ shmdt((void *) cptr);
+
+ storevsx(fp_store, 0);
+ ret = compare_vsx_vmx(fp_store, fp_load_new);
+ if (ret)
+ exit(1);
+ exit(0);
+}
+
+int trace_vsx(pid_t child)
+{
+ unsigned long vsx[VSX_MAX];
+ unsigned long vmx[VMX_MAX + 2][2];
+
+ FAIL_IF(start_trace(child));
+ FAIL_IF(show_vsx(child, vsx));
+ FAIL_IF(validate_vsx(vsx, fp_load));
+ FAIL_IF(show_vmx(child, vmx));
+ FAIL_IF(validate_vmx(vmx, fp_load));
+
+ memset(vsx, 0, sizeof(vsx));
+ memset(vmx, 0, sizeof(vmx));
+ load_vsx_vmx(fp_load_new, vsx, vmx);
+
+ FAIL_IF(write_vsx(child, vsx));
+ FAIL_IF(write_vmx(child, vmx));
+ FAIL_IF(stop_trace(child));
+
+ return TEST_PASS;
+}
+
+int ptrace_vsx(void)
+{
+ pid_t pid;
+ int ret, status, i;
+
+ shm_id = shmget(IPC_PRIVATE, sizeof(int) * 2, 0777|IPC_CREAT);
+
+ for (i = 0; i < VEC_MAX; i++)
+ fp_load[i] = i + rand();
+
+ for (i = 0; i < VEC_MAX; i++)
+ fp_load_new[i] = i + 2 * rand();
+
+ pid = fork();
+ if (pid < 0) {
+ perror("fork() failed");
+ return TEST_FAIL;
+ }
+
+ if (pid == 0)
+ vsx();
+
+ if (pid) {
+ pptr = (int *)shmat(shm_id, NULL, 0);
+ while (!pptr[1])
+ asm volatile("" : : : "memory");
+
+ ret = trace_vsx(pid);
+ if (ret) {
+ kill(pid, SIGTERM);
+ shmdt((void *)pptr);
+ shmctl(shm_id, IPC_RMID, NULL);
+ return TEST_FAIL;
+ }
+
+ pptr[0] = 1;
+ shmdt((void *)pptr);
+
+ ret = wait(&status);
+ shmctl(shm_id, IPC_RMID, NULL);
+ if (ret != pid) {
+ printf("Child's exit status not captured\n");
+ return TEST_FAIL;
+ }
+
+ return (WIFEXITED(status) && WEXITSTATUS(status)) ? TEST_FAIL :
+ TEST_PASS;
+ }
+ return TEST_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(ptrace_vsx, "ptrace_vsx");
+}
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
new file mode 100644
index 000000000000..f4e4b427c9d9
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-vsx.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#define VEC_MAX 128
+#define VSX_MAX 32
+#define VMX_MAX 32
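+/* VMX buffers hold VMX_MAX + 2 vectors: 32 VRs followed by VSCR and VRSAVE */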
+
+/*
+ * unsigned long vsx[32]
+ * unsigned long load[128]
+ */
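+/*
+ * loadvsx() fills both doublewords of each VSR from load[], while the
+ * VSX regset returned by ptrace carries only the second doubleword of
+ * VSR0-31 (the first aliases the FPRs), hence the 2 * i + 1 index.
+ */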
+int validate_vsx(unsigned long *vsx, unsigned long *load)
+{
+ int i;
+
+ for (i = 0; i < VSX_MAX; i++) {
+ if (vsx[i] != load[2 * i + 1]) {
+ printf("vsx[%d]: %lx load[%d] %lx\n",
+ i, vsx[i], 2 * i + 1, load[2 * i + 1]);
+ return TEST_FAIL;
+ }
+ }
+ return TEST_PASS;
+}
+
+/*
+ * unsigned long vmx[32][2]
+ * unsigned long load[128]
+ */
+int validate_vmx(unsigned long vmx[][2], unsigned long *load)
+{
+ int i;
+
+ for (i = 0; i < VMX_MAX; i++) {
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ if ((vmx[i][0] != load[64 + 2 * i]) ||
+ (vmx[i][1] != load[65 + 2 * i])) {
+ printf("vmx[%d][0]: %lx load[%d] %lx\n",
+ i, vmx[i][0], 64 + 2 * i,
+ load[64 + 2 * i]);
+ printf("vmx[%d][1]: %lx load[%d] %lx\n",
+ i, vmx[i][1], 65 + 2 * i,
+ load[65 + 2 * i]);
+ return TEST_FAIL;
+ }
+ #else /*
+ * In LE each value pair is stored in an
+ * alternate manner.
+ */
+ if ((vmx[i][0] != load[65 + 2 * i]) ||
+ (vmx[i][1] != load[64 + 2 * i])) {
+ printf("vmx[%d][0]: %lx load[%d] %lx\n",
+ i, vmx[i][0], 65 + 2 * i,
+ load[65 + 2 * i]);
+ printf("vmx[%d][1]: %lx load[%d] %lx\n",
+ i, vmx[i][1], 64 + 2 * i,
+ load[64 + 2 * i]);
+ return TEST_FAIL;
+ }
+ #endif
+ }
+ return TEST_PASS;
+}
+
+/*
+ * unsigned long store[128]
+ * unsigned long load[128]
+ */
+int compare_vsx_vmx(unsigned long *store, unsigned long *load)
+{
+ int i;
+
+ for (i = 0; i < VSX_MAX; i++) {
+ if (store[1 + 2 * i] != load[1 + 2 * i]) {
+ printf("store[%d]: %lx load[%d] %lx\n",
+					1 + 2 * i, store[1 + 2 * i],
+					1 + 2 * i, load[1 + 2 * i]);
+ return TEST_FAIL;
+ }
+ }
+
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ for (i = 64; i < VEC_MAX; i++) {
+ if (store[i] != load[i]) {
+ printf("store[%d]: %lx load[%d] %lx\n",
+ i, store[i], i, load[i]);
+ return TEST_FAIL;
+ }
+ }
+ #else /* In LE each value pair is stored in an alternate manner */
+ for (i = 64; i < VEC_MAX; i++) {
+ if (!(i % 2) && (store[i] != load[i+1])) {
+ printf("store[%d]: %lx load[%d] %lx\n",
+ i, store[i], i+1, load[i+1]);
+ return TEST_FAIL;
+ }
+ if ((i % 2) && (store[i] != load[i-1])) {
+			printf("store[%d]: %lx load[%d] %lx\n",
+ i, store[i], i-1, load[i-1]);
+ return TEST_FAIL;
+ }
+ }
+ #endif
+ return TEST_PASS;
+}
+
+void load_vsx_vmx(unsigned long *load, unsigned long *vsx,
+ unsigned long vmx[][2])
+{
+ int i;
+
+ for (i = 0; i < VSX_MAX; i++)
+ vsx[i] = load[1 + 2 * i];
+
+ for (i = 0; i < VMX_MAX; i++) {
+ vmx[i][0] = load[64 + 2 * i];
+ vmx[i][1] = load[65 + 2 * i];
+ }
+}
+
+void loadvsx(void *p, int tmp);
+void storevsx(void *p, int tmp);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace.h b/tools/testing/selftests/powerpc/ptrace/ptrace.h
new file mode 100644
index 000000000000..19fb825270a1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace.h
@@ -0,0 +1,711 @@
+/*
+ * Ptrace interface test helper functions
+ *
+ * Copyright (C) 2015 Anshuman Khandual, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <malloc.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/ptrace.h>
+#include <sys/ioctl.h>
+#include <sys/uio.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/signal.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <sys/user.h>
+#include <linux/elf.h>
+#include <linux/types.h>
+#include <linux/auxvec.h>
+#include "reg.h"
+#include "utils.h"
+
+#define TEST_PASS 0
+#define TEST_FAIL 1
+
+struct fpr_regs {
+ unsigned long fpr[32];
+ unsigned long fpscr;
+};
+
+struct tm_spr_regs {
+ unsigned long tm_tfhar;
+ unsigned long tm_texasr;
+ unsigned long tm_tfiar;
+};
+
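+/* Regset note types, in case the system's elf.h predates them */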
+#ifndef NT_PPC_TAR
+#define NT_PPC_TAR 0x103
+#define NT_PPC_PPR 0x104
+#define NT_PPC_DSCR 0x105
+#define NT_PPC_EBB 0x106
+#define NT_PPC_PMU 0x107
+#define NT_PPC_TM_CGPR 0x108
+#define NT_PPC_TM_CFPR 0x109
+#define NT_PPC_TM_CVMX 0x10a
+#define NT_PPC_TM_CVSX 0x10b
+#define NT_PPC_TM_SPR 0x10c
+#define NT_PPC_TM_CTAR 0x10d
+#define NT_PPC_TM_CPPR 0x10e
+#define NT_PPC_TM_CDSCR 0x10f
+#endif
+
+/* Basic ptrace operations */
+int start_trace(pid_t child)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_ATTACH, child, NULL, NULL);
+ if (ret) {
+ perror("ptrace(PTRACE_ATTACH) failed");
+ return TEST_FAIL;
+ }
+ ret = waitpid(child, NULL, 0);
+ if (ret != child) {
+ perror("waitpid() failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int stop_trace(pid_t child)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_DETACH, child, NULL, NULL);
+ if (ret) {
+ perror("ptrace(PTRACE_DETACH) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int cont_trace(pid_t child)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_CONT, child, NULL, NULL);
+ if (ret) {
+ perror("ptrace(PTRACE_CONT) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+/* TAR, PPR, DSCR */
+int show_tar_registers(pid_t child, unsigned long *out)
+{
+ struct iovec iov;
+ unsigned long *reg;
+ int ret;
+
+ reg = malloc(sizeof(unsigned long));
+ if (!reg) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+ iov.iov_base = (u64 *) reg;
+ iov.iov_len = sizeof(unsigned long);
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TAR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ goto fail;
+ }
+ if (out)
+ out[0] = *reg;
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_PPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ goto fail;
+ }
+ if (out)
+ out[1] = *reg;
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_DSCR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ goto fail;
+ }
+ if (out)
+ out[2] = *reg;
+
+ free(reg);
+ return TEST_PASS;
+fail:
+ free(reg);
+ return TEST_FAIL;
+}
+
+int write_tar_registers(pid_t child, unsigned long tar,
+ unsigned long ppr, unsigned long dscr)
+{
+ struct iovec iov;
+ unsigned long *reg;
+ int ret;
+
+ reg = malloc(sizeof(unsigned long));
+ if (!reg) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+
+ iov.iov_base = (u64 *) reg;
+ iov.iov_len = sizeof(unsigned long);
+
+ *reg = tar;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TAR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_SETREGSET) failed");
+ goto fail;
+ }
+
+ *reg = ppr;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_PPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_SETREGSET) failed");
+ goto fail;
+ }
+
+ *reg = dscr;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_DSCR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_SETREGSET) failed");
+ goto fail;
+ }
+
+ free(reg);
+ return TEST_PASS;
+fail:
+ free(reg);
+ return TEST_FAIL;
+}
+
+int show_tm_checkpointed_state(pid_t child, unsigned long *out)
+{
+ struct iovec iov;
+ unsigned long *reg;
+ int ret;
+
+ reg = malloc(sizeof(unsigned long));
+ if (!reg) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+
+ iov.iov_base = (u64 *) reg;
+ iov.iov_len = sizeof(unsigned long);
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CTAR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ goto fail;
+ }
+ if (out)
+ out[0] = *reg;
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CPPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ goto fail;
+ }
+ if (out)
+ out[1] = *reg;
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CDSCR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ goto fail;
+ }
+ if (out)
+ out[2] = *reg;
+
+ free(reg);
+ return TEST_PASS;
+
+fail:
+ free(reg);
+ return TEST_FAIL;
+}
+
+int write_ckpt_tar_registers(pid_t child, unsigned long tar,
+ unsigned long ppr, unsigned long dscr)
+{
+ struct iovec iov;
+ unsigned long *reg;
+ int ret;
+
+ reg = malloc(sizeof(unsigned long));
+ if (!reg) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+
+ iov.iov_base = (u64 *) reg;
+ iov.iov_len = sizeof(unsigned long);
+
+ *reg = tar;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CTAR, &iov);
+ if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+ goto fail;
+ }
+
+ *reg = ppr;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CPPR, &iov);
+ if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+ goto fail;
+ }
+
+ *reg = dscr;
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CDSCR, &iov);
+ if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+ goto fail;
+ }
+
+ free(reg);
+ return TEST_PASS;
+fail:
+ free(reg);
+ return TEST_FAIL;
+}
+
+/* FPR */
+int show_fpr(pid_t child, unsigned long *fpr)
+{
+ struct fpr_regs *regs;
+ int ret, i;
+
+ regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+ ret = ptrace(PTRACE_GETFPREGS, child, NULL, regs);
+ if (ret) {
+		perror("ptrace(PTRACE_GETFPREGS) failed");
+ return TEST_FAIL;
+ }
+
+ if (fpr) {
+ for (i = 0; i < 32; i++)
+ fpr[i] = regs->fpr[i];
+ }
+ return TEST_PASS;
+}
+
+int write_fpr(pid_t child, unsigned long val)
+{
+ struct fpr_regs *regs;
+ int ret, i;
+
+ regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+ ret = ptrace(PTRACE_GETFPREGS, child, NULL, regs);
+ if (ret) {
+		perror("ptrace(PTRACE_GETFPREGS) failed");
+ return TEST_FAIL;
+ }
+
+ for (i = 0; i < 32; i++)
+ regs->fpr[i] = val;
+
+ ret = ptrace(PTRACE_SETFPREGS, child, NULL, regs);
+ if (ret) {
+		perror("ptrace(PTRACE_SETFPREGS) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int show_ckpt_fpr(pid_t child, unsigned long *fpr)
+{
+ struct fpr_regs *regs;
+ struct iovec iov;
+ int ret, i;
+
+ regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+ iov.iov_base = regs;
+ iov.iov_len = sizeof(struct fpr_regs);
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CFPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ return TEST_FAIL;
+ }
+
+ if (fpr) {
+ for (i = 0; i < 32; i++)
+ fpr[i] = regs->fpr[i];
+ }
+
+ return TEST_PASS;
+}
+
+int write_ckpt_fpr(pid_t child, unsigned long val)
+{
+ struct fpr_regs *regs;
+ struct iovec iov;
+ int ret, i;
+
+ regs = (struct fpr_regs *) malloc(sizeof(struct fpr_regs));
+ iov.iov_base = regs;
+ iov.iov_len = sizeof(struct fpr_regs);
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CFPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ return TEST_FAIL;
+ }
+
+ for (i = 0; i < 32; i++)
+ regs->fpr[i] = val;
+
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CFPR, &iov);
+ if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+/* GPR */
+int show_gpr(pid_t child, unsigned long *gpr)
+{
+ struct pt_regs *regs;
+ int ret, i;
+
+ regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+ if (!regs) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+
+ ret = ptrace(PTRACE_GETREGS, child, NULL, regs);
+ if (ret) {
+		perror("ptrace(PTRACE_GETREGS) failed");
+ return TEST_FAIL;
+ }
+
+ if (gpr) {
+ for (i = 14; i < 32; i++)
+ gpr[i-14] = regs->gpr[i];
+ }
+
+ return TEST_PASS;
+}
+
+int write_gpr(pid_t child, unsigned long val)
+{
+ struct pt_regs *regs;
+ int i, ret;
+
+ regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+ if (!regs) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+
+ ret = ptrace(PTRACE_GETREGS, child, NULL, regs);
+ if (ret) {
+		perror("ptrace(PTRACE_GETREGS) failed");
+ return TEST_FAIL;
+ }
+
+ for (i = 14; i < 32; i++)
+ regs->gpr[i] = val;
+
+ ret = ptrace(PTRACE_SETREGS, child, NULL, regs);
+ if (ret) {
+		perror("ptrace(PTRACE_SETREGS) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int show_ckpt_gpr(pid_t child, unsigned long *gpr)
+{
+ struct pt_regs *regs;
+ struct iovec iov;
+ int ret, i;
+
+ regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+ if (!regs) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+
+ iov.iov_base = (u64 *) regs;
+ iov.iov_len = sizeof(struct pt_regs);
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CGPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ return TEST_FAIL;
+ }
+
+ if (gpr) {
+ for (i = 14; i < 32; i++)
+ gpr[i-14] = regs->gpr[i];
+ }
+
+ return TEST_PASS;
+}
+
+int write_ckpt_gpr(pid_t child, unsigned long val)
+{
+ struct pt_regs *regs;
+ struct iovec iov;
+ int ret, i;
+
+ regs = (struct pt_regs *) malloc(sizeof(struct pt_regs));
+ if (!regs) {
+		perror("malloc() failed");
+ return TEST_FAIL;
+ }
+ iov.iov_base = (u64 *) regs;
+ iov.iov_len = sizeof(struct pt_regs);
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CGPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ return TEST_FAIL;
+ }
+
+ for (i = 14; i < 32; i++)
+ regs->gpr[i] = val;
+
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CGPR, &iov);
+ if (ret) {
+		perror("ptrace(PTRACE_SETREGSET) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+/* VMX */
+int show_vmx(pid_t child, unsigned long vmx[][2])
+{
+ int ret;
+
+ ret = ptrace(PTRACE_GETVRREGS, child, 0, vmx);
+ if (ret) {
+ perror("ptrace(PTRACE_GETVRREGS) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int show_vmx_ckpt(pid_t child, unsigned long vmx[][2])
+{
+ unsigned long regs[34][2];
+ struct iovec iov;
+ int ret;
+
+ iov.iov_base = (u64 *) regs;
+ iov.iov_len = sizeof(regs);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVMX, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVMX) failed");
+ return TEST_FAIL;
+ }
+ memcpy(vmx, regs, sizeof(regs));
+ return TEST_PASS;
+}
+
+
+int write_vmx(pid_t child, unsigned long vmx[][2])
+{
+ int ret;
+
+ ret = ptrace(PTRACE_SETVRREGS, child, 0, vmx);
+ if (ret) {
+ perror("ptrace(PTRACE_SETVRREGS) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int write_vmx_ckpt(pid_t child, unsigned long vmx[][2])
+{
+ unsigned long regs[34][2];
+ struct iovec iov;
+ int ret;
+
+ memcpy(regs, vmx, sizeof(regs));
+ iov.iov_base = (u64 *) regs;
+ iov.iov_len = sizeof(regs);
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVMX, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVMX) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+/* VSX */
+int show_vsx(pid_t child, unsigned long *vsx)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_GETVSRREGS, child, 0, vsx);
+ if (ret) {
+ perror("ptrace(PTRACE_GETVSRREGS) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int show_vsx_ckpt(pid_t child, unsigned long *vsx)
+{
+ unsigned long regs[32];
+ struct iovec iov;
+ int ret;
+
+ iov.iov_base = (u64 *) regs;
+ iov.iov_len = sizeof(regs);
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_CVSX, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET, NT_PPC_TM_CVSX) failed");
+ return TEST_FAIL;
+ }
+ memcpy(vsx, regs, sizeof(regs));
+ return TEST_PASS;
+}
+
+int write_vsx(pid_t child, unsigned long *vsx)
+{
+ int ret;
+
+ ret = ptrace(PTRACE_SETVSRREGS, child, 0, vsx);
+ if (ret) {
+ perror("ptrace(PTRACE_SETVSRREGS) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+int write_vsx_ckpt(pid_t child, unsigned long *vsx)
+{
+ unsigned long regs[32];
+ struct iovec iov;
+ int ret;
+
+ memcpy(regs, vsx, sizeof(regs));
+ iov.iov_base = (u64 *) regs;
+ iov.iov_len = sizeof(regs);
+ ret = ptrace(PTRACE_SETREGSET, child, NT_PPC_TM_CVSX, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_SETREGSET, NT_PPC_TM_CVSX) failed");
+ return TEST_FAIL;
+ }
+ return TEST_PASS;
+}
+
+/* TM SPR */
+int show_tm_spr(pid_t child, struct tm_spr_regs *out)
+{
+ struct tm_spr_regs *regs;
+ struct iovec iov;
+ int ret;
+
+ regs = (struct tm_spr_regs *) malloc(sizeof(struct tm_spr_regs));
+ if (!regs) {
+ perror("malloc() failed");
+ return TEST_FAIL;
+ }
+
+ iov.iov_base = (u64 *) regs;
+ iov.iov_len = sizeof(struct tm_spr_regs);
+
+ ret = ptrace(PTRACE_GETREGSET, child, NT_PPC_TM_SPR, &iov);
+ if (ret) {
+ perror("ptrace(PTRACE_GETREGSET) failed");
+ return TEST_FAIL;
+ }
+
+ if (out)
+ memcpy(out, regs, sizeof(struct tm_spr_regs));
+
+ return TEST_PASS;
+}
+
+
+
+/* Analyse TEXASR after TM failure */
+inline unsigned long get_tfiar(void)
+{
+ unsigned long ret;
+
+ asm volatile("mfspr %0,%1" : "=r" (ret) : "i" (SPRN_TFIAR));
+ return ret;
+}
+
+void analyse_texasr(unsigned long texasr)
+{
+ printf("TEXASR: %16lx\t", texasr);
+
+ if (texasr & TEXASR_FP)
+ printf("TEXASR_FP ");
+
+ if (texasr & TEXASR_DA)
+ printf("TEXASR_DA ");
+
+ if (texasr & TEXASR_NO)
+ printf("TEXASR_NO ");
+
+ if (texasr & TEXASR_FO)
+ printf("TEXASR_FO ");
+
+ if (texasr & TEXASR_SIC)
+ printf("TEXASR_SIC ");
+
+ if (texasr & TEXASR_NTC)
+ printf("TEXASR_NTC ");
+
+ if (texasr & TEXASR_TC)
+ printf("TEXASR_TC ");
+
+ if (texasr & TEXASR_TIC)
+ printf("TEXASR_TIC ");
+
+ if (texasr & TEXASR_IC)
+ printf("TEXASR_IC ");
+
+ if (texasr & TEXASR_IFC)
+ printf("TEXASR_IFC ");
+
+ if (texasr & TEXASR_ABT)
+ printf("TEXASR_ABT ");
+
+ if (texasr & TEXASR_SPD)
+ printf("TEXASR_SPD ");
+
+ if (texasr & TEXASR_HV)
+ printf("TEXASR_HV ");
+
+ if (texasr & TEXASR_PR)
+ printf("TEXASR_PR ");
+
+ if (texasr & TEXASR_FS)
+ printf("TEXASR_FS ");
+
+ if (texasr & TEXASR_TE)
+ printf("TEXASR_TE ");
+
+ if (texasr & TEXASR_ROT)
+ printf("TEXASR_ROT ");
+
+ printf("TFIAR :%lx\n", get_tfiar());
+}
+
+void store_gpr(unsigned long *addr);
+void store_fpr(float *addr);
diff --git a/tools/testing/selftests/powerpc/reg.h b/tools/testing/selftests/powerpc/reg.h
deleted file mode 100644
index fddf368ed82f..000000000000
--- a/tools/testing/selftests/powerpc/reg.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2014, Michael Ellerman, IBM Corp.
- * Licensed under GPLv2.
- */
-
-#ifndef _SELFTESTS_POWERPC_REG_H
-#define _SELFTESTS_POWERPC_REG_H
-
-#define __stringify_1(x) #x
-#define __stringify(x) __stringify_1(x)
-
-#define mfspr(rn) ({unsigned long rval; \
- asm volatile("mfspr %0," _str(rn) \
- : "=r" (rval)); rval; })
-#define mtspr(rn, v) asm volatile("mtspr " _str(rn) ",%0" : \
- : "r" ((unsigned long)(v)) \
- : "memory")
-
-#define mb() asm volatile("sync" : : : "memory");
-
-#define SPRN_MMCR2 769
-#define SPRN_MMCRA 770
-#define SPRN_MMCR0 779
-#define MMCR0_PMAO 0x00000080
-#define MMCR0_PMAE 0x04000000
-#define MMCR0_FC 0x80000000
-#define SPRN_EBBHR 804
-#define SPRN_EBBRR 805
-#define SPRN_BESCR 806 /* Branch event status & control register */
-#define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */
-#define SPRN_BESCRSU 801 /* Branch event status & control set upper */
-#define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */
-#define SPRN_BESCRRU 803 /* Branch event status & control REset upper */
-
-#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */
-#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */
-#define BESCR_LME (0x1ul << 34) /* Load Monitor Enable */
-#define BESCR_LMEO (0x1ul << 2) /* Load Monitor Exception Occurred */
-
-#define SPRN_LMRR 813 /* Load Monitor Region Register */
-#define SPRN_LMSER 814 /* Load Monitor Section Enable Register */
-
-#define SPRN_PMC1 771
-#define SPRN_PMC2 772
-#define SPRN_PMC3 773
-#define SPRN_PMC4 774
-#define SPRN_PMC5 775
-#define SPRN_PMC6 776
-
-#define SPRN_SIAR 780
-#define SPRN_SDAR 781
-#define SPRN_SIER 768
-
-#define SPRN_TEXASR 0x82
-#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
-#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
-#define TEXASR_FS 0x08000000
-#define SPRN_TAR 0x32f
-
-#endif /* _SELFTESTS_POWERPC_REG_H */
diff --git a/tools/testing/selftests/powerpc/signal/signal.S b/tools/testing/selftests/powerpc/signal/signal.S
index 7043d521df0a..322f2f1fc327 100644
--- a/tools/testing/selftests/powerpc/signal/signal.S
+++ b/tools/testing/selftests/powerpc/signal/signal.S
@@ -7,7 +7,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include "../basic_asm.h"
+#include "basic_asm.h"
/* long signal_self(pid_t pid, int sig); */
FUNC_START(signal_self)
diff --git a/tools/testing/selftests/powerpc/stringloops/memcmp.c b/tools/testing/selftests/powerpc/stringloops/memcmp.c
index 17417dd70708..30b1222380ca 100644
--- a/tools/testing/selftests/powerpc/stringloops/memcmp.c
+++ b/tools/testing/selftests/powerpc/stringloops/memcmp.c
@@ -1,7 +1,7 @@
#include <malloc.h>
#include <stdlib.h>
#include <string.h>
-#include "../utils.h"
+#include "utils.h"
#define SIZE 256
#define ITERATIONS 10000
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal.S b/tools/testing/selftests/powerpc/tm/tm-signal.S
index 4e13e8b3a96f..506a4ebaf3ae 100644
--- a/tools/testing/selftests/powerpc/tm/tm-signal.S
+++ b/tools/testing/selftests/powerpc/tm/tm-signal.S
@@ -7,11 +7,11 @@
* 2 of the License, or (at your option) any later version.
*/
-#include "../basic_asm.h"
-#include "../gpr_asm.h"
-#include "../fpu_asm.h"
-#include "../vmx_asm.h"
-#include "../vsx_asm.h"
+#include "basic_asm.h"
+#include "gpr_asm.h"
+#include "fpu_asm.h"
+#include "vmx_asm.h"
+#include "vsx_asm.h"
/*
* Large caveat here being that the caller cannot expect the
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index 2c8da74304e7..0ffff04433c5 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -10,7 +10,7 @@
#include <asm/cputable.h>
#include <stdbool.h>
-#include "../utils.h"
+#include "utils.h"
static inline bool have_htm(void)
{