Diffstat (limited to 'arch')
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/alpha/Kconfig4
-rw-r--r--arch/alpha/boot/bootp.c1
-rw-r--r--arch/alpha/boot/bootpz.c1
-rw-r--r--arch/alpha/boot/main.c1
-rw-r--r--arch/alpha/boot/tools/objstrip.c2
-rw-r--r--arch/alpha/include/asm/cacheflush.h32
-rw-r--r--arch/alpha/include/asm/io.h75
-rw-r--r--arch/alpha/include/asm/pgtable.h16
-rw-r--r--arch/alpha/kernel/io.c60
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/alpha/kernel/pci_iommu.c2
-rw-r--r--arch/alpha/kernel/process.c1
-rw-r--r--arch/alpha/kernel/proto.h2
-rw-r--r--arch/alpha/kernel/ptrace.c1
-rw-r--r--arch/alpha/kernel/setup.c24
-rw-r--r--arch/alpha/kernel/smp.c3
-rw-r--r--arch/alpha/kernel/sys_alcor.c1
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c1
-rw-r--r--arch/alpha/kernel/sys_dp264.c1
-rw-r--r--arch/alpha/kernel/sys_eb64p.c1
-rw-r--r--arch/alpha/kernel/sys_eiger.c3
-rw-r--r--arch/alpha/kernel/sys_jensen.c1
-rw-r--r--arch/alpha/kernel/sys_marvel.c1
-rw-r--r--arch/alpha/kernel/sys_miata.c1
-rw-r--r--arch/alpha/kernel/sys_mikasa.c1
-rw-r--r--arch/alpha/kernel/sys_nautilus.c1
-rw-r--r--arch/alpha/kernel/sys_noritake.c1
-rw-r--r--arch/alpha/kernel/sys_rawhide.c1
-rw-r--r--arch/alpha/kernel/sys_ruffian.c1
-rw-r--r--arch/alpha/kernel/sys_rx164.c1
-rw-r--r--arch/alpha/kernel/sys_sable.c1
-rw-r--r--arch/alpha/kernel/sys_sio.c1
-rw-r--r--arch/alpha/kernel/sys_sx164.c1
-rw-r--r--arch/alpha/kernel/sys_takara.c1
-rw-r--r--arch/alpha/kernel/sys_titan.c1
-rw-r--r--arch/alpha/kernel/sys_wildfire.c1
-rw-r--r--arch/alpha/kernel/traps.c26
-rw-r--r--arch/alpha/mm/fault.c12
-rw-r--r--arch/alpha/mm/init.c1
-rw-r--r--arch/arc/include/asm/bug.h3
-rw-r--r--arch/arc/include/asm/pgtable.h24
-rw-r--r--arch/arc/kernel/process.c4
-rw-r--r--arch/arc/kernel/stacktrace.c17
-rw-r--r--arch/arc/kernel/troubleshoot.c6
-rw-r--r--arch/arc/mm/fault.c6
-rw-r--r--arch/arc/mm/highmem.c12
-rw-r--r--arch/arc/mm/tlbex.S2
-rw-r--r--arch/arm/Kconfig9
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/head.S62
-rwxr-xr-xarch/arm/boot/deflate_xip_data.sh2
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi54
-rw-r--r--arch/arm/boot/dts/at91sam9rl.dtsi54
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi54
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi120
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi107
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi120
-rw-r--r--arch/arm/crypto/Kconfig12
-rw-r--r--arch/arm/include/asm/bug.h3
-rw-r--r--arch/arm/include/asm/cacheflush.h7
-rw-r--r--arch/arm/include/asm/efi.h1
-rw-r--r--arch/arm/include/asm/fixmap.h2
-rw-r--r--arch/arm/include/asm/idmap.h2
-rw-r--r--arch/arm/include/asm/pgtable-2level.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h7
-rw-r--r--arch/arm/include/asm/pgtable-nommu.h3
-rw-r--r--arch/arm/include/asm/pgtable.h25
-rw-r--r--arch/arm/include/asm/traps.h3
-rw-r--r--arch/arm/include/asm/unwind.h3
-rw-r--r--arch/arm/kernel/elf.c27
-rw-r--r--arch/arm/kernel/fiq.c4
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/machine_kexec.c1
-rw-r--r--arch/arm/kernel/module.c1
-rw-r--r--arch/arm/kernel/process.c4
-rw-r--r--arch/arm/kernel/ptrace.c1
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/kernel/suspend.c2
-rw-r--r--arch/arm/kernel/swp_emulate.c4
-rw-r--r--arch/arm/kernel/traps.c41
-rw-r--r--arch/arm/kernel/unwind.c5
-rw-r--r--arch/arm/kernel/vdso.c2
-rw-r--r--arch/arm/kernel/vmlinux.lds.S2
-rw-r--r--arch/arm/lib/backtrace-clang.S9
-rw-r--r--arch/arm/lib/backtrace.S14
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c16
-rw-r--r--arch/arm/mach-ebsa110/core.c1
-rw-r--r--arch/arm/mach-footbridge/common.c1
-rw-r--r--arch/arm/mach-imx/mm-imx21.c1
-rw-r--r--arch/arm/mach-imx/mm-imx27.c1
-rw-r--r--arch/arm/mach-imx/mm-imx3.c1
-rw-r--r--arch/arm/mach-integrator/core.c2
-rw-r--r--arch/arm/mach-iop32x/i2c.c1
-rw-r--r--arch/arm/mach-iop32x/iq31244.c1
-rw-r--r--arch/arm/mach-iop32x/iq80321.c1
-rw-r--r--arch/arm/mach-iop32x/n2100.c1
-rw-r--r--arch/arm/mach-ixp4xx/common.c1
-rw-r--r--arch/arm/mach-keystone/platsmp.c2
-rw-r--r--arch/arm/mach-mmp/Kconfig2
-rw-r--r--arch/arm/mach-mmp/Makefile6
-rw-r--r--arch/arm/mach-mmp/clock-mmp2.c114
-rw-r--r--arch/arm/mach-mmp/clock-pxa168.c94
-rw-r--r--arch/arm/mach-mmp/clock-pxa910.c70
-rw-r--r--arch/arm/mach-mmp/clock.c105
-rw-r--r--arch/arm/mach-mmp/clock.h65
-rw-r--r--arch/arm/mach-mmp/pxa168.c1
-rw-r--r--arch/arm/mach-mmp/time.c1
-rw-r--r--arch/arm/mach-sa1100/assabet.c3
-rw-r--r--arch/arm/mach-sa1100/hackkit.c2
-rw-r--r--arch/arm/mach-tegra/iomap.h2
-rw-r--r--arch/arm/mach-vt8500/Kconfig1
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/arm/mm/copypage-v4mc.c1
-rw-r--r--arch/arm/mm/copypage-v6.c1
-rw-r--r--arch/arm/mm/copypage-xscale.c1
-rw-r--r--arch/arm/mm/dump.c1
-rw-r--r--arch/arm/mm/fault-armv.c1
-rw-r--r--arch/arm/mm/fault.c9
-rw-r--r--arch/arm/mm/highmem.c4
-rw-r--r--arch/arm/mm/idmap.c2
-rw-r--r--arch/arm/mm/ioremap.c31
-rw-r--r--arch/arm/mm/mm.h8
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/pageattr.c1
-rw-r--r--arch/arm/mm/proc-arm1020.S2
-rw-r--r--arch/arm/mm/proc-arm1020e.S2
-rw-r--r--arch/arm/mm/proc-arm1022.S2
-rw-r--r--arch/arm/mm/proc-arm1026.S2
-rw-r--r--arch/arm/mm/proc-arm720.S2
-rw-r--r--arch/arm/mm/proc-arm740.S2
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm922.S2
-rw-r--r--arch/arm/mm/proc-arm925.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-arm940.S2
-rw-r--r--arch/arm/mm/proc-arm946.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-fa526.S2
-rw-r--r--arch/arm/mm/proc-feroceon.S2
-rw-r--r--arch/arm/mm/proc-mohawk.S2
-rw-r--r--arch/arm/mm/proc-sa110.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/mm/pv-fixup-asm.S2
-rw-r--r--arch/arm64/Kconfig11
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sc7180.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi5
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi8
-rw-r--r--arch/arm64/include/asm/acpi.h5
-rw-r--r--arch/arm64/include/asm/atomic.h6
-rw-r--r--arch/arm64/include/asm/barrier.h16
-rw-r--r--arch/arm64/include/asm/cacheflush.h46
-rw-r--r--arch/arm64/include/asm/elf.h23
-rw-r--r--arch/arm64/include/asm/io.h2
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h2
-rw-r--r--arch/arm64/include/asm/kvm_asm.h33
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h6
-rw-r--r--arch/arm64/include/asm/kvm_host.h9
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h22
-rw-r--r--arch/arm64/include/asm/mmu_context.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h40
-rw-r--r--arch/arm64/include/asm/stacktrace.h3
-rw-r--r--arch/arm64/include/asm/stage2_pgtable.h2
-rw-r--r--arch/arm64/include/asm/vmap_stack.h2
-rw-r--r--arch/arm64/kernel/acpi.c2
-rw-r--r--arch/arm64/kernel/debug-monitors.c2
-rw-r--r--arch/arm64/kernel/ftrace.c3
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/hibernate.c5
-rw-r--r--arch/arm64/kernel/kaslr.c2
-rw-r--r--arch/arm64/kernel/pci.c4
-rw-r--r--arch/arm64/kernel/process.c2
-rw-r--r--arch/arm64/kernel/ptrace.c1
-rw-r--r--arch/arm64/kernel/setup.c4
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/kernel/suspend.c2
-rw-r--r--arch/arm64/kernel/traps.c21
-rw-r--r--arch/arm64/kernel/vdso.c8
-rw-r--r--arch/arm64/kernel/vdso32/Makefile8
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm64/kvm/aarch32.c28
-rw-r--r--arch/arm64/kvm/arm.c25
-rw-r--r--arch/arm64/kvm/handle_exit.c32
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c4
-rw-r--r--arch/arm64/kvm/hyp/switch.c65
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c8
-rw-r--r--arch/arm64/kvm/mmu.c14
-rw-r--r--arch/arm64/kvm/pmu.c8
-rw-r--r--arch/arm64/kvm/sys_regs.c25
-rw-r--r--arch/arm64/kvm/sys_regs_generic_v8.c10
-rw-r--r--arch/arm64/lib/csum.c20
-rw-r--r--arch/arm64/mm/dump.c1
-rw-r--r--arch/arm64/mm/fault.c9
-rw-r--r--arch/arm64/mm/kasan_init.c3
-rw-r--r--arch/arm64/mm/mmu.c8
-rw-r--r--arch/arm64/mm/pageattr.c1
-rw-r--r--arch/arm64/mm/proc.S2
-rw-r--r--arch/c6x/Kconfig1
-rw-r--r--arch/c6x/include/asm/cacheflush.h19
-rw-r--r--arch/c6x/include/asm/pgtable.h3
-rw-r--r--arch/c6x/kernel/traps.c16
-rw-r--r--arch/csky/include/asm/io.h2
-rw-r--r--arch/csky/include/asm/pgtable.h33
-rw-r--r--arch/csky/kernel/module.c1
-rw-r--r--arch/csky/kernel/ptrace.c5
-rw-r--r--arch/csky/kernel/stacktrace.c6
-rw-r--r--arch/csky/kernel/vdso.c4
-rw-r--r--arch/csky/mm/fault.c10
-rw-r--r--arch/csky/mm/highmem.c2
-rw-r--r--arch/csky/mm/init.c7
-rw-r--r--arch/csky/mm/tlb.c1
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/boot/compressed/Makefile2
-rw-r--r--arch/h8300/include/asm/pgtable.h1
-rw-r--r--arch/h8300/kernel/process.c1
-rw-r--r--arch/h8300/kernel/setup.c1
-rw-r--r--arch/h8300/kernel/signal.c1
-rw-r--r--arch/h8300/kernel/traps.c12
-rw-r--r--arch/h8300/mm/fault.c1
-rw-r--r--arch/h8300/mm/init.c1
-rw-r--r--arch/h8300/mm/memory.c1
-rw-r--r--arch/hexagon/Makefile2
-rw-r--r--arch/hexagon/include/asm/cacheflush.h19
-rw-r--r--arch/hexagon/include/asm/fixmap.h4
-rw-r--r--arch/hexagon/include/asm/pgtable.h55
-rw-r--r--arch/hexagon/kernel/traps.c25
-rw-r--r--arch/hexagon/kernel/vdso.c4
-rw-r--r--arch/hexagon/mm/uaccess.c2
-rw-r--r--arch/hexagon/mm/vm_fault.c9
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/ia64/include/asm/cacheflush.h30
-rw-r--r--arch/ia64/include/asm/pgtable.h34
-rw-r--r--arch/ia64/include/asm/ptrace.h1
-rw-r--r--arch/ia64/include/asm/uaccess.h2
-rw-r--r--arch/ia64/kernel/efi.c1
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/head.S3
-rw-r--r--arch/ia64/kernel/irq_ia64.c2
-rw-r--r--arch/ia64/kernel/ivt.S2
-rw-r--r--arch/ia64/kernel/kprobes.c2
-rw-r--r--arch/ia64/kernel/mca.c2
-rw-r--r--arch/ia64/kernel/mca_asm.S2
-rw-r--r--arch/ia64/kernel/perfmon.c8
-rw-r--r--arch/ia64/kernel/process.c17
-rw-r--r--arch/ia64/kernel/ptrace.c1
-rw-r--r--arch/ia64/kernel/relocate_kernel.S4
-rw-r--r--arch/ia64/kernel/setup.c2
-rw-r--r--arch/ia64/kernel/smp.c1
-rw-r--r--arch/ia64/kernel/smpboot.c1
-rw-r--r--arch/ia64/kernel/uncached.c2
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S2
-rw-r--r--arch/ia64/mm/contig.c1
-rw-r--r--arch/ia64/mm/fault.c17
-rw-r--r--arch/ia64/mm/init.c12
-rw-r--r--arch/m68k/68000/m68EZ328.c2
-rw-r--r--arch/m68k/68000/m68VZ328.c2
-rw-r--r--arch/m68k/68000/timers.c1
-rw-r--r--arch/m68k/Kconfig.cpu2
-rw-r--r--arch/m68k/Makefile8
-rw-r--r--arch/m68k/amiga/config.c1
-rw-r--r--arch/m68k/apollo/config.c1
-rw-r--r--arch/m68k/atari/atasound.c1
-rw-r--r--arch/m68k/atari/stram.c1
-rw-r--r--arch/m68k/bvme6000/config.c1
-rw-r--r--arch/m68k/coldfire/pci.c4
-rw-r--r--arch/m68k/configs/stmark2_defconfig1
-rw-r--r--arch/m68k/include/asm/cacheflush_mm.h6
-rw-r--r--arch/m68k/include/asm/cacheflush_no.h19
-rw-r--r--arch/m68k/include/asm/mcf_pgtable.h63
-rw-r--r--arch/m68k/include/asm/motorola_pgalloc.h8
-rw-r--r--arch/m68k/include/asm/motorola_pgtable.h84
-rw-r--r--arch/m68k/include/asm/pgtable_mm.h1
-rw-r--r--arch/m68k/include/asm/pgtable_no.h2
-rw-r--r--arch/m68k/include/asm/sun3_pgtable.h24
-rw-r--r--arch/m68k/include/asm/sun3xflop.h2
-rw-r--r--arch/m68k/include/asm/uaccess_no.h6
-rw-r--r--arch/m68k/kernel/head.S2
-rw-r--r--arch/m68k/kernel/process.c1
-rw-r--r--arch/m68k/kernel/ptrace.c1
-rw-r--r--arch/m68k/kernel/setup_no.c1
-rw-r--r--arch/m68k/kernel/signal.c1
-rw-r--r--arch/m68k/kernel/sys_m68k.c14
-rw-r--r--arch/m68k/kernel/traps.c13
-rw-r--r--arch/m68k/kernel/uboot.c1
-rw-r--r--arch/m68k/mac/config.c1
-rw-r--r--arch/m68k/mm/cache.c13
-rw-r--r--arch/m68k/mm/fault.c10
-rw-r--r--arch/m68k/mm/init.c2
-rw-r--r--arch/m68k/mm/mcfmmu.c1
-rw-r--r--arch/m68k/mm/motorola.c25
-rw-r--r--arch/m68k/mm/sun3kmap.c1
-rw-r--r--arch/m68k/mm/sun3mmu.c1
-rw-r--r--arch/m68k/mvme147/config.c1
-rw-r--r--arch/m68k/mvme16x/config.c1
-rw-r--r--arch/m68k/q40/config.c1
-rw-r--r--arch/m68k/sun3/config.c1
-rw-r--r--arch/m68k/sun3/dvma.c1
-rw-r--r--arch/m68k/sun3/mmu_emu.c1
-rw-r--r--arch/m68k/sun3/sun3dvma.c1
-rw-r--r--arch/m68k/sun3x/dvma.c1
-rw-r--r--arch/m68k/sun3x/prom.c1
-rw-r--r--arch/microblaze/include/asm/cacheflush.h29
-rw-r--r--arch/microblaze/include/asm/pgalloc.h2
-rw-r--r--arch/microblaze/include/asm/pgtable.h23
-rw-r--r--arch/microblaze/include/asm/uaccess.h2
-rw-r--r--arch/microblaze/include/asm/unwind.h3
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S2
-rw-r--r--arch/microblaze/kernel/module.c2
-rw-r--r--arch/microblaze/kernel/setup.c2
-rw-r--r--arch/microblaze/kernel/signal.c9
-rw-r--r--arch/microblaze/kernel/stacktrace.c4
-rw-r--r--arch/microblaze/kernel/traps.c12
-rw-r--r--arch/microblaze/kernel/unwind.c40
-rw-r--r--arch/microblaze/mm/fault.c17
-rw-r--r--arch/microblaze/mm/init.c9
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/mips/Kconfig8
-rw-r--r--arch/mips/fw/arc/memory.c1
-rw-r--r--arch/mips/include/asm/cpu-features.h3
-rw-r--r--arch/mips/include/asm/fixmap.h3
-rw-r--r--arch/mips/include/asm/kvm_host.h52
-rw-r--r--arch/mips/include/asm/mach-generic/floppy.h1
-rw-r--r--arch/mips/include/asm/mach-jazz/floppy.h1
-rw-r--r--arch/mips/include/asm/mipsregs.h4
-rw-r--r--arch/mips/include/asm/pgtable-32.h22
-rw-r--r--arch/mips/include/asm/pgtable-64.h32
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/include/uapi/asm/inst.h11
-rw-r--r--arch/mips/jazz/irq.c2
-rw-r--r--arch/mips/jazz/jazzdma.c1
-rw-r--r--arch/mips/jazz/setup.c2
-rw-r--r--arch/mips/kernel/cpu-probe.c5
-rw-r--r--arch/mips/kernel/module.c1
-rw-r--r--arch/mips/kernel/process.c1
-rw-r--r--arch/mips/kernel/ptrace.c1
-rw-r--r--arch/mips/kernel/ptrace32.c1
-rw-r--r--arch/mips/kernel/smp-bmips.c1
-rw-r--r--arch/mips/kernel/sysrq.c2
-rw-r--r--arch/mips/kernel/traps.c40
-rw-r--r--arch/mips/kernel/vdso.c4
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/Makefile5
-rw-r--r--arch/mips/kvm/emulate.c503
-rw-r--r--arch/mips/kvm/entry.c19
-rw-r--r--arch/mips/kvm/interrupt.c93
-rw-r--r--arch/mips/kvm/interrupt.h14
-rw-r--r--arch/mips/kvm/loongson_ipi.c214
-rw-r--r--arch/mips/kvm/mips.c49
-rw-r--r--arch/mips/kvm/mmu.c20
-rw-r--r--arch/mips/kvm/tlb.c42
-rw-r--r--arch/mips/kvm/trap_emul.c5
-rw-r--r--arch/mips/kvm/vz.c237
-rw-r--r--arch/mips/lib/dump_tlb.c1
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c1
-rw-r--r--arch/mips/mm/c-octeon.c1
-rw-r--r--arch/mips/mm/c-r3k.c11
-rw-r--r--arch/mips/mm/c-r4k.c11
-rw-r--r--arch/mips/mm/c-tx39.c11
-rw-r--r--arch/mips/mm/fault.c12
-rw-r--r--arch/mips/mm/highmem.c2
-rw-r--r--arch/mips/mm/init.c1
-rw-r--r--arch/mips/mm/page.c1
-rw-r--r--arch/mips/mm/pgtable-32.c1
-rw-r--r--arch/mips/mm/pgtable-64.c1
-rw-r--r--arch/mips/mm/sc-ip22.c1
-rw-r--r--arch/mips/mm/sc-mips.c1
-rw-r--r--arch/mips/mm/sc-r5k.c1
-rw-r--r--arch/mips/mm/tlb-r3k.c1
-rw-r--r--arch/mips/mm/tlb-r4k.c1
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/ralink/Kconfig4
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c1
-rw-r--r--arch/mips/sgi-ip32/ip32-memory.c1
-rw-r--r--arch/nds32/include/asm/cacheflush.h4
-rw-r--r--arch/nds32/include/asm/highmem.h1
-rw-r--r--arch/nds32/include/asm/pgtable.h22
-rw-r--r--arch/nds32/kernel/head.S2
-rw-r--r--arch/nds32/kernel/module.c2
-rw-r--r--arch/nds32/kernel/traps.c15
-rw-r--r--arch/nds32/kernel/vdso.c6
-rw-r--r--arch/nds32/mm/cacheflush.c3
-rw-r--r--arch/nds32/mm/fault.c17
-rw-r--r--arch/nds32/mm/init.c13
-rw-r--r--arch/nds32/mm/proc.c7
-rw-r--r--arch/nios2/include/asm/pgtable.h24
-rw-r--r--arch/nios2/kernel/module.c1
-rw-r--r--arch/nios2/kernel/nios2_ksyms.c2
-rw-r--r--arch/nios2/kernel/signal.c1
-rw-r--r--arch/nios2/kernel/traps.c17
-rw-r--r--arch/nios2/mm/fault.c14
-rw-r--r--arch/nios2/mm/init.c5
-rw-r--r--arch/nios2/mm/pgtable.c1
-rw-r--r--arch/nios2/mm/tlb.c1
-rw-r--r--arch/openrisc/include/asm/cacheflush.h31
-rw-r--r--arch/openrisc/include/asm/io.h1
-rw-r--r--arch/openrisc/include/asm/pgtable.h33
-rw-r--r--arch/openrisc/include/asm/tlbflush.h1
-rw-r--r--arch/openrisc/kernel/asm-offsets.c1
-rw-r--r--arch/openrisc/kernel/entry.S6
-rw-r--r--arch/openrisc/kernel/head.S2
-rw-r--r--arch/openrisc/kernel/or32_ksyms.c2
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/openrisc/kernel/ptrace.c1
-rw-r--r--arch/openrisc/kernel/setup.c1
-rw-r--r--arch/openrisc/kernel/traps.c13
-rw-r--r--arch/openrisc/mm/fault.c12
-rw-r--r--arch/openrisc/mm/init.c1
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/openrisc/mm/tlb.c1
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/include/asm/io.h2
-rw-r--r--arch/parisc/include/asm/mmu_context.h1
-rw-r--r--arch/parisc/include/asm/pgtable.h33
-rw-r--r--arch/parisc/kernel/asm-offsets.c2
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/head.S2
-rw-r--r--arch/parisc/kernel/module.c1
-rw-r--r--arch/parisc/kernel/pacache.S2
-rw-r--r--arch/parisc/kernel/pci-dma.c2
-rw-r--r--arch/parisc/kernel/pdt.c2
-rw-r--r--arch/parisc/kernel/ptrace.c1
-rw-r--r--arch/parisc/kernel/smp.c1
-rw-r--r--arch/parisc/kernel/traps.c30
-rw-r--r--arch/parisc/lib/memcpy.c12
-rw-r--r--arch/parisc/mm/fault.c10
-rw-r--r--arch/parisc/mm/fixmap.c6
-rw-r--r--arch/parisc/mm/init.c1
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h20
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h43
-rw-r--r--arch/powerpc/include/asm/cacheflush.h42
-rw-r--r--arch/powerpc/include/asm/fixmap.h2
-rw-r--r--arch/powerpc/include/asm/io.h1
-rw-r--r--arch/powerpc/include/asm/kup.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h16
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h27
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h17
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-4k.h4
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h22
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/pgtable.h28
-rw-r--r--arch/powerpc/include/asm/pkeys.h2
-rw-r--r--arch/powerpc/include/asm/tlb.h2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/btext.c2
-rw-r--r--arch/powerpc/kernel/fpu.S1
-rw-r--r--arch/powerpc/kernel/head_32.S2
-rw-r--r--arch/powerpc/kernel/head_40x.S2
-rw-r--r--arch/powerpc/kernel/head_44x.S2
-rw-r--r--arch/powerpc/kernel/head_8xx.S2
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S2
-rw-r--r--arch/powerpc/kernel/io-workarounds.c2
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/mce_power.c2
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/process.c16
-rw-r--r--arch/powerpc/kernel/prom.c2
-rw-r--r--arch/powerpc/kernel/prom_init.c2
-rw-r--r--arch/powerpc/kernel/rtas_pci.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/signal_32.c1
-rw-r--r--arch/powerpc/kernel/signal_64.c1
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kernel/stacktrace.c2
-rw-r--r--arch/powerpc/kernel/traps.c1
-rw-r--r--arch/powerpc/kernel/vdso.c7
-rw-r--r--arch/powerpc/kvm/book3s.c4
-rw-r--r--arch/powerpc/kvm/book3s.h2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c14
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c40
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c18
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c10
-rw-r--r--arch/powerpc/kvm/book3s_hv.c81
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c17
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xive.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c32
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c72
-rw-r--r--arch/powerpc/kvm/book3s_pr.c30
-rw-r--r--arch/powerpc/kvm/booke.c36
-rw-r--r--arch/powerpc/kvm/booke.h8
-rw-r--r--arch/powerpc/kvm/booke_emulate.c2
-rw-r--r--arch/powerpc/kvm/e500_emulate.c15
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c4
-rw-r--r--arch/powerpc/kvm/emulate.c10
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c32
-rw-r--r--arch/powerpc/kvm/fpu.S2
-rw-r--r--arch/powerpc/kvm/powerpc.c72
-rw-r--r--arch/powerpc/kvm/trace_hv.h6
-rw-r--r--arch/powerpc/lib/code-patching.c1
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S2
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c2
-rw-r--r--arch/powerpc/mm/book3s32/tlb.c6
-rw-r--r--arch/powerpc/mm/book3s64/hash_hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/book3s64/hash_native.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c5
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c2
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c4
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c1
-rw-r--r--arch/powerpc/mm/book3s64/slb.c2
-rw-r--r--arch/powerpc/mm/book3s64/subpage_prot.c16
-rw-r--r--arch/powerpc/mm/copro_fault.c4
-rw-r--r--arch/powerpc/mm/fault.c23
-rw-r--r--arch/powerpc/mm/hugetlbpage.c1
-rw-r--r--arch/powerpc/mm/init-common.c2
-rw-r--r--arch/powerpc/mm/init_32.c1
-rw-r--r--arch/powerpc/mm/init_64.c1
-rw-r--r--arch/powerpc/mm/kasan/8xx.c4
-rw-r--r--arch/powerpc/mm/kasan/book3s_32.c2
-rw-r--r--arch/powerpc/mm/kasan/kasan_init_32.c8
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/powerpc/mm/nohash/40x.c5
-rw-r--r--arch/powerpc/mm/nohash/8xx.c2
-rw-r--r--arch/powerpc/mm/nohash/fsl_booke.c1
-rw-r--r--arch/powerpc/mm/nohash/tlb_low_64e.S2
-rw-r--r--arch/powerpc/mm/pgtable.c4
-rw-r--r--arch/powerpc/mm/pgtable_32.c5
-rw-r--r--arch/powerpc/mm/pgtable_64.c1
-rw-r--r--arch/powerpc/mm/ptdump/8xx.c2
-rw-r--r--arch/powerpc/mm/ptdump/bats.c2
-rw-r--r--arch/powerpc/mm/ptdump/book3s64.c2
-rw-r--r--arch/powerpc/mm/ptdump/hashpagetable.c1
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c1
-rw-r--r--arch/powerpc/mm/ptdump/shared.c2
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c6
-rw-r--r--arch/powerpc/perf/callchain.c1
-rw-r--r--arch/powerpc/perf/callchain_32.c1
-rw-r--r--arch/powerpc/perf/callchain_64.c5
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c2
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c2
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c2
-rw-r--r--arch/powerpc/platforms/85xx/smp.c2
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c2
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c1
-rw-r--r--arch/powerpc/platforms/8xx/micropatch.c1
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c2
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c2
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c2
-rw-r--r--arch/powerpc/platforms/cell/setup.c1
-rw-r--r--arch/powerpc/platforms/cell/smp.c2
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c10
-rw-r--r--arch/powerpc/platforms/chrp/pci.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c1
-rw-r--r--arch/powerpc/platforms/chrp/smp.c2
-rw-r--r--arch/powerpc/platforms/maple/setup.c1
-rw-r--r--arch/powerpc/platforms/maple/time.c1
-rw-r--r--arch/powerpc/platforms/powermac/setup.c1
-rw-r--r--arch/powerpc/platforms/powermac/smp.c2
-rw-r--r--arch/powerpc/platforms/powermac/time.c1
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c4
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/setup.c1
-rw-r--r--arch/powerpc/platforms/pseries/smp.c2
-rw-r--r--arch/powerpc/sysdev/cpm2.c1
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/xmon/xmon.c3
-rw-r--r--arch/riscv/Kconfig74
-rw-r--r--arch/riscv/include/asm/cacheflush.h65
-rw-r--r--arch/riscv/include/asm/clocksource.h7
-rw-r--r--arch/riscv/include/asm/fixmap.h2
-rw-r--r--arch/riscv/include/asm/io.h2
-rw-r--r--arch/riscv/include/asm/irq.h5
-rw-r--r--arch/riscv/include/asm/kasan.h2
-rw-r--r--arch/riscv/include/asm/pgtable-64.h7
-rw-r--r--arch/riscv/include/asm/pgtable.h22
-rw-r--r--arch/riscv/include/asm/processor.h13
-rw-r--r--arch/riscv/include/asm/smp.h3
-rw-r--r--arch/riscv/include/asm/vdso.h2
-rw-r--r--arch/riscv/include/asm/vdso/clocksource.h8
-rw-r--r--arch/riscv/include/asm/vdso/gettimeofday.h79
-rw-r--r--arch/riscv/include/asm/vdso/processor.h19
-rw-r--r--arch/riscv/include/asm/vdso/vsyscall.h27
-rw-r--r--arch/riscv/kernel/cpu.c16
-rw-r--r--arch/riscv/kernel/entry.S4
-rw-r--r--arch/riscv/kernel/irq.c33
-rw-r--r--arch/riscv/kernel/module.c2
-rw-r--r--arch/riscv/kernel/patch.c1
-rw-r--r--arch/riscv/kernel/setup.c1
-rw-r--r--arch/riscv/kernel/smp.c11
-rw-r--r--arch/riscv/kernel/soc.c2
-rw-r--r--arch/riscv/kernel/stacktrace.c9
-rw-r--r--arch/riscv/kernel/time.c9
-rw-r--r--arch/riscv/kernel/traps.c2
-rw-r--r--arch/riscv/kernel/vdso.c28
-rw-r--r--arch/riscv/kernel/vdso/Makefile12
-rw-r--r--arch/riscv/kernel/vdso/clock_getres.S18
-rw-r--r--arch/riscv/kernel/vdso/clock_gettime.S18
-rw-r--r--arch/riscv/kernel/vdso/gettimeofday.S18
-rw-r--r--arch/riscv/kernel/vdso/vdso.lds.S2
-rw-r--r--arch/riscv/kernel/vdso/vgettimeofday.c25
-rw-r--r--arch/riscv/mm/cacheflush.c1
-rw-r--r--arch/riscv/mm/fault.c14
-rw-r--r--arch/riscv/mm/init.c42
-rw-r--r--arch/riscv/mm/kasan_init.c2
-rw-r--r--arch/riscv/mm/pageattr.c6
-rw-r--r--arch/riscv/mm/ptdump.c2
-rw-r--r--arch/s390/appldata/appldata_mem.c4
-rw-r--r--arch/s390/appldata/appldata_net_sum.c4
-rw-r--r--arch/s390/appldata/appldata_os.c4
-rw-r--r--arch/s390/boot/ipl_parm.c2
-rw-r--r--arch/s390/boot/kaslr.c2
-rw-r--r--arch/s390/include/asm/ccwdev.h5
-rw-r--r--arch/s390/include/asm/chsc.h62
-rw-r--r--arch/s390/include/asm/hugetlb.h2
-rw-r--r--arch/s390/include/asm/io.h2
-rw-r--r--arch/s390/include/asm/ipl.h11
-rw-r--r--arch/s390/include/asm/kasan.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/include/asm/nmi.h2
-rw-r--r--arch/s390/include/asm/pci.h42
-rw-r--r--arch/s390/include/asm/pci_clp.h13
-rw-r--r--arch/s390/include/asm/pgtable.h15
-rw-r--r--arch/s390/include/asm/processor.h20
-rw-r--r--arch/s390/include/asm/qdio.h33
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/tlbflush.h1
-rw-r--r--arch/s390/include/uapi/asm/ipl.h25
-rw-r--r--arch/s390/kernel/Makefile5
-rw-r--r--arch/s390/kernel/asm-offsets.c2
-rw-r--r--arch/s390/kernel/dumpstack.c13
-rw-r--r--arch/s390/kernel/entry.S464
-rw-r--r--arch/s390/kernel/ftrace.c16
-rw-r--r--arch/s390/kernel/idle.c14
-rw-r--r--arch/s390/kernel/ipl.c209
-rw-r--r--arch/s390/kernel/machine_kexec.c1
-rw-r--r--arch/s390/kernel/nmi.c23
-rw-r--r--arch/s390/kernel/ptrace.c3
-rw-r--r--arch/s390/kernel/setup.c16
-rw-r--r--arch/s390/kernel/smp.c8
-rw-r--r--arch/s390/kernel/uv.c4
-rw-r--r--arch/s390/kernel/vdso.c5
-rw-r--r--arch/s390/kvm/gaccess.c6
-rw-r--r--arch/s390/kvm/interrupt.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c37
-rw-r--r--arch/s390/kvm/priv.c36
-rw-r--r--arch/s390/kvm/vsie.c3
-rw-r--r--arch/s390/lib/delay.c4
-rw-r--r--arch/s390/mm/dump_pagetables.c1
-rw-r--r--arch/s390/mm/extmem.c2
-rw-r--r--arch/s390/mm/fault.c17
-rw-r--r--arch/s390/mm/gmap.c78
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/kasan_init.c2
-rw-r--r--arch/s390/mm/pageattr.c13
-rw-r--r--arch/s390/mm/pgalloc.c2
-rw-r--r--arch/s390/mm/pgtable.c1
-rw-r--r--arch/s390/mm/vmem.c1
-rw-r--r--arch/s390/pci/Makefile3
-rw-r--r--arch/s390/pci/pci.c225
-rw-r--r--arch/s390/pci/pci_bus.c328
-rw-r--r--arch/s390/pci/pci_bus.h31
-rw-r--r--arch/s390/pci/pci_clp.c8
-rw-r--r--arch/s390/pci/pci_event.c39
-rw-r--r--arch/s390/pci/pci_mmio.c4
-rw-r--r--arch/s390/pci/pci_sysfs.c4
-rw-r--r--arch/sh/Kconfig62
-rw-r--r--arch/sh/Kconfig.cpu9
-rw-r--r--arch/sh/Kconfig.debug13
-rw-r--r--arch/sh/Makefile29
-rw-r--r--arch/sh/boards/Kconfig5
-rw-r--r--arch/sh/boot/compressed/Makefile12
-rw-r--r--arch/sh/boot/compressed/misc.c8
-rw-r--r--arch/sh/boot/compressed/vmlinux.scr2
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig3
-rw-r--r--arch/sh/configs/kfr2r09_defconfig2
-rw-r--r--arch/sh/configs/magicpanelr2_defconfig2
-rw-r--r--arch/sh/configs/polaris_defconfig1
-rw-r--r--arch/sh/configs/r7780mp_defconfig2
-rw-r--r--arch/sh/configs/r7785rp_defconfig2
-rw-r--r--arch/sh/configs/rsk7201_defconfig2
-rw-r--r--arch/sh/configs/rsk7203_defconfig2
-rw-r--r--arch/sh/configs/rsk7264_defconfig2
-rw-r--r--arch/sh/configs/rsk7269_defconfig2
-rw-r--r--arch/sh/configs/sdk7786_defconfig3
-rw-r--r--arch/sh/configs/se7206_defconfig2
-rw-r--r--arch/sh/configs/se7343_defconfig1
-rw-r--r--arch/sh/configs/se7619_defconfig2
-rw-r--r--arch/sh/configs/se7705_defconfig2
-rw-r--r--arch/sh/configs/se7712_defconfig2
-rw-r--r--arch/sh/configs/se7721_defconfig2
-rw-r--r--arch/sh/configs/se7722_defconfig2
-rw-r--r--arch/sh/configs/se7780_defconfig1
-rw-r--r--arch/sh/configs/sh7710voipgw_defconfig1
-rw-r--r--arch/sh/configs/sh7757lcr_defconfig2
-rw-r--r--arch/sh/configs/shmin_defconfig2
-rw-r--r--arch/sh/configs/ul2_defconfig2
-rw-r--r--arch/sh/drivers/pci/Makefile1
-rw-r--r--arch/sh/drivers/pci/ops-sh5.c65
-rw-r--r--arch/sh/drivers/pci/pci-sh5.c217
-rw-r--r--arch/sh/drivers/pci/pci-sh5.h108
-rw-r--r--arch/sh/include/asm/barrier.h4
-rw-r--r--arch/sh/include/asm/bitops.h26
-rw-r--r--arch/sh/include/asm/bl_bit.h11
-rw-r--r--arch/sh/include/asm/bl_bit_64.h37
-rw-r--r--arch/sh/include/asm/bugs.h4
-rw-r--r--arch/sh/include/asm/cache_insns.h12
-rw-r--r--arch/sh/include/asm/cache_insns_64.h20
-rw-r--r--arch/sh/include/asm/cacheflush.h1
-rw-r--r--arch/sh/include/asm/checksum.h6
-rw-r--r--arch/sh/include/asm/elf.h23
-rw-r--r--arch/sh/include/asm/extable.h4
-rw-r--r--arch/sh/include/asm/fixmap.h4
-rw-r--r--arch/sh/include/asm/io.h8
-rw-r--r--arch/sh/include/asm/io_noioport.h34
-rw-r--r--arch/sh/include/asm/irq.h3
-rw-r--r--arch/sh/include/asm/kdebug.h6
-rw-r--r--arch/sh/include/asm/mmu_context.h12
-rw-r--r--arch/sh/include/asm/mmu_context_64.h75
-rw-r--r--arch/sh/include/asm/page.h21
-rw-r--r--arch/sh/include/asm/pgtable-3level.h7
-rw-r--r--arch/sh/include/asm/pgtable.h19
-rw-r--r--arch/sh/include/asm/pgtable_32.h25
-rw-r--r--arch/sh/include/asm/pgtable_64.h306
-rw-r--r--arch/sh/include/asm/posix_types.h6
-rw-r--r--arch/sh/include/asm/processor.h14
-rw-r--r--arch/sh/include/asm/processor_32.h2
-rw-r--r--arch/sh/include/asm/processor_64.h212
-rw-r--r--arch/sh/include/asm/ptrace_64.h14
-rw-r--r--arch/sh/include/asm/string.h6
-rw-r--r--arch/sh/include/asm/string_64.h21
-rw-r--r--arch/sh/include/asm/switch_to.h11
-rw-r--r--arch/sh/include/asm/switch_to_64.h32
-rw-r--r--arch/sh/include/asm/syscall.h6
-rw-r--r--arch/sh/include/asm/syscall_64.h75
-rw-r--r--arch/sh/include/asm/syscalls.h9
-rw-r--r--arch/sh/include/asm/syscalls_64.h18
-rw-r--r--arch/sh/include/asm/thread_info.h4
-rw-r--r--arch/sh/include/asm/tlb.h6
-rw-r--r--arch/sh/include/asm/tlb_64.h68
-rw-r--r--arch/sh/include/asm/traps.h4
-rw-r--r--arch/sh/include/asm/traps_64.h35
-rw-r--r--arch/sh/include/asm/types.h5
-rw-r--r--arch/sh/include/asm/uaccess.h4
-rw-r--r--arch/sh/include/asm/uaccess_64.h85
-rw-r--r--arch/sh/include/asm/unistd.h6
-rw-r--r--arch/sh/include/asm/user.h7
-rw-r--r--arch/sh/include/asm/vermagic.h4
-rw-r--r--arch/sh/include/asm/vmlinux.lds.h8
-rw-r--r--arch/sh/include/cpu-sh5/cpu/addrspace.h12
-rw-r--r--arch/sh/include/cpu-sh5/cpu/cache.h94
-rw-r--r--arch/sh/include/cpu-sh5/cpu/irq.h113
-rw-r--r--arch/sh/include/cpu-sh5/cpu/mmu_context.h22
-rw-r--r--arch/sh/include/cpu-sh5/cpu/registers.h103
-rw-r--r--arch/sh/include/cpu-sh5/cpu/rtc.h9
-rw-r--r--arch/sh/include/uapi/asm/posix_types.h8
-rw-r--r--arch/sh/include/uapi/asm/posix_types_64.h29
-rw-r--r--arch/sh/include/uapi/asm/ptrace.h5
-rw-r--r--arch/sh/include/uapi/asm/ptrace_64.h15
-rw-r--r--arch/sh/include/uapi/asm/sigcontext.h13
-rw-r--r--arch/sh/include/uapi/asm/stat.h61
-rw-r--r--arch/sh/include/uapi/asm/swab.h10
-rw-r--r--arch/sh/include/uapi/asm/unistd.h8
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h423
-rw-r--r--arch/sh/kernel/Makefile16
-rw-r--r--arch/sh/kernel/cpu/Makefile1
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/kernel/cpu/irq/Makefile3
-rw-r--r--arch/sh/kernel/cpu/irq/intc-sh5.c194
-rw-r--r--arch/sh/kernel/cpu/proc.c1
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7786.c1
-rw-r--r--arch/sh/kernel/cpu/sh5/Makefile16
-rw-r--r--arch/sh/kernel/cpu/sh5/clock-sh5.c76
-rw-r--r--arch/sh/kernel/cpu/sh5/entry.S2000
-rw-r--r--arch/sh/kernel/cpu/sh5/fpu.c106
-rw-r--r--arch/sh/kernel/cpu/sh5/probe.c72
-rw-r--r--arch/sh/kernel/cpu/sh5/setup-sh5.c121
-rw-r--r--arch/sh/kernel/cpu/sh5/switchto.S195
-rw-r--r--arch/sh/kernel/cpu/sh5/unwind.c342
-rw-r--r--arch/sh/kernel/dumpstack.c36
-rw-r--r--arch/sh/kernel/head_64.S346
-rw-r--r--arch/sh/kernel/irq_64.c48
-rw-r--r--arch/sh/kernel/machine_kexec.c1
-rw-r--r--arch/sh/kernel/module.c9
-rw-r--r--arch/sh/kernel/process.c2
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/kernel/process_64.c461
-rw-r--r--arch/sh/kernel/ptrace_32.c1
-rw-r--r--arch/sh/kernel/ptrace_64.c576
-rw-r--r--arch/sh/kernel/reboot.c6
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c17
-rw-r--r--arch/sh/kernel/sh_ksyms_64.c51
-rw-r--r--arch/sh/kernel/signal_32.c1
-rw-r--r--arch/sh/kernel/signal_64.c567
-rw-r--r--arch/sh/kernel/sys_sh.c6
-rw-r--r--arch/sh/kernel/syscalls_64.S419
-rw-r--r--arch/sh/kernel/traps.c4
-rw-r--r--arch/sh/kernel/traps_64.c814
-rw-r--r--arch/sh/kernel/vmlinux.lds.S18
-rw-r--r--arch/sh/kernel/vsyscall/vsyscall.c4
-rw-r--r--arch/sh/lib/delay.c1
-rw-r--r--arch/sh/lib64/Makefile17
-rw-r--r--arch/sh/lib64/copy_page.S89
-rw-r--r--arch/sh/lib64/copy_user_memcpy.S218
-rw-r--r--arch/sh/lib64/memcpy.S202
-rw-r--r--arch/sh/lib64/memset.S92
-rw-r--r--arch/sh/lib64/panic.c15
-rw-r--r--arch/sh/lib64/sdivsi3.S136
-rw-r--r--arch/sh/lib64/strcpy.S98
-rw-r--r--arch/sh/lib64/strlen.S34
-rw-r--r--arch/sh/lib64/udelay.c49
-rw-r--r--arch/sh/lib64/udivdi3.S121
-rw-r--r--arch/sh/lib64/udivsi3.S60
-rw-r--r--arch/sh/mm/Kconfig16
-rw-r--r--arch/sh/mm/Makefile31
-rw-r--r--arch/sh/mm/cache-sh3.c1
-rw-r--r--arch/sh/mm/cache-sh4.c11
-rw-r--r--arch/sh/mm/cache-sh5.c626
-rw-r--r--arch/sh/mm/cache-sh7705.c1
-rw-r--r--arch/sh/mm/cache.c6
-rw-r--r--arch/sh/mm/extable_64.c84
-rw-r--r--arch/sh/mm/fault.c16
-rw-r--r--arch/sh/mm/kmap.c5
-rw-r--r--arch/sh/mm/nommu.c1
-rw-r--r--arch/sh/mm/pmb.c2
-rw-r--r--arch/sh/mm/tlb-sh5.c224
-rw-r--r--arch/sh/mm/tlbex_64.c171
-rw-r--r--arch/sh/mm/tlbflush_64.c172
-rw-r--r--arch/sparc/include/asm/cacheflush_32.h2
-rw-r--r--arch/sparc/include/asm/cacheflush_64.h1
-rw-r--r--arch/sparc/include/asm/floppy_32.h2
-rw-r--r--arch/sparc/include/asm/highmem.h2
-rw-r--r--arch/sparc/include/asm/ide.h2
-rw-r--r--arch/sparc/include/asm/io-unit.h2
-rw-r--r--arch/sparc/include/asm/page_32.h12
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h13
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h2
-rw-r--r--arch/sparc/include/asm/pgtable_32.h74
-rw-r--r--arch/sparc/include/asm/pgtable_64.h32
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h36
-rw-r--r--arch/sparc/include/asm/viking.h5
-rw-r--r--arch/sparc/kernel/cpu.c2
-rw-r--r--arch/sparc/kernel/cpumap.c2
-rw-r--r--arch/sparc/kernel/ds.c8
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/head_32.S8
-rw-r--r--arch/sparc/kernel/head_64.S2
-rw-r--r--arch/sparc/kernel/ktlb.S2
-rw-r--r--arch/sparc/kernel/leon_smp.c1
-rw-r--r--arch/sparc/kernel/pci.c4
-rw-r--r--arch/sparc/kernel/process_32.c11
-rw-r--r--arch/sparc/kernel/process_64.c7
-rw-r--r--arch/sparc/kernel/ptrace_32.c234
-rw-r--r--arch/sparc/kernel/ptrace_64.c18
-rw-r--r--arch/sparc/kernel/setup_32.c1
-rw-r--r--arch/sparc/kernel/setup_64.c1
-rw-r--r--arch/sparc/kernel/signal32.c1
-rw-r--r--arch/sparc/kernel/signal_32.c1
-rw-r--r--arch/sparc/kernel/signal_64.c1
-rw-r--r--arch/sparc/kernel/smp_32.c1
-rw-r--r--arch/sparc/kernel/smp_64.c1
-rw-r--r--arch/sparc/kernel/sun4m_irq.c2
-rw-r--r--arch/sparc/kernel/sys_sparc32.c1
-rw-r--r--arch/sparc/kernel/trampoline_64.S2
-rw-r--r--arch/sparc/kernel/traps_32.c2
-rw-r--r--arch/sparc/kernel/traps_64.c10
-rw-r--r--arch/sparc/kernel/vio.c2
-rw-r--r--arch/sparc/lib/clear_page.S2
-rw-r--r--arch/sparc/lib/copy_page.S2
-rw-r--r--arch/sparc/mm/fault_32.c21
-rw-r--r--arch/sparc/mm/fault_64.c17
-rw-r--r--arch/sparc/mm/highmem.c12
-rw-r--r--arch/sparc/mm/hugetlbpage.c1
-rw-r--r--arch/sparc/mm/hypersparc.S3
-rw-r--r--arch/sparc/mm/init_32.c1
-rw-r--r--arch/sparc/mm/init_64.c17
-rw-r--r--arch/sparc/mm/io-unit.c11
-rw-r--r--arch/sparc/mm/iommu.c9
-rw-r--r--arch/sparc/mm/srmmu.c114
-rw-r--r--arch/sparc/mm/tlb.c1
-rw-r--r--arch/sparc/mm/tsb.c2
-rw-r--r--arch/sparc/mm/ultra.S2
-rw-r--r--arch/sparc/mm/viking.S5
-rw-r--r--arch/sparc/vdso/vma.c4
-rw-r--r--arch/um/Makefile2
-rw-r--r--arch/um/drivers/Makefile4
-rw-r--r--arch/um/drivers/mconsole_kern.c2
-rw-r--r--arch/um/drivers/vector_kern.h2
-rw-r--r--arch/um/drivers/vector_user.c59
-rw-r--r--arch/um/drivers/vhost_user.h2
-rw-r--r--arch/um/drivers/virtio_uml.c2
-rw-r--r--arch/um/include/asm/mmu_context.h5
-rw-r--r--arch/um/include/asm/pgtable-3level.h4
-rw-r--r--arch/um/include/asm/pgtable.h69
-rw-r--r--arch/um/include/asm/tlb.h2
-rw-r--r--arch/um/kernel/maccess.c10
-rw-r--r--arch/um/kernel/mem.c10
-rw-r--r--arch/um/kernel/process.c1
-rw-r--r--arch/um/kernel/skas/mmu.c3
-rw-r--r--arch/um/kernel/skas/uaccess.c1
-rw-r--r--arch/um/kernel/sysrq.c23
-rw-r--r--arch/um/kernel/tlb.c5
-rw-r--r--arch/um/kernel/trap.c15
-rw-r--r--arch/um/kernel/um_arch.c1
-rw-r--r--arch/um/os-Linux/file.c3
-rw-r--r--arch/unicore32/Kconfig2
-rw-r--r--arch/unicore32/include/asm/cacheflush.h11
-rw-r--r--arch/unicore32/include/asm/pgtable.h19
-rw-r--r--arch/unicore32/kernel/hibernate.c2
-rw-r--r--arch/unicore32/kernel/hibernate_asm.S2
-rw-r--r--arch/unicore32/kernel/module.c1
-rw-r--r--arch/unicore32/kernel/setup.h2
-rw-r--r--arch/unicore32/kernel/traps.c34
-rw-r--r--arch/unicore32/lib/Makefile4
-rw-r--r--arch/unicore32/lib/backtrace.S24
-rw-r--r--arch/unicore32/mm/alignment.c2
-rw-r--r--arch/unicore32/mm/fault.c9
-rw-r--r--arch/unicore32/mm/mm.h10
-rw-r--r--arch/unicore32/mm/proc-ucv2.S2
-rw-r--r--arch/x86/Kconfig11
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/boot/compressed/kaslr_64.c2
-rw-r--r--arch/x86/entry/Makefile8
-rw-r--r--arch/x86/entry/calling.h25
-rw-r--r--arch/x86/entry/common.c440
-rw-r--r--arch/x86/entry/entry_32.S485
-rw-r--r--arch/x86/entry/entry_64.S840
-rw-r--r--arch/x86/entry/entry_64_compat.S55
-rw-r--r--arch/x86/entry/thunk_64.S14
-rw-r--r--arch/x86/entry/vdso/Makefile6
-rw-r--r--arch/x86/entry/vdso/vma.c14
-rw-r--r--arch/x86/events/core.c4
-rw-r--r--arch/x86/hyperv/hv_init.c9
-rw-r--r--arch/x86/include/asm/acrn.h11
-rw-r--r--arch/x86/include/asm/agp.h2
-rw-r--r--arch/x86/include/asm/amd_nb.h1
-rw-r--r--arch/x86/include/asm/apic.h33
-rw-r--r--arch/x86/include/asm/asm-prototypes.h2
-rw-r--r--arch/x86/include/asm/atomic.h31
-rw-r--r--arch/x86/include/asm/atomic64_32.h9
-rw-r--r--arch/x86/include/asm/atomic64_64.h15
-rw-r--r--arch/x86/include/asm/bitops.h6
-rw-r--r--arch/x86/include/asm/bug.h3
-rw-r--r--arch/x86/include/asm/cacheflush.h2
-rw-r--r--arch/x86/include/asm/cpu_entry_area.h12
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/debugreg.h48
-rw-r--r--arch/x86/include/asm/desc.h52
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/elf.h22
-rw-r--r--arch/x86/include/asm/entry_arch.h56
-rw-r--r--arch/x86/include/asm/hw_irq.h22
-rw-r--r--arch/x86/include/asm/idtentry.h652
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/iomap.h1
-rw-r--r--arch/x86/include/asm/irq.h15
-rw-r--r--arch/x86/include/asm/irq_regs.h32
-rw-r--r--arch/x86/include/asm/irq_stack.h53
-rw-r--r--arch/x86/include/asm/irq_work.h1
-rw-r--r--arch/x86/include/asm/irqflags.h54
-rw-r--r--arch/x86/include/asm/kaslr.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/kvm_para.h2
-rw-r--r--arch/x86/include/asm/mce.h30
-rw-r--r--arch/x86/include/asm/mmu.h2
-rw-r--r--arch/x86/include/asm/mshyperv.h13
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/nospec-branch.h4
-rw-r--r--arch/x86/include/asm/pgtable-3level.h8
-rw-r--r--arch/x86/include/asm/pgtable.h89
-rw-r--r--arch/x86/include/asm/pgtable_32.h18
-rw-r--r--arch/x86/include/asm/pgtable_64.h4
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/ptrace.h2
-rw-r--r--arch/x86/include/asm/set_memory.h19
-rw-r--r--arch/x86/include/asm/setup.h12
-rw-r--r--arch/x86/include/asm/special_insns.h22
-rw-r--r--arch/x86/include/asm/stacktrace.h2
-rw-r--r--arch/x86/include/asm/text-patching.h11
-rw-r--r--arch/x86/include/asm/trace/common.h4
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h17
-rw-r--r--arch/x86/include/asm/trapnr.h31
-rw-r--r--arch/x86/include/asm/traps.h118
-rw-r--r--arch/x86/include/asm/uaccess.h28
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h8
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h18
-rw-r--r--arch/x86/include/asm/xen/hypercall.h2
-rw-r--r--arch/x86/include/asm/xen/page.h1
-rw-r--r--arch/x86/include/uapi/asm/mce.h1
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/alternative.c26
-rw-r--r--arch/x86/kernel/amd_gart_64.c3
-rw-r--r--arch/x86/kernel/amd_nb.c5
-rw-r--r--arch/x86/kernel/apic/apic.c41
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/msi.c3
-rw-r--r--arch/x86/kernel/apic/vector.c5
-rw-r--r--arch/x86/kernel/asm-offsets_64.c3
-rw-r--r--arch/x86/kernel/cpu/Makefile3
-rw-r--r--arch/x86/kernel/cpu/acrn.c9
-rw-r--r--arch/x86/kernel/cpu/bugs.c200
-rw-r--r--arch/x86/kernel/cpu/common.c77
-rw-r--r--arch/x86/kernel/cpu/cpu.h1
-rw-r--r--arch/x86/kernel/cpu/intel.c5
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c238
-rw-r--r--arch/x86/kernel/cpu/mce/core.c206
-rw-r--r--arch/x86/kernel/cpu/mce/dev-mcelog.c8
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c4
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h12
-rw-r--r--arch/x86/kernel/cpu/mce/p5.c8
-rw-r--r--arch/x86/kernel/cpu/mce/severity.c6
-rw-r--r--arch/x86/kernel/cpu/mce/therm_throt.c5
-rw-r--r--arch/x86/kernel/cpu/mce/threshold.c5
-rw-r--r--arch/x86/kernel/cpu/mce/winchip.c8
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c22
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c6
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c6
-rw-r--r--arch/x86/kernel/crash_core_32.c2
-rw-r--r--arch/x86/kernel/crash_core_64.c2
-rw-r--r--arch/x86/kernel/doublefault_32.c11
-rw-r--r--arch/x86/kernel/dumpstack.c9
-rw-r--r--arch/x86/kernel/dumpstack_64.c7
-rw-r--r--arch/x86/kernel/e820.c10
-rw-r--r--arch/x86/kernel/early_printk.c2
-rw-r--r--arch/x86/kernel/espfix_64.c2
-rw-r--r--arch/x86/kernel/ftrace_64.S2
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_64.S9
-rw-r--r--arch/x86/kernel/hw_breakpoint.c100
-rw-r--r--arch/x86/kernel/i8259.c2
-rw-r--r--arch/x86/kernel/idt.c226
-rw-r--r--arch/x86/kernel/irq.c66
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kernel/irq_64.c6
-rw-r--r--arch/x86/kernel/irq_work.c6
-rw-r--r--arch/x86/kernel/irqinit.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c9
-rw-r--r--arch/x86/kernel/kprobes/opt.c6
-rw-r--r--arch/x86/kernel/kvm.c16
-rw-r--r--arch/x86/kernel/ldt.c2
-rw-r--r--arch/x86/kernel/machine_kexec_32.c1
-rw-r--r--arch/x86/kernel/machine_kexec_64.c1
-rw-r--r--arch/x86/kernel/module.c1
-rw-r--r--arch/x86/kernel/nmi.c75
-rw-r--r--arch/x86/kernel/paravirt.c2
-rw-r--r--arch/x86/kernel/process.c28
-rw-r--r--arch/x86/kernel/process_32.c1
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/ptrace.c1
-rw-r--r--arch/x86/kernel/reboot.c10
-rw-r--r--arch/x86/kernel/smp.c37
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/sys_ia32.c40
-rw-r--r--arch/x86/kernel/tboot.c3
-rw-r--r--arch/x86/kernel/time.c4
-rw-r--r--arch/x86/kernel/tracepoint.c17
-rw-r--r--arch/x86/kernel/traps.c548
-rw-r--r--arch/x86/kernel/unwind_frame.c8
-rw-r--r--arch/x86/kernel/vm86_32.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S5
-rw-r--r--arch/x86/kvm/cpuid.c31
-rw-r--r--arch/x86/kvm/debugfs.c10
-rw-r--r--arch/x86/kvm/emulate.c8
-rw-r--r--arch/x86/kvm/hyperv.c2
-rw-r--r--arch/x86/kvm/i8254.c1
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h8
-rw-r--r--arch/x86/kvm/svm/nested.c2
-rw-r--r--arch/x86/kvm/svm/svm.c6
-rw-r--r--arch/x86/kvm/vmx/nested.c84
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c2
-rw-r--r--arch/x86/kvm/vmx/vmx.c40
-rw-r--r--arch/x86/kvm/vmx/vmx.h2
-rw-r--r--arch/x86/kvm/x86.c139
-rw-r--r--arch/x86/lib/Makefile9
-rw-r--r--arch/x86/mm/Makefile4
-rw-r--r--arch/x86/mm/cpu_entry_area.c3
-rw-r--r--arch/x86/mm/debug_pagetables.c2
-rw-r--r--arch/x86/mm/dump_pagetables.c1
-rw-r--r--arch/x86/mm/extable.c15
-rw-r--r--arch/x86/mm/fault.c100
-rw-r--r--arch/x86/mm/init.c22
-rw-r--r--arch/x86/mm/init_32.c27
-rw-r--r--arch/x86/mm/init_64.c1
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/kasan_init_64.c1
-rw-r--r--arch/x86/mm/kaslr.c35
-rw-r--r--arch/x86/mm/maccess.c28
-rw-r--r--arch/x86/mm/mem_encrypt_boot.S2
-rw-r--r--arch/x86/mm/mmio-mod.c2
-rw-r--r--arch/x86/mm/pat/cpa-test.c1
-rw-r--r--arch/x86/mm/pat/memtype.c1
-rw-r--r--arch/x86/mm/pat/memtype_interval.c2
-rw-r--r--arch/x86/mm/pgtable.c1
-rw-r--r--arch/x86/mm/pgtable_32.c1
-rw-r--r--arch/x86/mm/pti.c5
-rw-r--r--arch/x86/mm/setup_nx.c2
-rw-r--r--arch/x86/pci/fixup.c4
-rw-r--r--arch/x86/pci/xen.c16
-rw-r--r--arch/x86/platform/efi/efi_32.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c1
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c4
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c4
-rw-r--r--arch/x86/platform/olpc/olpc_ofw.c2
-rw-r--r--arch/x86/platform/uv/tlb_uv.c2
-rw-r--r--arch/x86/power/cpu.c2
-rw-r--r--arch/x86/power/hibernate.c2
-rw-r--r--arch/x86/power/hibernate_32.c2
-rw-r--r--arch/x86/power/hibernate_64.c2
-rw-r--r--arch/x86/purgatory/.gitignore1
-rw-r--r--arch/x86/purgatory/Makefile21
-rw-r--r--arch/x86/realmode/Makefile3
-rw-r--r--arch/x86/realmode/init.c2
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--arch/x86/um/vdso/vma.c4
-rw-r--r--arch/x86/xen/enlighten_hvm.c12
-rw-r--r--arch/x86/xen/enlighten_pv.c53
-rw-r--r--arch/x86/xen/grant-table.c1
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/x86/xen/setup.c4
-rw-r--r--arch/x86/xen/smp_pv.c5
-rw-r--r--arch/x86/xen/suspend_hvm.c3
-rw-r--r--arch/x86/xen/xen-asm_32.S14
-rw-r--r--arch/x86/xen/xen-asm_64.S44
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/include/asm/cacheflush.h2
-rw-r--r--arch/xtensa/include/asm/fixmap.h10
-rw-r--r--arch/xtensa/include/asm/highmem.h2
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h2
-rw-r--r--arch/xtensa/include/asm/mmu_context.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h20
-rw-r--r--arch/xtensa/kernel/entry.S2
-rw-r--r--arch/xtensa/kernel/process.c1
-rw-r--r--arch/xtensa/kernel/ptrace.c1
-rw-r--r--arch/xtensa/kernel/setup.c1
-rw-r--r--arch/xtensa/kernel/traps.c24
-rw-r--r--arch/xtensa/kernel/vectors.S2
-rw-r--r--arch/xtensa/mm/cache.c2
-rw-r--r--arch/xtensa/mm/fault.c12
-rw-r--r--arch/xtensa/mm/highmem.c2
-rw-r--r--arch/xtensa/mm/ioremap.c2
-rw-r--r--arch/xtensa/mm/kasan_init.c10
-rw-r--r--arch/xtensa/mm/misc.S2
-rw-r--r--arch/xtensa/mm/mmu.c5
1150 files changed, 8316 insertions(+), 19904 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 2e6f843d87c4..6d2ba653fe49 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -328,12 +328,6 @@ config HAVE_FUNCTION_ARG_ACCESS_API
the API needed to access function arguments from pt_regs,
declared in asm/ptrace.h
-config HAVE_CLK
- bool
- help
- The <linux/clk.h> calls support software clock gating and
- thus are a key power management tool on many systems.
-
config HAVE_HW_BREAKPOINT
bool
depends on PERF_EVENTS
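
Note: the dropped HAVE_CLK symbol gated the common clock consumer API from <linux/clk.h>. For readers unfamiliar with it, the pattern it enables looks roughly like the following sketch (the device and the "pclk" connection id are hypothetical, not from this patch):

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_enable_clock(struct device *dev)
	{
		struct clk *clk = clk_get(dev, "pclk"); /* "pclk" is made up */
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);
		ret = clk_prepare_enable(clk);  /* ungate the clock */
		if (ret) {
			clk_put(clk);
			return ret;
		}
		/* ... use the peripheral ... */
		clk_disable_unprepare(clk);     /* gate it again to save power */
		clk_put(clk);
		return 0;
	}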
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index ef179033a7c2..48f6e22b6447 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -545,7 +545,7 @@ config NR_CPUS
default "4" if !ALPHA_GENERIC && !ALPHA_MARVEL
help
MARVEL support can handle a maximum of 32 CPUs, all the others
- with working support have a maximum of 4 CPUs.
+ with working support have a maximum of 4 CPUs.
config ARCH_DISCONTIGMEM_ENABLE
bool "Discontiguous Memory Support"
@@ -657,7 +657,7 @@ choice
endchoice
config HZ
- int
+ int
default 32 if HZ_32
default 64 if HZ_64
default 128 if HZ_128
diff --git a/arch/alpha/boot/bootp.c b/arch/alpha/boot/bootp.c
index 95c0359f4858..00266e6e1b71 100644
--- a/arch/alpha/boot/bootp.c
+++ b/arch/alpha/boot/bootp.c
@@ -16,7 +16,6 @@
#include <asm/console.h>
#include <asm/hwrpb.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <stdarg.h>
diff --git a/arch/alpha/boot/bootpz.c b/arch/alpha/boot/bootpz.c
index 99b8d7dc344b..43af71835adf 100644
--- a/arch/alpha/boot/bootpz.c
+++ b/arch/alpha/boot/bootpz.c
@@ -18,7 +18,6 @@
#include <asm/console.h>
#include <asm/hwrpb.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <stdarg.h>
diff --git a/arch/alpha/boot/main.c b/arch/alpha/boot/main.c
index 8f5ed8610970..e5347a080008 100644
--- a/arch/alpha/boot/main.c
+++ b/arch/alpha/boot/main.c
@@ -14,7 +14,6 @@
#include <asm/console.h>
#include <asm/hwrpb.h>
-#include <asm/pgtable.h>
#include <stdarg.h>
diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c
index 825a16f5f622..08b430d25a31 100644
--- a/arch/alpha/boot/tools/objstrip.c
+++ b/arch/alpha/boot/tools/objstrip.c
@@ -148,7 +148,7 @@ main (int argc, char *argv[])
#ifdef __ELF__
elf = (struct elfhdr *) buf;
- if (elf->e_ident[0] == 0x7f && strncmp((char *)elf->e_ident + 1, "ELF", 3) == 0) {
+ if (elf->e_ident[0] == 0x7f && str_has_prefix((char *)elf->e_ident + 1, "ELF")) {
if (elf->e_type != ET_EXEC) {
fprintf(stderr, "%s: %s is not an ELF executable\n",
prog_name, inname);
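
Note: the strncmp()-to-str_has_prefix() conversion above preserves the test's meaning. str_has_prefix() from <linux/string.h> returns the length of the prefix when the string starts with it and 0 otherwise, so it works as a boolean. A paraphrased sketch, not the kernel's exact body:

	size_t str_has_prefix(const char *str, const char *prefix)
	{
		size_t len = strlen(prefix);

		return strncmp(str, prefix, len) == 0 ? len : 0;
	}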
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
index 89128489cb59..9945ff483eaf 100644
--- a/arch/alpha/include/asm/cacheflush.h
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -4,19 +4,6 @@
#include <linux/mm.h>
-/* Caches aren't brain-dead on the Alpha. */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
/* Note that the following two definitions are _highly_ dependent
on the contexts in which they are used in the kernel. I personally
think it is criminal how loosely defined these macros are. */
@@ -48,7 +35,7 @@ extern void smp_imb(void);
extern void __load_new_mm_context(struct mm_struct *);
static inline void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (vma->vm_flags & VM_EXEC) {
@@ -59,20 +46,17 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
mm->context[smp_processor_id()] = 0;
}
}
-#else
-extern void flush_icache_user_range(struct vm_area_struct *vma,
+#define flush_icache_user_page flush_icache_user_page
+#else /* CONFIG_SMP */
+extern void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len);
-#endif
+#define flush_icache_user_page flush_icache_user_page
+#endif /* CONFIG_SMP */
/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
- flush_icache_user_range((vma), (page), 0, 0)
+ flush_icache_user_page((vma), (page), 0, 0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ALPHA_CACHEFLUSH_H */
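
Note: the repeated "#define flush_icache_user_page flush_icache_user_page" lines are the usual asm-generic override idiom. <asm-generic/cacheflush.h> only installs its no-op fallback when the name is not already defined as a macro, so an architecture self-defines the macro to keep its own implementation. A paraphrased sketch of the generic side:

	/* Inside <asm-generic/cacheflush.h>, approximately: */
	#ifndef flush_icache_user_page
	static inline void flush_icache_user_page(struct vm_area_struct *vma,
						  struct page *page,
						  unsigned long addr, int len)
	{
	}
	#endif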
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index d1ed5a8133c5..a4d0c19f1e79 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
-#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>
@@ -310,14 +309,18 @@ static inline int __is_mmio(const volatile void __iomem *addr)
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
extern inline unsigned int ioread16(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
@@ -358,7 +361,9 @@ extern inline void outw(u16 b, unsigned long port)
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -403,14 +408,18 @@ extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
extern inline u8 readb(const volatile void __iomem *addr)
{
- u8 ret = __raw_readb(addr);
+ u8 ret;
+ mb();
+ ret = __raw_readb(addr);
mb();
return ret;
}
extern inline u16 readw(const volatile void __iomem *addr)
{
- u16 ret = __raw_readw(addr);
+ u16 ret;
+ mb();
+ ret = __raw_readw(addr);
mb();
return ret;
}
@@ -451,14 +460,18 @@ extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
extern inline u32 readl(const volatile void __iomem *addr)
{
- u32 ret = __raw_readl(addr);
+ u32 ret;
+ mb();
+ ret = __raw_readl(addr);
mb();
return ret;
}
extern inline u64 readq(const volatile void __iomem *addr)
{
- u64 ret = __raw_readq(addr);
+ u64 ret;
+ mb();
+ ret = __raw_readq(addr);
mb();
return ret;
}
@@ -487,14 +500,44 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define outb_p outb
#define outw_p outw
#define outl_p outl
-#define readb_relaxed(addr) __raw_readb(addr)
-#define readw_relaxed(addr) __raw_readw(addr)
-#define readl_relaxed(addr) __raw_readl(addr)
-#define readq_relaxed(addr) __raw_readq(addr)
-#define writeb_relaxed(b, addr) __raw_writeb(b, addr)
-#define writew_relaxed(b, addr) __raw_writew(b, addr)
-#define writel_relaxed(b, addr) __raw_writel(b, addr)
-#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
+
+extern u8 readb_relaxed(const volatile void __iomem *addr);
+extern u16 readw_relaxed(const volatile void __iomem *addr);
+extern u32 readl_relaxed(const volatile void __iomem *addr);
+extern u64 readq_relaxed(const volatile void __iomem *addr);
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
+extern inline u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readb(addr);
+}
+
+extern inline u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readw(addr);
+}
+#endif
+
+#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
+extern inline u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readl(addr);
+}
+
+extern inline u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readq(addr);
+}
+#endif
+
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+#define writeq_relaxed writeq
/*
* String version of IO memory access ops:
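
Note: the io.h hunks above put an mb() on both sides of each MMIO access. The leading barrier keeps the device read from being reordered before earlier memory accesses (for example, stores that filled a DMA buffer); the trailing one orders it against what follows. A sketch of a driver sequence that relies on this (device, registers and flags are hypothetical):

	buf->cmd = CMD_START;           /* plain store to a DMA descriptor */
	writel(GO, regs + DOORBELL);    /* leading mb() orders the doorbell
					   write after the descriptor store */
	status = readl(regs + STATUS);  /* leading mb() keeps this read from
					   being hoisted above the doorbell */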
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 0267aa8a4f86..162c17b2631f 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -276,15 +276,6 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return
extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
/*
* The smp_read_barrier_depends() in the following functions are required to
* order the load of *dir (the pointer in the top level page table) with any
@@ -305,6 +296,7 @@ extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
smp_read_barrier_depends(); /* see above */
return ret;
}
+#define pmd_offset pmd_offset
/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
@@ -314,9 +306,7 @@ extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
smp_read_barrier_depends(); /* see above */
return ret;
}
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte) do { } while (0)
+#define pte_offset_kernel pte_offset_kernel
extern pgd_t swapper_pg_dir[1024];
@@ -355,8 +345,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
extern void paging_init(void);
-#include <asm-generic/pgtable.h>
-
/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
#define HAVE_ARCH_UNMAPPED_AREA
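[Annotation] The `#define pmd_offset pmd_offset` and `#define pte_offset_kernel pte_offset_kernel` lines above use the kernel's self-#define convention for overriding a generic helper: defining the name to itself lets <linux/pgtable.h> (which this series introduces in place of <asm-generic/pgtable.h>) skip its fallback via #ifndef. A condensed sketch of the mechanism, with the generic fallback simplified:

	/* arch header: provide the override, then mark it as present */
	static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
	{
		pmd_t *ret = (pmd_t *)pud_page_vaddr(*dir) + pmd_index(address);

		smp_read_barrier_depends();	/* arch-specific ordering */
		return ret;
	}
	#define pmd_offset pmd_offset

	/* generic header (simplified): compiled only if no override exists */
	#ifndef pmd_offset
	static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
	{
		return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
	}
	#endif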
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
index c025a3e5e357..938de13adfbf 100644
--- a/arch/alpha/kernel/io.c
+++ b/arch/alpha/kernel/io.c
@@ -16,21 +16,27 @@
unsigned int
ioread8(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
mb();
return ret;
}
unsigned int ioread16(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
mb();
return ret;
}
unsigned int ioread32(void __iomem *addr)
{
- unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ unsigned int ret;
+ mb();
+ ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
mb();
return ret;
}
@@ -148,28 +154,36 @@ EXPORT_SYMBOL(__raw_writeq);
u8 readb(const volatile void __iomem *addr)
{
- u8 ret = __raw_readb(addr);
+ u8 ret;
+ mb();
+ ret = __raw_readb(addr);
mb();
return ret;
}
u16 readw(const volatile void __iomem *addr)
{
- u16 ret = __raw_readw(addr);
+ u16 ret;
+ mb();
+ ret = __raw_readw(addr);
mb();
return ret;
}
u32 readl(const volatile void __iomem *addr)
{
- u32 ret = __raw_readl(addr);
+ u32 ret;
+ mb();
+ ret = __raw_readl(addr);
mb();
return ret;
}
u64 readq(const volatile void __iomem *addr)
{
- u64 ret = __raw_readq(addr);
+ u64 ret;
+ mb();
+ ret = __raw_readq(addr);
mb();
return ret;
}
@@ -207,6 +221,38 @@ EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);
+/*
+ * The _relaxed functions must be ordered w.r.t. each other, but they don't
+ * have to be ordered w.r.t. other memory accesses.
+ */
+u8 readb_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readb(addr);
+}
+
+u16 readw_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readw(addr);
+}
+
+u32 readl_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readl(addr);
+}
+
+u64 readq_relaxed(const volatile void __iomem *addr)
+{
+ mb();
+ return __raw_readq(addr);
+}
+
+EXPORT_SYMBOL(readb_relaxed);
+EXPORT_SYMBOL(readw_relaxed);
+EXPORT_SYMBOL(readl_relaxed);
+EXPORT_SYMBOL(readq_relaxed);
/*
* Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 94e4cde8071a..d5367a1c6300 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -677,7 +677,7 @@ SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
default:
error = -EOPNOTSUPP;
break;
- };
+ }
return error;
}
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 7f1925a32c99..81037907268d 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -638,7 +638,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
while (sg+1 < end && (int) sg[1].dma_address == -1) {
size += sg[1].length;
- sg++;
+ sg = sg_next(sg);
}
npages = iommu_num_pages(paddr, size, PAGE_SIZE);
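[Annotation] The sg++ to sg_next() change matters once scatterlists can be chained: the last slot of each array segment may hold a link to the next segment rather than a data entry, and only sg_next() knows to follow it. A minimal sketch of the safe iteration pattern:

	#include <linux/scatterlist.h>

	/* Hedged sketch: sum the lengths of a possibly-chained scatterlist.
	 * for_each_sg() advances with sg_next(), which transparently follows
	 * chain links that plain sg++ would misread as data entries. */
	static size_t sg_total_len(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		size_t total = 0;
		int i;

		for_each_sg(sgl, sg, nents, i)
			total += sg->length;

		return total;
	}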
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 48b81d015d8a..b45f0b0d6511 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -37,7 +37,6 @@
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/fpu.h>
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index f1fce942fddc..701a05090141 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -2,8 +2,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
-
/* Prototypes of functions used across modules here in this directory. */
#define vucp volatile unsigned char *
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index cb8d599e72d6..8c43212ae38e 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -19,7 +19,6 @@
#include <linux/audit.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/fpu.h>
#include "proto.h"
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index f19aa577354b..916e42d74a86 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -55,7 +55,6 @@ static struct notifier_block alpha_panic_block = {
};
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
@@ -254,7 +253,7 @@ reserve_std_resources(void)
/* Fix up for the Jensen's queer RTC placement. */
standard_io_resources[0].start = RTC_PORT(0);
- standard_io_resources[0].end = RTC_PORT(0) + 0x10;
+ standard_io_resources[0].end = RTC_PORT(0) + 0x0f;
for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
request_resource(io, standard_io_resources+i);
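[Annotation] The RTC fix reflects that both bounds of a struct resource are inclusive: resource_size() computes end - start + 1, so a 16-port window must end at start + 0x0f, and the old + 0x10 claimed a seventeenth port belonging to the neighbouring region. A small illustration, with an example base address (the Jensen's actual RTC base differs):

	#include <linux/ioport.h>

	/* Both bounds are inclusive: this covers ports 0x70..0x7f. */
	static struct resource rtc_io = {
		.name	= "rtc",
		.start	= 0x70,			/* example base */
		.end	= 0x70 + 0x0f,		/* 16 ports, inclusive */
		.flags	= IORESOURCE_IO,
	};

	/* resource_size(&rtc_io) == 0x7f - 0x70 + 1 == 16 */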
@@ -430,6 +429,20 @@ register_cpus(void)
arch_initcall(register_cpus);
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_reboot_handler(int unused)
+{
+ machine_halt();
+}
+
+static const struct sysrq_key_op srm_sysrq_reboot_op = {
+ .handler = sysrq_reboot_handler,
+ .help_msg = "reboot(b)",
+ .action_msg = "Resetting",
+ .enable_mask = SYSRQ_ENABLE_BOOT,
+};
+#endif
+
void __init
setup_arch(char **cmdline_p)
{
@@ -466,7 +479,7 @@ setup_arch(char **cmdline_p)
#ifndef alpha_using_srm
/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the latter by looking for "MILO" in the system serial nr. */
- alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
+ alpha_using_srm = !str_has_prefix((const char *)hwrpb->ssn, "MILO");
#endif
#ifndef alpha_using_qemu
/* Similarly, look for QEMU. */
@@ -550,8 +563,8 @@ setup_arch(char **cmdline_p)
/* If we're using SRM, make sysrq-b halt back to the prom,
not auto-reboot. */
if (alpha_using_srm) {
- struct sysrq_key_op *op = __sysrq_get_key_op('b');
- op->handler = (void *) machine_halt;
+ unregister_sysrq_key('b', __sysrq_reboot_op);
+ register_sysrq_key('b', &srm_sysrq_reboot_op);
}
#endif
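[Annotation] The SRM hunk replaces direct mutation of the shared sysrq table (via __sysrq_get_key_op() and a function-pointer cast) with the register/unregister API, which also lets the op be const. A hedged sketch of the same pattern for a made-up key — the key 'x' and all strings below are invented:

	#include <linux/sysrq.h>

	static void example_sysrq_handler(int key)
	{
		pr_info("sysrq-%c fired\n", key);
	}

	static const struct sysrq_key_op example_sysrq_op = {
		.handler	= example_sysrq_handler,
		.help_msg	= "example(x)",
		.action_msg	= "Running example action",
		.enable_mask	= SYSRQ_ENABLE_KEYBOARD,
	};

	static int __init example_init(void)
	{
		return register_sysrq_key('x', &example_sysrq_op);
	}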
@@ -1412,6 +1425,7 @@ c_start(struct seq_file *f, loff_t *pos)
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
+ (*pos)++;
return NULL;
}
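[Annotation] The c_next() change fixes a common seq_file pitfall: the ->next callback must advance *pos even when it returns NULL, or seq_file assumes the iterator made no progress and warns or re-reads the same record. A hedged sketch of a one-shot iterator showing the rule:

	#include <linux/seq_file.h>

	static void *one_start(struct seq_file *f, loff_t *pos)
	{
		return *pos == 0 ? SEQ_START_TOKEN : NULL;	/* one record */
	}

	static void *one_next(struct seq_file *f, void *v, loff_t *pos)
	{
		(*pos)++;	/* must advance even at end-of-sequence */
		return NULL;	/* no further records */
	}

	static void one_stop(struct seq_file *f, void *v)
	{
	}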
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 5f90df30be20..631cc17410d1 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -36,7 +36,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -740,7 +739,7 @@ ipi_flush_icache_page(void *x)
}
void
-flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
struct mm_struct *mm = vma->vm_mm;
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index ce5430056f65..e063b3857b3d 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -23,7 +23,6 @@
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 0aa6a27d0e2f..47459b73cdb7 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/core_lca.h>
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index d33508621820..9fb445d7dca5 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -26,7 +26,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 1cdfe55fb987..3c43fd347526 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/hwrpb.h>
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 016f79251141..aea8a54da4bc 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_tsunami.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
@@ -176,7 +175,7 @@ eiger_swizzle(struct pci_dev *dev, u8 *pinp)
case 0x03: bridge_count = 2; break; /* 2 */
case 0x07: bridge_count = 3; break; /* 3 */
case 0x0f: bridge_count = 4; break; /* 4 */
- };
+ }
slot = PCI_SLOT(dev->devfn);
while (dev->bus->self) {
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index d0d44f543d77..0a2ab6cb18db 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -25,7 +25,6 @@
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "proto.h"
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 533899a4a1a1..83d6c53d6d4d 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -18,7 +18,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_marvel.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index 702292af2225..e1bee8f84c58 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 3af4f94113e1..7690dfd57cb6 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 32850e45834b..53adf43dcd44 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -40,7 +40,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_irongate.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index b106f327f765..47f3ce4f719a 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -24,7 +24,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index b76f65d0e8b5..b5846ffdadce 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -21,7 +21,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_mcpcia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index d33074011960..4b1c8d85c4f0 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -23,7 +23,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 4d85eaeb44aa..94046f9aea08 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_polaris.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 3cf0d32da5d8..930005b2f630 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -21,7 +21,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_t2.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index a6bdc1da47ad..7c420d8dac53 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -25,7 +25,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index 17cc203176c8..dd9de84b630c 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -22,7 +22,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index e230c6864088..9e2adb69bc74 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -21,7 +21,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index c8390d8de140..b1f3b4fcf99b 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -26,7 +26,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_titan.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 2191bde161fd..2c54d707142a 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -20,7 +20,6 @@
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/core_wildfire.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index f6b9664ac504..49754e07e04f 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -121,10 +121,10 @@ dik_show_code(unsigned int *pc)
}
static void
-dik_show_trace(unsigned long *sp)
+dik_show_trace(unsigned long *sp, const char *loglvl)
{
long i = 0;
- printk("Trace:\n");
+ printk("%sTrace:\n", loglvl);
while (0x1ff8 & (unsigned long) sp) {
extern char _stext[], _etext[];
unsigned long tmp = *sp;
@@ -133,24 +133,24 @@ dik_show_trace(unsigned long *sp)
continue;
if (tmp >= (unsigned long) &_etext)
continue;
- printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
+ printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
if (i > 40) {
- printk(" ...");
+ printk("%s ...", loglvl);
break;
}
}
- printk("\n");
+ printk("%s\n", loglvl);
}
static int kstack_depth_to_print = 24;
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
unsigned long *stack;
int i;
/*
- * debugging aid: "show_stack(NULL);" prints the
+ * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
* back trace for this cpu.
*/
if(sp==NULL)
@@ -163,14 +163,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if ((i % 4) == 0) {
if (i)
pr_cont("\n");
- printk(" ");
+ printk("%s ", loglvl);
} else {
pr_cont(" ");
}
pr_cont("%016lx", *stack++);
}
pr_cont("\n");
- dik_show_trace(sp);
+ dik_show_trace(sp, loglvl);
}
void
@@ -184,7 +184,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
dik_show_regs(regs, r9_15);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- dik_show_trace((unsigned long *)(regs+1));
+ dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
dik_show_code((unsigned int *)regs->pc);
if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
@@ -625,7 +625,7 @@ got_exception:
printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
dik_show_code((unsigned int *)pc);
- dik_show_trace((unsigned long *)(regs+1));
+ dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
printk("die_if_kernel recursion detected.\n");
@@ -957,12 +957,12 @@ give_sigsegv:
si_code = SEGV_ACCERR;
else {
struct mm_struct *mm = current->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (find_vma(mm, (unsigned long)va))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
send_sig_fault(SIGSEGV, si_code, va, 0, current);
return;
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index c2d7b6d7bac7..c2303a8c2b9f 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -117,7 +117,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -171,7 +171,7 @@ retry:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -180,14 +180,14 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/* Something tried to access memory that isn't in our memory map.
Fix it, but check if it's kernel or user first. */
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs))
goto do_sigsegv;
@@ -211,14 +211,14 @@ retry:
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Send a sigbus, regardless of whether we were in kernel
or user mode. */
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
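[Annotation] All of the down_read(&mm->mmap_sem)/up_read() conversions in this series go through the wrappers from <linux/mmap_lock.h>, which hide the rwsem behind an API so the lock implementation can change later without touching callers. A condensed sketch of the read-side pattern, mirroring the si_code selection in the traps.c hunk above:

	#include <linux/mm.h>
	#include <linux/mmap_lock.h>

	/* Hedged sketch: the value is computed while the lock is held;
	 * a vma pointer itself would not be safe to use after unlock. */
	static int segv_code_for(struct mm_struct *mm, unsigned long addr)
	{
		int si_code;

		mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
		si_code = find_vma(mm, addr) ? SEGV_ACCERR : SEGV_MAPERR;
		mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */

		return si_code;
	}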
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 667cd21393b5..3c42b3147fd6 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -24,7 +24,6 @@
#include <linux/gfp.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 0be19fd1a412..4c453ba96c51 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -13,7 +13,8 @@
struct task_struct;
void show_regs(struct pt_regs *regs);
-void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl);
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
unsigned long address);
void die(const char *str, struct pt_regs *regs, unsigned long address);
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 12be7e1b7cc0..f1ed17edb085 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -248,9 +248,6 @@
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
@@ -282,18 +279,6 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-/*
- * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
- * and returns ptr to PTE entry corresponding to @addr
- */
-#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
- __pte_index(addr))
-
-/* No mapping of Page Tables in high mem etc, so following same as above */
-#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
-#define pte_offset_map(dir, addr) pte_offset(dir, addr)
 /* Zoo of pte_xxx functions */
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
@@ -332,13 +317,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
}
/*
- * All kernel related VM pages are in init's mm.
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
-
-/*
  * Macro to quickly access the PGD entry, utilising the fact that some
  * arch may cache the pointer to Page Directory of "current" task
  * in an MMU register
@@ -390,8 +368,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#include <asm/hugepage.h>
#endif
-#include <asm-generic/pgtable.h>
-
/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 315528f04bc1..8c8e5172fecd 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -90,10 +90,10 @@ fault:
if (unlikely(ret != -EFAULT))
goto fail;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
FAULT_FLAG_WRITE, NULL);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (likely(!ret))
goto again;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 1e440bbfa876..feba91c9d969 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -158,9 +158,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
/* Call-back which plugs into unwinding core to dump the stack in
* case of panic/OOPs/BUG etc
*/
-static int __print_sym(unsigned int address, void *unused)
+static int __print_sym(unsigned int address, void *arg)
{
- printk(" %pS\n", (void *)address);
+ const char *loglvl = arg;
+
+ printk("%s %pS\n", loglvl, (void *)address);
return 0;
}
@@ -217,17 +219,18 @@ static int __get_first_nonsched(unsigned int address, void *unused)
*-------------------------------------------------------------------------
*/
-noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
+ const char *loglvl)
{
- pr_info("\nStack Trace:\n");
- arc_unwind_core(tsk, regs, __print_sym, NULL);
+ printk("%s\nStack Trace:\n", loglvl);
+ arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);
/* Expected by sched Code */
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- show_stacktrace(tsk, NULL);
+ show_stacktrace(tsk, NULL, loglvl);
}
 /* Another API expected by the scheduler, shows up in "ps" as Wait Channel
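[Annotation] The loglvl plumbing above relies on the callback-cookie pattern: the unwind core only understands an opaque void *, so the log level string is smuggled through it and recovered in the per-frame callback. A hedged sketch of the pattern in isolation, not the ARC unwinder itself:

	typedef int (*frame_fn)(unsigned int address, void *arg);

	static int print_frame(unsigned int address, void *arg)
	{
		const char *loglvl = arg;	/* recover the cookie */

		printk("%s %pS\n", loglvl, (void *)address);
		return 0;			/* 0 == keep unwinding */
	}

	static void dump_frames(const unsigned int *pcs, int n,
				const char *loglvl)
	{
		int i;

		for (i = 0; i < n; i++)
			if (print_frame(pcs[i], (void *)loglvl))
				break;		/* callback asked to stop */
	}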
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 3393558876a9..28e8bf04b253 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -89,7 +89,7 @@ static void show_faulting_vma(unsigned long address)
/* can't use print_vma_addr() yet as it doesn't check for
* non-inclusive vma
*/
- down_read(&active_mm->mmap_sem);
+ mmap_read_lock(active_mm);
vma = find_vma(active_mm, address);
/* check against the find_vma( ) behaviour which returns the next VMA
@@ -111,7 +111,7 @@ static void show_faulting_vma(unsigned long address)
} else
pr_info(" @No matching VMA found\n");
- up_read(&active_mm->mmap_sem);
+ mmap_read_unlock(active_mm);
}
static void show_ecr_verbose(struct pt_regs *regs)
@@ -240,5 +240,5 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
/* Show stack trace if this Fatality happened in kernel mode */
if (!user_mode(regs))
- show_stacktrace(current, regs);
+ show_stacktrace(current, regs, KERN_DEFAULT);
}
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 92b339c7adba..72f5405a7ec5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -107,7 +107,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -141,7 +141,7 @@ retry:
}
/*
- * Fault retry nuances, mmap_sem already relinquished by core mm
+ * Fault retry nuances, mmap_lock already relinquished by core mm
*/
if (unlikely((fault & VM_FAULT_RETRY) &&
(flags & FAULT_FLAG_ALLOW_RETRY))) {
@@ -150,7 +150,7 @@ retry:
}
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Major/minor page fault accounting
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 479b0d72d3cf..1b9f473c6369 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -6,8 +6,8 @@
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -92,17 +92,9 @@ EXPORT_SYMBOL(kunmap_atomic_high);
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
- pgd_t *pgd_k;
- p4d_t *p4d_k;
- pud_t *pud_k;
- pmd_t *pmd_k;
+ pmd_t *pmd_k = pmd_off_k(kvaddr);
pte_t *pte_k;
- pgd_k = pgd_offset_k(kvaddr);
- p4d_k = p4d_offset(pgd_k, kvaddr);
- pud_k = pud_offset(p4d_k, kvaddr);
- pmd_k = pmd_offset(pud_k, kvaddr);
-
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_k)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 2efaf6ca0c06..31f54bdd95f2 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -33,9 +33,9 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/entry.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fb6c85c5d344..8789e0e96787 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -367,6 +367,7 @@ config ARCH_EP93XX
select CPU_ARM920T
select GENERIC_CLOCKEVENTS
select GPIOLIB
+ select HAVE_LEGACY_CLK
help
This enables support for the Cirrus EP93xx series of CPUs.
@@ -438,7 +439,6 @@ config ARCH_PXA
select ARM_CPU_SUSPEND if PM
select AUTO_ZRELADDR
select COMMON_CLK
- select CLKDEV_LOOKUP
select CLKSRC_PXA
select CLKSRC_MMIO
select TIMER_OF
@@ -477,7 +477,6 @@ config ARCH_SA1100
bool "SA1100-based"
select ARCH_MTD_XIP
select ARCH_SPARSEMEM_ENABLE
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CLKSRC_PXA
select TIMER_OF if OF
@@ -498,7 +497,6 @@ config ARCH_SA1100
config ARCH_S3C24XX
bool "Samsung S3C24XX SoCs"
select ATAGS
- select CLKDEV_LOOKUP
select CLKSRC_SAMSUNG_PWM
select GENERIC_CLOCKEVENTS
select GPIO_SAMSUNG
@@ -528,6 +526,7 @@ config ARCH_OMAP1
select GENERIC_IRQ_MULTI_HANDLER
select GPIOLIB
select HAVE_IDE
+ select HAVE_LEGACY_CLK
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD
select NEED_MACH_MEMORY_H
@@ -1750,7 +1749,7 @@ config DEPRECATED_PARAM_STRUCT
# TEXT and BSS so we preserve their values in the config files.
config ZBOOT_ROM_TEXT
hex "Compressed ROM boot loader base address"
- default "0"
+ default 0x0
help
The physical address at which the ROM-able zImage is to be
placed in the target. Platforms which normally make use of
@@ -1761,7 +1760,7 @@ config ZBOOT_ROM_TEXT
config ZBOOT_ROM_BSS
hex "Compressed ROM boot loader BSS address"
- default "0"
+ default 0x0
help
The base address of an area of read/write memory in the target
for the ROM-able zImage which must be available while the
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 0fb6de83dd50..59fde2d598d8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -45,12 +45,10 @@ endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__ARMEB__
-AS += -EB
KBUILD_LDFLAGS += -EB
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__ARMEL__
-AS += -EL
KBUILD_LDFLAGS += -EL
endif
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index c79db44ba128..434a16982e34 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -1410,7 +1410,11 @@ memdump: mov r12, r0
__hyp_reentry_vectors:
W(b) . @ reset
W(b) . @ undef
+#ifdef CONFIG_EFI_STUB
+ W(b) __enter_kernel_from_hyp @ hvc from HYP
+#else
W(b) . @ svc
+#endif
W(b) . @ pabort
W(b) . @ dabort
W(b) __enter_kernel @ hyp
@@ -1429,14 +1433,72 @@ __enter_kernel:
reloc_code_end:
#ifdef CONFIG_EFI_STUB
+__enter_kernel_from_hyp:
+ mrc p15, 4, r0, c1, c0, 0 @ read HSCTLR
+ bic r0, r0, #0x5 @ disable MMU and caches
+ mcr p15, 4, r0, c1, c0, 0 @ write HSCTLR
+ isb
+ b __enter_kernel
+
ENTRY(efi_enter_kernel)
mov r4, r0 @ preserve image base
mov r8, r1 @ preserve DT pointer
+ ARM( adrl r0, call_cache_fn )
+ THUMB( adr r0, call_cache_fn )
+ adr r1, 0f @ clean the region of code we
+ bl cache_clean_flush @ may run with the MMU off
+
+#ifdef CONFIG_ARM_VIRT_EXT
+ @
+ @ The EFI spec does not support booting on ARM in HYP mode,
+ @ since it mandates that the MMU and caches are on, with all
+ @ 32-bit addressable DRAM mapped 1:1 using short descriptors.
+ @
+ @ While the EDK2 reference implementation adheres to this,
+ @ U-Boot might decide to enter the EFI stub in HYP mode
+ @ anyway, with the MMU and caches either on or off.
+ @
+ mrs r0, cpsr @ get the current mode
+ msr spsr_cxsf, r0 @ record boot mode
+ and r0, r0, #MODE_MASK @ are we running in HYP mode?
+ cmp r0, #HYP_MODE
+ bne .Lefi_svc
+
+ mrc p15, 4, r1, c1, c0, 0 @ read HSCTLR
+ tst r1, #0x1 @ MMU enabled at HYP?
+ beq 1f
+
+ @
+ @ When running in HYP mode with the caches on, we're better
+ @ off just carrying on using the cached 1:1 mapping that the
+ @ firmware provided. Set up the HYP vectors so HVC instructions
+ @ issued from HYP mode take us to the correct handler code. We
+ @ will disable the MMU before jumping to the kernel proper.
+ @
+ adr r0, __hyp_reentry_vectors
+ mcr p15, 4, r0, c12, c0, 0 @ set HYP vector base (HVBAR)
+ isb
+ b .Lefi_hyp
+
+ @
+ @ When running in HYP mode with the caches off, we need to drop
+ @ into SVC mode now, and let the decompressor set up its cached
+ @ 1:1 mapping as usual.
+ @
+1: mov r9, r4 @ preserve image base
+ bl __hyp_stub_install @ install HYP stub vectors
+ safe_svcmode_maskall r1 @ drop to SVC mode
+ msr spsr_cxsf, r0 @ record boot mode
+ orr r4, r9, #1 @ restore image base and set LSB
+ b .Lefi_hyp
+.Lefi_svc:
+#endif
mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
tst r0, #0x1 @ MMU enabled?
orreq r4, r4, #1 @ set LSB if not
+.Lefi_hyp:
mov r0, r8 @ DT start
add r1, r8, r2 @ DT end
bl cache_clean_flush
diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh
index 40937248cebe..739f0464321e 100755
--- a/arch/arm/boot/deflate_xip_data.sh
+++ b/arch/arm/boot/deflate_xip_data.sh
@@ -56,7 +56,7 @@ trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3
# substitute the data section by a compressed version
$DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp"
$DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes |
-gzip -9 >> "$XIPIMAGE.tmp"
+$_GZIP -9 >> "$XIPIMAGE.tmp"
# replace kernel binary
mv -f "$XIPIMAGE.tmp" "$XIPIMAGE"
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 3034c23e697d..1fbee2a7785f 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -933,8 +933,6 @@
};
usb2: gadget@fff78000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,at91sam9g45-udc";
reg = <0x00600000 0x80000
0xfff78000 0x400>;
@@ -942,58 +940,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 27>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
};
clk32k: sckc@fffffd50 {
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index ea024e4b6e09..4d70194fd808 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -299,8 +299,6 @@
};
usb0: gadget@fffd4000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,at91sam9rl-udc";
reg = <0x00600000 0x100000>,
<0xfffd4000 0x4000>;
@@ -308,58 +306,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 22>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
};
dma0: dma-controller@ffffe600 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 7c2eb93f8cac..948fe99ab6c3 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -867,8 +867,6 @@
};
usb2: gadget@f803c000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,at91sam9g45-udc";
reg = <0x00500000 0x80000
0xf803c000 0x400>;
@@ -876,58 +874,6 @@
clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>, <&pmc PMC_TYPE_PERIPHERAL 23>;
clock-names = "hclk", "pclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
};
watchdog: watchdog@fffffe40 {
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index c0a3ca8f9bf7..31d8766ec7ef 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -109,8 +109,6 @@
};
usb0: gadget@300000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,sama5d3-udc";
reg = <0x00300000 0x100000
0xfc02c000 0x400>;
@@ -118,124 +116,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 42>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@7 {
- reg = <7>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@8 {
- reg = <8>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@9 {
- reg = <9>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@10 {
- reg = <10>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@11 {
- reg = <11>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@12 {
- reg = <12>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@13 {
- reg = <13>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@14 {
- reg = <14>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@15 {
- reg = <15>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
};
usb1: ohci@400000 {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index c53e48445e4d..0bb5b6fa0748 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1076,8 +1076,6 @@
};
usb0: gadget@500000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,sama5d3-udc";
reg = <0x00500000 0x100000
0xf8030000 0x4000>;
@@ -1085,111 +1083,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 33>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@7 {
- reg = <7>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- };
-
- ep@8 {
- reg = <8>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@9 {
- reg = <9>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@10 {
- reg = <10>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@11 {
- reg = <11>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@12 {
- reg = <12>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@13 {
- reg = <13>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@14 {
- reg = <14>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
-
- ep@15 {
- reg = <15>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- };
};
usb1: ohci@600000 {
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index fff679734c9c..2d9f853ab15f 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -96,8 +96,6 @@
};
usb0: gadget@400000 {
- #address-cells = <1>;
- #size-cells = <0>;
compatible = "atmel,sama5d3-udc";
reg = <0x00400000 0x100000
0xfc02c000 0x4000>;
@@ -105,124 +103,6 @@
clocks = <&pmc PMC_TYPE_PERIPHERAL 47>, <&pmc PMC_TYPE_CORE PMC_UTMI>;
clock-names = "pclk", "hclk";
status = "disabled";
-
- ep@0 {
- reg = <0>;
- atmel,fifo-size = <64>;
- atmel,nb-banks = <1>;
- };
-
- ep@1 {
- reg = <1>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@2 {
- reg = <2>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <3>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@3 {
- reg = <3>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@4 {
- reg = <4>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@5 {
- reg = <5>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@6 {
- reg = <6>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@7 {
- reg = <7>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-dma;
- atmel,can-isoc;
- };
-
- ep@8 {
- reg = <8>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@9 {
- reg = <9>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@10 {
- reg = <10>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@11 {
- reg = <11>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@12 {
- reg = <12>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@13 {
- reg = <13>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@14 {
- reg = <14>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
-
- ep@15 {
- reg = <15>;
- atmel,fifo-size = <1024>;
- atmel,nb-banks = <2>;
- atmel,can-isoc;
- };
};
usb1: ohci@500000 {
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 2674de6ada1f..c9bf2df85cb9 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -30,7 +30,7 @@ config CRYPTO_SHA1_ARM_NEON
config CRYPTO_SHA1_ARM_CE
tristate "SHA1 digest algorithm (ARM v8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_SHA1_ARM
select CRYPTO_HASH
help
@@ -39,7 +39,7 @@ config CRYPTO_SHA1_ARM_CE
config CRYPTO_SHA2_ARM_CE
tristate "SHA-224/256 digest algorithm (ARM v8 Crypto Extensions)"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_SHA256_ARM
select CRYPTO_HASH
help
@@ -96,7 +96,7 @@ config CRYPTO_AES_ARM_BS
config CRYPTO_AES_ARM_CE
tristate "Accelerated AES using ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_SIMD
@@ -106,7 +106,7 @@ config CRYPTO_AES_ARM_CE
config CRYPTO_GHASH_ARM_CE
tristate "PMULL-accelerated GHASH using NEON/ARMv8 Crypto Extensions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_CRYPTD
select CRYPTO_GF128MUL
@@ -118,13 +118,13 @@ config CRYPTO_GHASH_ARM_CE
config CRYPTO_CRCT10DIF_ARM_CE
tristate "CRCT10DIF digest algorithm using PMULL instructions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
depends on CRC_T10DIF
select CRYPTO_HASH
config CRYPTO_CRC32_ARM_CE
tristate "CRC32(C) digest algorithm using CRC and/or PMULL instructions"
- depends on KERNEL_MODE_NEON && (CC_IS_CLANG || GCC_VERSION >= 40800)
+ depends on KERNEL_MODE_NEON
depends on CRC32
select CRYPTO_HASH
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index deef4d0cb3b5..673c7dd75ab9 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -82,7 +82,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
+ const char *loglvl);
struct mm_struct;
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 7114b9aa46b8..2e24e765e6d3 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -258,11 +258,11 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
- * flush_cache_user_range is used when we want to ensure that the
+ * flush_icache_user_range is used when we want to ensure that the
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
+#define flush_icache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
@@ -318,9 +318,6 @@ extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 9383f236e795..84dc0ba822f5 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -13,7 +13,6 @@
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#ifdef CONFIG_EFI
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 472c93db5dac..fc56fc3e1931 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,8 +6,8 @@
#define FIXADDR_END 0xfff00000UL
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
+#include <linux/pgtable.h>
#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index 73ba956e379f..aab7e8358e6a 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -3,7 +3,7 @@
#define __ASM_IDMAP_H
#include <linux/compiler.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
#define __idmap __section(.idmap.text) noinline notrace
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 9e084a464a97..3502c2f746ca 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -187,6 +187,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
return (pmd_t *)pud;
}
+#define pmd_offset pmd_offset
#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_leaf(pmd) (pmd_val(pmd) & 2)
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 1933aed9f68d..fbb6693c3352 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -133,13 +133,6 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
-/* Find an entry in the second-level page table.. */
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
#define copy_pmd(pmdpd,pmdps) \
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 30fb2330f57b..d16aba48fa0a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -22,7 +22,6 @@
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
/* FIXME */
/*
* PMD_SHIFT determines the size of the area a second-level page table can map
@@ -73,8 +72,6 @@ extern unsigned int kobjsize(const void *objp);
#define FIRST_USER_ADDRESS 0UL
-#include <asm-generic/pgtable.h>
-
#else
/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index fba20607c53c..c02f24400369 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -166,14 +166,6 @@ extern struct page *empty_zero_page;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
#define pmd_none(pmd) (!pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
@@ -183,21 +175,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
-#ifndef CONFIG_HIGHPTE
-#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
-#define __pte_unmap(pte) do { } while (0)
-#else
-#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
-#define __pte_unmap(pte) kunmap_atomic(pte)
-#endif
-
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))
-
-#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
-#define pte_unmap(pte) __pte_unmap(pte)
-
#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))
@@ -339,8 +316,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index 172b08ff3760..987fefb0a4db 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -29,7 +29,8 @@ static inline int __in_irqentry_text(unsigned long ptr)
}
extern void __init early_trap_init(void *);
-extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl);
extern void ptrace_break(struct pt_regs *regs);
extern void *vectors_page;
diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h
index 6e282c33126b..0f8a3439902d 100644
--- a/arch/arm/include/asm/unwind.h
+++ b/arch/arm/include/asm/unwind.h
@@ -36,7 +36,8 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
unsigned long text_addr,
unsigned long text_size);
extern void unwind_table_del(struct unwind_table *tab);
-extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 182422981386..254ab7138c85 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -78,13 +78,32 @@ void elf_set_personality(const struct elf32_hdr *x)
EXPORT_SYMBOL(elf_set_personality);
/*
- * Set READ_IMPLIES_EXEC if:
- * - the binary requires an executable stack
- * - we're running on a CPU which doesn't support NX.
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the result is:
+ *
+ *                  CPU: | lacks NX*  | has NX     |
+ * ELF:                  |            |            |
+ * ----------------------|------------|------------|
+ * missing PT_GNU_STACK  | exec-all   | exec-all   |
+ * PT_GNU_STACK == RWX   | exec-all   | exec-stack |
+ * PT_GNU_STACK == RW    | exec-all   | exec-none  |
+ *
+ * exec-all : all PROT_READ user mappings are executable, except when
+ * backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *this column has no architectural effect: NX markings are ignored by
+ * hardware, but may have behavioral effects when "wants X" collides with
+ * "cannot be X" constraints in memory permission flags, as in
+ * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
+ *
*/
int arm_elf_read_implies_exec(int executable_stack)
{
- if (executable_stack != EXSTACK_DISABLE_X)
+ if (executable_stack == EXSTACK_DEFAULT)
return 1;
if (cpu_architecture() < CPU_ARCH_ARMv6)
return 1;
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index cd1234c103fc..98ca3e3fa847 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -98,8 +98,8 @@ void set_fiq_handler(void *start, unsigned int length)
memcpy(base + offset, start, length);
if (!cache_is_vipt_nonaliasing())
- flush_icache_range((unsigned long)base + offset, offset +
- length);
+ flush_icache_range((unsigned long)base + offset,
+ (unsigned long)base + offset + length);
flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
}
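[Annotation] The fiq.c hunk fixes an off-by-base bug: flush_icache_range() takes two absolute addresses, but the old call passed the relative value offset + length as the end, so the flushed window was wrong whenever base was non-zero. A hedged sketch of the corrected shape:

	/* Both arguments are absolute virtual addresses. */
	static void flush_copied_handler(void *base, unsigned int offset,
					 unsigned int length)
	{
		unsigned long start = (unsigned long)base + offset;

		flush_icache_range(start, start + length);
	}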
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c49b39340ddb..f8904227e7fd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/cp15.h>
@@ -18,7 +19,6 @@
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
-#include <asm/pgtable.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 76300f3813e8..974b6c64d3e6 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
-#include <asm/pgtable.h>
#include <linux/of_fdt.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index af0a8500a24e..e15444b25ca0 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -17,7 +17,6 @@
#include <linux/string.h>
#include <linux/gfp.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 46e478fb5ea2..58eaa1f60e16 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -431,7 +431,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
npages = 1; /* for sigpage */
npages += vdso_total_pages;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
hint = sigpage_addr(mm, npages);
addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
@@ -458,7 +458,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
arm_install_vdso(mm, addr + PAGE_SIZE);
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
#endif
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 4cc6a7eff635..d0f7c8896c96 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/tracehook.h>
#include <linux/unistd.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 46e1be9e57a8..9a6432557871 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -37,7 +37,6 @@
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index d08099269e35..d2c9338d74e8 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -2,12 +2,12 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index e640871328c1..6166ba38bf99 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -97,12 +97,12 @@ static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
int si_code;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
if (find_vma(current->mm, addr) == NULL)
si_code = SEGV_MAPERR;
else
si_code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs,
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 1e70e7227f0f..65a3b1e75480 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -62,21 +62,24 @@ __setup("user_debug=", user_debug_setup);
static void dump_mem(const char *, const char *, unsigned long, unsigned long);
-void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+void dump_backtrace_entry(unsigned long where, unsigned long from,
+ unsigned long frame, const char *loglvl)
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);
#ifdef CONFIG_KALLSYMS
- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
+ loglvl, where, (void *)where, from, (void *)from);
#else
- printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
+ loglvl, where, from);
#endif
if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
- dump_mem("", "Exception stack", frame + 4, end);
+ dump_mem(loglvl, "Exception stack", frame + 4, end);
}
-void dump_backtrace_stm(u32 *stack, u32 instruction)
+void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
{
char str[80], *p;
unsigned int x;
@@ -88,12 +91,12 @@ void dump_backtrace_stm(u32 *stack, u32 instruction)
if (++x == 6) {
x = 0;
p = str;
- printk("%s\n", str);
+ printk("%s%s\n", loglvl, str);
}
}
}
if (p != str)
- printk("%s\n", str);
+ printk("%s%s\n", loglvl, str);
}
#ifndef CONFIG_ARM_UNWIND
@@ -201,17 +204,19 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
#ifdef CONFIG_ARM_UNWIND
-static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
- unwind_backtrace(regs, tsk);
+ unwind_backtrace(regs, tsk, loglvl);
}
#else
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
unsigned int fp, mode;
int ok = 1;
- printk("Backtrace: ");
+ printk("%sBacktrace: ", loglvl);
if (!tsk)
tsk = current;
@@ -238,13 +243,13 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
pr_cont("\n");
if (ok)
- c_backtrace(fp, mode);
+ c_backtrace(fp, mode, loglvl);
}
#endif
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- dump_backtrace(NULL, tsk);
+ dump_backtrace(NULL, tsk, loglvl);
barrier();
}
@@ -288,7 +293,7 @@ static int __die(const char *str, int err, struct pt_regs *regs)
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
- dump_backtrace(regs, tsk);
+ dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
@@ -566,7 +571,7 @@ __do_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;
- ret = flush_cache_user_range(start, start + chunk);
+ ret = flush_icache_user_range(start, start + chunk);
if (ret)
return ret;
@@ -663,10 +668,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
if (user_debug & UDBG_SYSCALL) {
pr_err("[%d] %s: arm syscall %d\n",
task_pid_nr(current), current->comm, no);
- dump_instr("", regs);
+ dump_instr(KERN_ERR, regs);
if (user_mode(regs)) {
__show_regs(regs);
- c_backtrace(frame_pointer(regs), processor_mode(regs));
+ c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
}
}
#endif
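
The traps.c changes thread a loglvl string (a KERN_* prefix such as KERN_EMERG or KERN_ERR) through the whole backtrace chain, so a single dump path can print at whatever severity its caller needs instead of a hard-coded default. This leans on printk()'s convention that the level is a prefix of the format string, forwarded here via "%s". A small sketch (the helper name is hypothetical):

    #include <linux/kern_levels.h>
    #include <linux/printk.h>

    static void dump_one_entry(unsigned long where, const char *loglvl)
    {
            printk("%s[<%08lx>] (%ps)\n", loglvl, where, (void *)where);
    }

    /* dump_one_entry(pc, KERN_EMERG);  from the die() path
     * dump_one_entry(pc, KERN_ERR);    from the syscall-debug path
     */
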
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 11a964fd66f4..d2bd0df2318d 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -455,7 +455,8 @@ int unwind_frame(struct stackframe *frame)
return URC_OK;
}
-void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
struct stackframe frame;
@@ -493,7 +494,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
urc = unwind_frame(&frame);
if (urc < 0)
break;
- dump_backtrace_entry(where, frame.pc, frame.sp - 4);
+ dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
}
}
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index e0330a25e1c6..6bfdca4769a7 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -240,7 +240,7 @@ static int install_vvar(struct mm_struct *mm, unsigned long addr)
return PTR_ERR_OR_ZERO(vma);
}
-/* assumes mmap_sem is write-locked */
+/* assumes mmap_lock is write-locked */
void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 88a720da443b..7f24bc08403e 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,13 +8,13 @@
#include "vmlinux-xip.lds.S"
#else
+#include <linux/pgtable.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/mpu.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "vmlinux.lds.h"
diff --git a/arch/arm/lib/backtrace-clang.S b/arch/arm/lib/backtrace-clang.S
index 2ff375144b55..6174c45f53a5 100644
--- a/arch/arm/lib/backtrace-clang.S
+++ b/arch/arm/lib/backtrace-clang.S
@@ -17,6 +17,7 @@
#define sv_pc r6
#define mask r7
#define sv_lr r8
+#define loglvl r9
ENTRY(c_backtrace)
@@ -99,6 +100,7 @@ ENDPROC(c_backtrace)
@ to ensure 8 byte alignment
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
moveq mask, #0xfc000003
movne mask, #0 @ mask for 32-bit
@@ -167,6 +169,7 @@ finished_setup:
mov r1, sv_lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
+ mov r3, loglvl
bl dump_backtrace_entry
/*
@@ -183,6 +186,7 @@ finished_setup:
ldr r0, [frame] @ locals are stored in
@ the preceding frame
subeq r0, r0, #4
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
/*
@@ -196,7 +200,8 @@ finished_setup:
bhi for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
+ mov r1, loglvl
+ mov r2, frame
bl printk
no_frame: ldmfd sp!, {r4 - r9, fp, pc}
ENDPROC(c_backtrace)
@@ -209,7 +214,7 @@ ENDPROC(c_backtrace)
.long 1005b, 1006b
.popsection
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Lopcode: .word 0xe92d4800 >> 11 @ stmfd sp!, {... fp, lr}
.word 0x0b000000 @ bl if these bits are set
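
In the assembly backtracer the new argument arrives in r2 per the AAPCS, is stashed in the freshly saved r9 ("loglvl"), and is staged back into the correct argument register before each call; note that printk now receives it as the first vararg to satisfy the "%s" prefix added to .Lbad. The C prototypes the asm must match, as implied by the hunks above (the authoritative declarations live in arch/arm/include/asm/traps.h and asm/bug.h in this series):

    #include <linux/types.h>

    void dump_backtrace_entry(unsigned long where, unsigned long from,
                              unsigned long frame, const char *loglvl);
    void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl);
    void c_backtrace(unsigned long fp, int mode, const char *loglvl);
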
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 582925238d65..872f658638d9 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -18,6 +18,7 @@
#define sv_pc r6
#define mask r7
#define offset r8
+#define loglvl r9
ENTRY(c_backtrace)
@@ -25,9 +26,10 @@ ENTRY(c_backtrace)
ret lr
ENDPROC(c_backtrace)
#else
- stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
+ stmfd sp!, {r4 - r9, lr} @ Save an extra register so we have a location...
movs frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r2
tst r1, #0x10 @ 26 or 32-bit mode?
ARM( moveq mask, #0xfc000003 )
@@ -73,6 +75,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
ldr r1, [frame, #-4] @ get saved lr
mov r2, frame
bic r1, r1, mask @ mask PC/LR for the mode
+ mov r3, loglvl
bl dump_backtrace_entry
ldr r1, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
@@ -80,12 +83,14 @@ for_each_frame: tst frame, mask @ Check for address exceptions
teq r3, r1, lsr #11
ldreq r0, [frame, #-8] @ get sp
subeq r0, r0, #4 @ point at the last arg
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc}
ldr r3, .Ldsi @ instruction exists,
teq r3, r1, lsr #11
subeq r0, frame, #16
+ mov r2, loglvl
bleq dump_backtrace_stm @ dump saved registers
teq sv_fp, #0 @ zero saved fp means
@@ -96,9 +101,10 @@ for_each_frame: tst frame, mask @ Check for address exceptions
bhi for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
+ mov r1, loglvl
+ mov r2, frame
bl printk
-no_frame: ldmfd sp!, {r4 - r8, pc}
+no_frame: ldmfd sp!, {r4 - r9, pc}
ENDPROC(c_backtrace)
.pushsection __ex_table,"a"
@@ -109,7 +115,7 @@ ENDPROC(c_backtrace)
.long 1004b, 1006b
.popsection
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc}
.word 0xe92d0000 >> 11 @ stmfd sp!, {}
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index d72b14c96670..106f83a5ea6d 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -101,7 +101,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
atomic = faulthandler_disabled();
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
@@ -109,11 +109,11 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
while (!pin_page_for_write(to, &pte, &ptl)) {
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)to))
goto out;
if (!atomic)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
@@ -133,7 +133,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
spin_unlock(ptl);
}
if (!atomic)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return n;
@@ -170,17 +170,17 @@ __clear_user_memset(void __user *addr, unsigned long n)
return 0;
}
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
while (n) {
pte_t *pte;
spinlock_t *ptl;
int tocopy;
while (!pin_page_for_write(addr, &pte, &ptl)) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (__put_user(0, (char __user *)addr))
goto out;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
}
tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
@@ -198,7 +198,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
else
spin_unlock(ptl);
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return n;
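
A pattern worth noting in these hunks: when pin_page_for_write() fails, the page is not resident, and the fault handler takes the mmap lock itself, so the loop releases the read lock, faults the page in with a dummy __put_user(), then retakes the lock and retries the pin. Condensed from the loops above:

    mmap_read_lock(current->mm);
    while (!pin_page_for_write(addr, &pte, &ptl)) {
            /* can't service the fault with the lock held: drop, fault, retry */
            mmap_read_unlock(current->mm);
            if (__put_user(0, (char __user *)addr))
                    goto out;               /* a real fault: give up */
            mmap_read_lock(current->mm);
    }
    /* ... pte is pinned; copy while holding ptl ... */
    mmap_read_unlock(current->mm);
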
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 575b2e2b6759..5960e3dfd2bf 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -17,7 +17,6 @@
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system_misc.h>
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 015f75d1c98d..eee095f0e2f6 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -14,7 +14,6 @@
#include <linux/spinlock.h>
#include <video/vga.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index e117d2883df9..50a2edac8513 100644
--- a/arch/arm/mach-imx/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pinctrl/machine.h>
-#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include "common.h"
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index dcbe7ec6d543..4e4125140025 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -8,7 +8,6 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pinctrl/machine.h>
-#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include "common.h"
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 67264c48ed68..ea2d58a63903 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -13,7 +13,6 @@
#include <linux/io.h>
#include <linux/pinctrl/machine.h>
-#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index 9da3ae232211..0fe5e1dc9d89 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -21,10 +21,10 @@
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/pgtable.h>
#include <asm/mach-types.h>
#include <asm/mach/time.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "cm.h"
diff --git a/arch/arm/mach-iop32x/i2c.c b/arch/arm/mach-iop32x/i2c.c
index dc9f6a14ab1b..e422286af469 100644
--- a/arch/arm/mach-iop32x/i2c.c
+++ b/arch/arm/mach-iop32x/i2c.c
@@ -17,7 +17,6 @@
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/gpio/machine.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <asm/setup.h>
diff --git a/arch/arm/mach-iop32x/iq31244.c b/arch/arm/mach-iop32x/iq31244.c
index 04a7d389d365..49caaa703881 100644
--- a/arch/arm/mach-iop32x/iq31244.c
+++ b/arch/arm/mach-iop32x/iq31244.c
@@ -31,7 +31,6 @@
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "irqs.h"
diff --git a/arch/arm/mach-iop32x/iq80321.c b/arch/arm/mach-iop32x/iq80321.c
index 4bd596d6c9c1..b455d7073296 100644
--- a/arch/arm/mach-iop32x/iq80321.c
+++ b/arch/arm/mach-iop32x/iq80321.c
@@ -27,7 +27,6 @@
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "irqs.h"
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 5382a93ad0f8..78b9a5ee41c9 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -35,7 +35,6 @@
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "hardware.h"
#include "irqs.h"
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 381f452de28d..184262d660ba 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -33,7 +33,6 @@
#include <mach/hardware.h>
#include <mach/io.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/exception.h>
#include <asm/irq.h>
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index c810e23a8fa0..673fcf3b34b1 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -12,11 +12,11 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/smp_plat.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include "keystone.h"
diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig
index 6fe1550f43ec..0dd999212944 100644
--- a/arch/arm/mach-mmp/Kconfig
+++ b/arch/arm/mach-mmp/Kconfig
@@ -124,6 +124,8 @@ config MACH_MMP2_DT
select PINCTRL_SINGLE
select ARCH_HAS_RESET_CONTROLLER
select CPU_PJ4
+ select PM_GENERIC_DOMAINS if PM
+ select PM_GENERIC_DOMAINS_OF if PM && OF
help
Include support for Marvell MMP2 based platforms using
the device tree.
diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile
index 7b3a7f979eec..e3758f7e1fe7 100644
--- a/arch/arm/mach-mmp/Makefile
+++ b/arch/arm/mach-mmp/Makefile
@@ -12,12 +12,6 @@ obj-$(CONFIG_CPU_PXA910) += pxa910.o
obj-$(CONFIG_CPU_MMP2) += mmp2.o
obj-$(CONFIG_MMP_SRAM) += sram.o
-ifeq ($(CONFIG_COMMON_CLK), )
-obj-y += clock.o
-obj-$(CONFIG_CPU_PXA168) += clock-pxa168.o
-obj-$(CONFIG_CPU_PXA910) += clock-pxa910.o
-obj-$(CONFIG_CPU_MMP2) += clock-mmp2.o
-endif
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_CPU_PXA910) += pm-pxa910.o
obj-$(CONFIG_CPU_MMP2) += pm-mmp2.o
diff --git a/arch/arm/mach-mmp/clock-mmp2.c b/arch/arm/mach-mmp/clock-mmp2.c
deleted file mode 100644
index 7536398bf1c1..000000000000
--- a/arch/arm/mach-mmp/clock-mmp2.c
+++ /dev/null
@@ -1,114 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk/mmp.h>
-
-#include "addr-map.h"
-
-#include "common.h"
-#include "clock.h"
-
-/*
- * APB Clock register offsets for MMP2
- */
-#define APBC_RTC APBC_REG(0x000)
-#define APBC_TWSI1 APBC_REG(0x004)
-#define APBC_TWSI2 APBC_REG(0x008)
-#define APBC_TWSI3 APBC_REG(0x00c)
-#define APBC_TWSI4 APBC_REG(0x010)
-#define APBC_KPC APBC_REG(0x018)
-#define APBC_UART1 APBC_REG(0x02c)
-#define APBC_UART2 APBC_REG(0x030)
-#define APBC_UART3 APBC_REG(0x034)
-#define APBC_GPIO APBC_REG(0x038)
-#define APBC_PWM0 APBC_REG(0x03c)
-#define APBC_PWM1 APBC_REG(0x040)
-#define APBC_PWM2 APBC_REG(0x044)
-#define APBC_PWM3 APBC_REG(0x048)
-#define APBC_SSP0 APBC_REG(0x04c)
-#define APBC_SSP1 APBC_REG(0x050)
-#define APBC_SSP2 APBC_REG(0x054)
-#define APBC_SSP3 APBC_REG(0x058)
-#define APBC_SSP4 APBC_REG(0x05c)
-#define APBC_SSP5 APBC_REG(0x060)
-#define APBC_TWSI5 APBC_REG(0x07c)
-#define APBC_TWSI6 APBC_REG(0x080)
-#define APBC_UART4 APBC_REG(0x088)
-
-#define APMU_USB APMU_REG(0x05c)
-#define APMU_NAND APMU_REG(0x060)
-#define APMU_SDH0 APMU_REG(0x054)
-#define APMU_SDH1 APMU_REG(0x058)
-#define APMU_SDH2 APMU_REG(0x0e8)
-#define APMU_SDH3 APMU_REG(0x0ec)
-
-static void sdhc_clk_enable(struct clk *clk)
-{
- uint32_t clk_rst;
-
- clk_rst = __raw_readl(clk->clk_rst);
- clk_rst |= clk->enable_val;
- __raw_writel(clk_rst, clk->clk_rst);
-}
-
-static void sdhc_clk_disable(struct clk *clk)
-{
- uint32_t clk_rst;
-
- clk_rst = __raw_readl(clk->clk_rst);
- clk_rst &= ~clk->enable_val;
- __raw_writel(clk_rst, clk->clk_rst);
-}
-
-struct clkops sdhc_clk_ops = {
- .enable = sdhc_clk_enable,
- .disable = sdhc_clk_disable,
-};
-
-/* APB peripheral clocks */
-static APBC_CLK(uart1, UART1, 1, 26000000);
-static APBC_CLK(uart2, UART2, 1, 26000000);
-static APBC_CLK(uart3, UART3, 1, 26000000);
-static APBC_CLK(uart4, UART4, 1, 26000000);
-static APBC_CLK(twsi1, TWSI1, 0, 26000000);
-static APBC_CLK(twsi2, TWSI2, 0, 26000000);
-static APBC_CLK(twsi3, TWSI3, 0, 26000000);
-static APBC_CLK(twsi4, TWSI4, 0, 26000000);
-static APBC_CLK(twsi5, TWSI5, 0, 26000000);
-static APBC_CLK(twsi6, TWSI6, 0, 26000000);
-static APBC_CLK(gpio, GPIO, 0, 26000000);
-
-static APMU_CLK(nand, NAND, 0xbf, 100000000);
-static APMU_CLK_OPS(sdh0, SDH0, 0x1b, 200000000, &sdhc_clk_ops);
-static APMU_CLK_OPS(sdh1, SDH1, 0x1b, 200000000, &sdhc_clk_ops);
-static APMU_CLK_OPS(sdh2, SDH2, 0x1b, 200000000, &sdhc_clk_ops);
-static APMU_CLK_OPS(sdh3, SDH3, 0x1b, 200000000, &sdhc_clk_ops);
-
-static struct clk_lookup mmp2_clkregs[] = {
- INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
- INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
- INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
- INIT_CLKREG(&clk_uart4, "pxa2xx-uart.3", NULL),
- INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.0", NULL),
- INIT_CLKREG(&clk_twsi2, "pxa2xx-i2c.1", NULL),
- INIT_CLKREG(&clk_twsi3, "pxa2xx-i2c.2", NULL),
- INIT_CLKREG(&clk_twsi4, "pxa2xx-i2c.3", NULL),
- INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
- INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
- INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_gpio, "mmp2-gpio", NULL),
- INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"),
-};
-
-void __init mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
- phys_addr_t apbc_phys)
-{
- clkdev_add_table(ARRAY_AND_SIZE(mmp2_clkregs));
-}
diff --git a/arch/arm/mach-mmp/clock-pxa168.c b/arch/arm/mach-mmp/clock-pxa168.c
deleted file mode 100644
index 2d4a5d96a1ff..000000000000
--- a/arch/arm/mach-mmp/clock-pxa168.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk/mmp.h>
-
-#include "addr-map.h"
-
-#include "common.h"
-#include "clock.h"
-
-/*
- * APB clock register offsets for PXA168
- */
-#define APBC_UART1 APBC_REG(0x000)
-#define APBC_UART2 APBC_REG(0x004)
-#define APBC_GPIO APBC_REG(0x008)
-#define APBC_PWM1 APBC_REG(0x00c)
-#define APBC_PWM2 APBC_REG(0x010)
-#define APBC_PWM3 APBC_REG(0x014)
-#define APBC_PWM4 APBC_REG(0x018)
-#define APBC_RTC APBC_REG(0x028)
-#define APBC_TWSI0 APBC_REG(0x02c)
-#define APBC_KPC APBC_REG(0x030)
-#define APBC_TWSI1 APBC_REG(0x06c)
-#define APBC_UART3 APBC_REG(0x070)
-#define APBC_SSP1 APBC_REG(0x81c)
-#define APBC_SSP2 APBC_REG(0x820)
-#define APBC_SSP3 APBC_REG(0x84c)
-#define APBC_SSP4 APBC_REG(0x858)
-#define APBC_SSP5 APBC_REG(0x85c)
-
-#define APMU_NAND APMU_REG(0x060)
-#define APMU_LCD APMU_REG(0x04c)
-#define APMU_ETH APMU_REG(0x0fc)
-#define APMU_USB APMU_REG(0x05c)
-
-/* APB peripheral clocks */
-static APBC_CLK(uart1, UART1, 1, 14745600);
-static APBC_CLK(uart2, UART2, 1, 14745600);
-static APBC_CLK(uart3, UART3, 1, 14745600);
-static APBC_CLK(twsi0, TWSI0, 1, 33000000);
-static APBC_CLK(twsi1, TWSI1, 1, 33000000);
-static APBC_CLK(pwm1, PWM1, 1, 13000000);
-static APBC_CLK(pwm2, PWM2, 1, 13000000);
-static APBC_CLK(pwm3, PWM3, 1, 13000000);
-static APBC_CLK(pwm4, PWM4, 1, 13000000);
-static APBC_CLK(ssp1, SSP1, 4, 0);
-static APBC_CLK(ssp2, SSP2, 4, 0);
-static APBC_CLK(ssp3, SSP3, 4, 0);
-static APBC_CLK(ssp4, SSP4, 4, 0);
-static APBC_CLK(ssp5, SSP5, 4, 0);
-static APBC_CLK(gpio, GPIO, 0, 13000000);
-static APBC_CLK(keypad, KPC, 0, 32000);
-static APBC_CLK(rtc, RTC, 8, 32768);
-
-static APMU_CLK(nand, NAND, 0x19b, 156000000);
-static APMU_CLK(lcd, LCD, 0x7f, 312000000);
-static APMU_CLK(eth, ETH, 0x09, 0);
-static APMU_CLK(usb, USB, 0x12, 0);
-
-/* device and clock bindings */
-static struct clk_lookup pxa168_clkregs[] = {
- INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
- INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
- INIT_CLKREG(&clk_uart3, "pxa2xx-uart.2", NULL),
- INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
- INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
- INIT_CLKREG(&clk_pwm1, "pxa168-pwm.0", NULL),
- INIT_CLKREG(&clk_pwm2, "pxa168-pwm.1", NULL),
- INIT_CLKREG(&clk_pwm3, "pxa168-pwm.2", NULL),
- INIT_CLKREG(&clk_pwm4, "pxa168-pwm.3", NULL),
- INIT_CLKREG(&clk_ssp1, "pxa168-ssp.0", NULL),
- INIT_CLKREG(&clk_ssp2, "pxa168-ssp.1", NULL),
- INIT_CLKREG(&clk_ssp3, "pxa168-ssp.2", NULL),
- INIT_CLKREG(&clk_ssp4, "pxa168-ssp.3", NULL),
- INIT_CLKREG(&clk_ssp5, "pxa168-ssp.4", NULL),
- INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_lcd, "pxa168-fb", NULL),
- INIT_CLKREG(&clk_gpio, "mmp-gpio", NULL),
- INIT_CLKREG(&clk_keypad, "pxa27x-keypad", NULL),
- INIT_CLKREG(&clk_eth, "pxa168-eth", "MFUCLK"),
- INIT_CLKREG(&clk_usb, NULL, "PXA168-USBCLK"),
- INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
-};
-
-void __init pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
- phys_addr_t apbc_phys)
-{
- clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs));
-}
diff --git a/arch/arm/mach-mmp/clock-pxa910.c b/arch/arm/mach-mmp/clock-pxa910.c
deleted file mode 100644
index 3cd83ff91bb0..000000000000
--- a/arch/arm/mach-mmp/clock-pxa910.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk/mmp.h>
-
-#include "addr-map.h"
-
-#include "common.h"
-#include "clock.h"
-
-/*
- * APB Clock register offsets for PXA910
- */
-#define APBC_UART0 APBC_REG(0x000)
-#define APBC_UART1 APBC_REG(0x004)
-#define APBC_GPIO APBC_REG(0x008)
-#define APBC_PWM1 APBC_REG(0x00c)
-#define APBC_PWM2 APBC_REG(0x010)
-#define APBC_PWM3 APBC_REG(0x014)
-#define APBC_PWM4 APBC_REG(0x018)
-#define APBC_SSP1 APBC_REG(0x01c)
-#define APBC_SSP2 APBC_REG(0x020)
-#define APBC_RTC APBC_REG(0x028)
-#define APBC_TWSI0 APBC_REG(0x02c)
-#define APBC_KPC APBC_REG(0x030)
-#define APBC_SSP3 APBC_REG(0x04c)
-#define APBC_TWSI1 APBC_REG(0x06c)
-
-#define APMU_NAND APMU_REG(0x060)
-#define APMU_USB APMU_REG(0x05c)
-
-static APBC_CLK(uart1, UART0, 1, 14745600);
-static APBC_CLK(uart2, UART1, 1, 14745600);
-static APBC_CLK(twsi0, TWSI0, 1, 33000000);
-static APBC_CLK(twsi1, TWSI1, 1, 33000000);
-static APBC_CLK(pwm1, PWM1, 1, 13000000);
-static APBC_CLK(pwm2, PWM2, 1, 13000000);
-static APBC_CLK(pwm3, PWM3, 1, 13000000);
-static APBC_CLK(pwm4, PWM4, 1, 13000000);
-static APBC_CLK(gpio, GPIO, 0, 13000000);
-static APBC_CLK(rtc, RTC, 8, 32768);
-
-static APMU_CLK(nand, NAND, 0x19b, 156000000);
-static APMU_CLK(u2o, USB, 0x1b, 480000000);
-
-/* device and clock bindings */
-static struct clk_lookup pxa910_clkregs[] = {
- INIT_CLKREG(&clk_uart1, "pxa2xx-uart.0", NULL),
- INIT_CLKREG(&clk_uart2, "pxa2xx-uart.1", NULL),
- INIT_CLKREG(&clk_twsi0, "pxa2xx-i2c.0", NULL),
- INIT_CLKREG(&clk_twsi1, "pxa2xx-i2c.1", NULL),
- INIT_CLKREG(&clk_pwm1, "pxa910-pwm.0", NULL),
- INIT_CLKREG(&clk_pwm2, "pxa910-pwm.1", NULL),
- INIT_CLKREG(&clk_pwm3, "pxa910-pwm.2", NULL),
- INIT_CLKREG(&clk_pwm4, "pxa910-pwm.3", NULL),
- INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_gpio, "mmp-gpio", NULL),
- INIT_CLKREG(&clk_u2o, NULL, "U2OCLK"),
- INIT_CLKREG(&clk_rtc, "sa1100-rtc", NULL),
-};
-
-void __init pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
- phys_addr_t apbc_phys, phys_addr_t apbcp_phys)
-{
- clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs));
-}
diff --git a/arch/arm/mach-mmp/clock.c b/arch/arm/mach-mmp/clock.c
deleted file mode 100644
index 291fe41e3547..000000000000
--- a/arch/arm/mach-mmp/clock.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/mach-mmp/clock.c
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "regs-apbc.h"
-#include "clock.h"
-
-static void apbc_clk_enable(struct clk *clk)
-{
- uint32_t clk_rst;
-
- clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(clk->fnclksel);
- __raw_writel(clk_rst, clk->clk_rst);
-}
-
-static void apbc_clk_disable(struct clk *clk)
-{
- __raw_writel(0, clk->clk_rst);
-}
-
-struct clkops apbc_clk_ops = {
- .enable = apbc_clk_enable,
- .disable = apbc_clk_disable,
-};
-
-static void apmu_clk_enable(struct clk *clk)
-{
- __raw_writel(clk->enable_val, clk->clk_rst);
-}
-
-static void apmu_clk_disable(struct clk *clk)
-{
- __raw_writel(0, clk->clk_rst);
-}
-
-struct clkops apmu_clk_ops = {
- .enable = apmu_clk_enable,
- .disable = apmu_clk_disable,
-};
-
-static DEFINE_SPINLOCK(clocks_lock);
-
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clocks_lock, flags);
- if (clk->enabled++ == 0)
- clk->ops->enable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- if (!clk)
- return;
-
- WARN_ON(clk->enabled == 0);
-
- spin_lock_irqsave(&clocks_lock, flags);
- if (--clk->enabled == 0)
- clk->ops->disable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- unsigned long rate;
-
- if (clk->ops->getrate)
- rate = clk->ops->getrate(clk);
- else
- rate = clk->rate;
-
- return rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (clk->ops->setrate) {
- spin_lock_irqsave(&clocks_lock, flags);
- ret = clk->ops->setrate(clk, rate);
- spin_unlock_irqrestore(&clocks_lock, flags);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
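
The deleted files are a private, pre-common-clk implementation: each struct clk pairs an ops table with an enable refcount under one global spinlock, and APBC_CLK()/APMU_CLK() stamp out static instances. With CONFIG_COMMON_CLK now always on for mach-mmp, the equivalents come from the drivers/clk/mmp driver. For flavor, a hedged sketch of how one APBC-style clock could be expressed with common-clk primitives (illustrative only; this is not the actual mmp driver code, and the register offset and gate bit are placeholders):

    #include <linux/clk-provider.h>

    static struct clk_hw *uart1_src, *uart1_gate;

    static void example_register(void __iomem *apbc_base, spinlock_t *lock)
    {
            /* 14.7456 MHz fixed source, matching APBC_CLK(uart1, ...) */
            uart1_src = clk_hw_register_fixed_rate(NULL, "uart1_src", NULL,
                                                   0, 14745600);
            /* gate bit in the APBC UART1 register */
            uart1_gate = clk_hw_register_gate(NULL, "uart1", "uart1_src", 0,
                                              apbc_base + 0x000, 1, 0, lock);
    }
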
diff --git a/arch/arm/mach-mmp/clock.h b/arch/arm/mach-mmp/clock.h
deleted file mode 100644
index 0256c894fa11..000000000000
--- a/arch/arm/mach-mmp/clock.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#include <linux/clkdev.h>
-
-struct clkops {
- void (*enable)(struct clk *);
- void (*disable)(struct clk *);
- unsigned long (*getrate)(struct clk *);
- int (*setrate)(struct clk *, unsigned long);
-};
-
-struct clk {
- const struct clkops *ops;
-
- void __iomem *clk_rst; /* clock reset control register */
- int fnclksel; /* functional clock select (APBC) */
- uint32_t enable_val; /* value for clock enable (APMU) */
- unsigned long rate;
- int enabled;
-};
-
-extern struct clkops apbc_clk_ops;
-extern struct clkops apmu_clk_ops;
-
-#define APBC_CLK(_name, _reg, _fnclksel, _rate) \
-struct clk clk_##_name = { \
- .clk_rst = APBC_##_reg, \
- .fnclksel = _fnclksel, \
- .rate = _rate, \
- .ops = &apbc_clk_ops, \
-}
-
-#define APBC_CLK_OPS(_name, _reg, _fnclksel, _rate, _ops) \
-struct clk clk_##_name = { \
- .clk_rst = APBC_##_reg, \
- .fnclksel = _fnclksel, \
- .rate = _rate, \
- .ops = _ops, \
-}
-
-#define APMU_CLK(_name, _reg, _eval, _rate) \
-struct clk clk_##_name = { \
- .clk_rst = APMU_##_reg, \
- .enable_val = _eval, \
- .rate = _rate, \
- .ops = &apmu_clk_ops, \
-}
-
-#define APMU_CLK_OPS(_name, _reg, _eval, _rate, _ops) \
-struct clk clk_##_name = { \
- .clk_rst = APMU_##_reg, \
- .enable_val = _eval, \
- .rate = _rate, \
- .ops = _ops, \
-}
-
-#define INIT_CLKREG(_clk, _devname, _conname) \
- { \
- .clk = _clk, \
- .dev_id = _devname, \
- .con_id = _conname, \
- }
-
-extern struct clk clk_pxa168_gpio;
-extern struct clk clk_pxa168_timers;
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index b642e900727a..1e9389245d0e 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -19,7 +19,6 @@
#include <asm/system_misc.h>
#include "addr-map.h"
-#include "clock.h"
#include "common.h"
#include <linux/soc/mmp/cputype.h>
#include "devices.h"
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 049a65f47b42..41b2e8abc9e6 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -34,7 +34,6 @@
#include "regs-apbc.h"
#include "irqs.h"
#include <linux/soc/mmp/cputype.h>
-#include "clock.h"
#define TIMERS_VIRT_BASE TIMERS1_VIRT_BASE
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 0631a7b02678..aa265ede5730 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -34,7 +34,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
@@ -633,7 +632,7 @@ static void __init map_sa1100_gpio_regs( void )
int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
pmd_t *pmd;
- pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset_k(virt), virt), virt), virt);
+ pmd = pmd_off_k(virt);
*pmd = __pmd(phys | prot);
flush_pmd_entry(pmd);
}
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 6d37d263e0d2..3085f1c2e586 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -22,11 +22,11 @@
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/pgtable.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index 160cb18850f2..4cb7e5fee137 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -10,7 +10,7 @@
#ifndef __MACH_TEGRA_IOMAP_H
#define __MACH_TEGRA_IOMAP_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/sizes.h>
#define TEGRA_IRAM_BASE 0x40000000
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
index 8841199058ea..d01cdd9ad9c7 100644
--- a/arch/arm/mach-vt8500/Kconfig
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -2,7 +2,6 @@
config ARCH_VT8500
bool
select GPIOLIB
- select CLKDEV_LOOKUP
select VT8500_TIMER
select PINCTRL
help
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index a9dd2f71cd19..e1ca6a5732d2 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -24,13 +24,13 @@
#include <linux/irqchip/arm-gic.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
+#include <linux/pgtable.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/smp_scu.h>
#include <asm/system_info.h>
#include <asm/hardware/cache-l2x0.h>
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index a94bd08fdec2..44f7292ec27b 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -14,7 +14,6 @@
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index a6488bb6cfa9..6a769a6c314e 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -9,7 +9,6 @@
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 382e1c2855e8..eb5d338657d1 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -14,7 +14,6 @@
#include <linux/mm.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 677549d6854c..c18d23a5e5f1 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -16,7 +16,6 @@
#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/ptdump.h>
static struct addr_marker address_markers[] = {
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 489aaafa6ebd..0e49154454a6 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -17,7 +17,6 @@
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "mm.h"
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index ff230e9affc4..c6550eddfce1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -18,7 +18,6 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
-#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>
@@ -272,11 +271,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -294,7 +293,7 @@ retry:
fault = __do_page_fault(mm, addr, fsr, flags, tsk);
/* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because
+ * signal first. We do not need to release the mmap_lock because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
if (fault_signal_pending(fault, regs)) {
@@ -326,7 +325,7 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index e013f6b81328..187fab227b50 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,7 +18,7 @@
static inline void set_fixmap_pte(int idx, pte_t pte)
{
unsigned long vaddr = __fix_to_virt(idx);
- pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *ptep = virt_to_kpte(vaddr);
set_pte_ext(ptep, pte, 0);
local_flush_tlb_kernel_page(vaddr);
@@ -26,7 +26,7 @@ static inline void set_fixmap_pte(int idx, pte_t pte)
static inline pte_t get_fixmap_pte(unsigned long vaddr)
{
- pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *ptep = virt_to_kpte(vaddr);
return *ptep;
}
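
virt_to_kpte() replaces the open-coded pte_offset_kernel(pmd_off_k(vaddr), vaddr) pair for kernel-virtual addresses. Assuming the generic helper introduced alongside this series, it is roughly:

    /* Sketch of the generic helper (see <linux/pgtable.h>). */
    static inline pte_t *virt_to_kpte(unsigned long vaddr)
    {
            pmd_t *pmd = pmd_off_k(vaddr);

            return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
    }
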
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index cd54411ef1b8..448e57c6f653 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -3,12 +3,12 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/hwcap.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 75529d76d28c..000e8210000b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -141,16 +141,8 @@ void __check_vmalloc_seq(struct mm_struct *mm)
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmdp;
-
- flush_cache_vunmap(addr, end);
- pgd = pgd_offset_k(addr);
- p4d = p4d_offset(pgd, addr);
- pud = pud_offset(p4d, addr);
- pmdp = pmd_offset(pud, addr);
+ pmd_t *pmdp = pmd_off_k(addr);
+
do {
pmd_t pmd = *pmdp;
@@ -191,10 +183,7 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
+ pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
@@ -202,10 +191,6 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
*/
unmap_area_sections(virt, size);
- pgd = pgd_offset_k(addr);
- p4d = p4d_offset(pgd, addr);
- pud = pud_offset(p4d, addr);
- pmd = pmd_offset(pud, addr);
do {
pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
@@ -225,21 +210,13 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
size_t size, const struct mem_type *type)
{
unsigned long addr = virt, end = virt + size;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
+ pmd_t *pmd = pmd_off_k(addr);
/*
* Remove and free any PTE-based mapping, and
* sync the current kernel mapping.
*/
unmap_area_sections(virt, size);
-
- pgd = pgd_offset_k(virt);
- p4d = p4d_offset(pgd, addr);
- pud = pud_offset(p4d, addr);
- pmd = pmd_offset(pud, addr);
do {
unsigned long super_pmd_val, i;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 4f1f72b75890..9ff683612f2a 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -2,8 +2,7 @@
#ifdef CONFIG_MMU
#include <linux/list.h>
#include <linux/vmalloc.h>
-
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/* the upper-most page table pointer */
extern pmd_t *top_pmd;
@@ -36,11 +35,6 @@ static inline pte_t get_top_pte(unsigned long va)
return *ptep;
}
-static inline pmd_t *pmd_off_k(unsigned long virt)
-{
- return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(virt), virt), virt), virt);
-}
-
struct mem_type {
pteval_t prot_pte;
pteval_t prot_pte_s2;
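
The local pmd_off_k() can go because an equivalent now lives in the generic headers, which is what the assabet.c and ioremap.c hunks above, and the fixmap_pmd() change just below, rely on. Assuming the shape of the common helpers, the four-level walk they wrap is:

    /* Sketch of the generic helpers (see <linux/pgtable.h>). */
    static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
    {
            return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va),
                                         va), va);
    }

    static inline pmd_t *pmd_off_k(unsigned long va)
    {
            return pmd_off(&init_mm, va);
    }
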
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c425288f1a86..628028bfbb92 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -356,12 +356,7 @@ static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
static inline pmd_t * __init fixmap_pmd(unsigned long addr)
{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d = p4d_offset(pgd, addr);
- pud_t *pud = pud_offset(p4d, addr);
- pmd_t *pmd = pmd_offset(pud, addr);
-
- return pmd;
+ return pmd_off_k(addr);
}
void __init early_fixmap_init(void)
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index d546efad7e97..9790ae3a8c68 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -5,7 +5,6 @@
#include <linux/mm.h>
#include <linux/module.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/set_memory.h>
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2785da387c91..6837cf7a4812 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index e9ea237ed785..df49b10250b8 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 920c279e7879..e89ce467f672 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 0bdf25a95b10..7fdd1a205e8e 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -11,11 +11,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 39361e196d61..3b687e6dd9fd 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -20,11 +20,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 1a94bbf6e53f..f2ec3bc60874 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -6,11 +6,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 52b66cf0259e..01bbe7576c1c 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -6,11 +6,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 31ac8acc34dc..a234cd8ba5e6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -13,10 +13,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index ca2c7ca8af21..53c029dcfd83 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -14,10 +14,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index a381a0c9f109..0bfad62ea858 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -37,10 +37,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 1ba253c2bce1..0487a2c3439b 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -13,10 +13,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 4b8a00220cc9..cf9bfcc825ca 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -6,10 +6,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 555becf9c758..6fb3898ad1cd 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -8,10 +8,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index ef517530130b..a054c0e9c034 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -6,11 +6,11 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index dddf833fe000..2c73e0d47d08 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -11,10 +11,10 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index b12b76bc8d30..61ce82aca6f0 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -8,10 +8,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index d47d6c5cee63..1645ccaffe96 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -9,10 +9,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index baba503ba816..4071f7a61cb6 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -12,12 +12,12 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 75ebacc8e4e5..e723bd4119d3 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -17,12 +17,12 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <mach/hardware.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1dd0d5ca27da..a0618f3e6836 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -9,11 +9,11 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 48e0ef6f0dcc..28c9d32fa99a 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -9,11 +9,11 @@
#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/memory.h>
#include "proc-macros.S"
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 42eaecc43cfe..a17afe7e195a 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -23,9 +23,9 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 18ac5a1f8922..d82590aa71c0 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -19,9 +19,9 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
index 769778928356..8eade0416739 100644
--- a/arch/arm/mm/pv-fixup-asm.S
+++ b/arch/arm/mm/pv-fixup-asm.S
@@ -6,10 +6,10 @@
* for Keystone 2
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/cp15.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
.section ".idmap.text", "ax"
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7f9d38444d6d..8a46ed3ab429 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1299,6 +1299,14 @@ config COMPAT_VDSO
You must have a 32-bit build of glibc 2.22 or later for programs
to seamlessly take advantage of this.
+config THUMB2_COMPAT_VDSO
+ bool "Compile the 32-bit vDSO for Thumb-2 mode" if EXPERT
+ depends on COMPAT_VDSO
+ default y
+ help
+ Compile the compat vDSO with '-mthumb -fomit-frame-pointer' if y,
+ otherwise with '-marm'.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on SYSCTL
@@ -1740,8 +1748,9 @@ config ARM64_DEBUG_PRIORITY_MASKING
endif
config RELOCATABLE
- bool
+ bool "Build a relocatable kernel image" if EXPERT
select ARCH_HAS_RELR
+ default y
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 44537dcfd251..8dd05b2a925c 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -235,7 +235,6 @@ config ARCH_TEGRA
bool "NVIDIA Tegra SoC Family"
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC_PM
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select TIMER_OF
select GENERIC_CLOCKEVENTS
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 650e1185c190..76359cfb328a 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -94,7 +94,6 @@ endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
-AS += -EB
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
@@ -102,7 +101,6 @@ UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
-AS += -EL
# Same as above, prefer ELF but fall back to linux target if needed.
KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
UTS_MACHINE := aarch64
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index d8c44d3ca15a..f52de8f7806a 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -539,12 +539,12 @@
firmware {
svc {
- compatible = "intel,stratix10-svc";
+ compatible = "intel,agilex-svc";
method = "smc";
memory-region = <&service_reserved>;
fpga_mgr: fpga-mgr {
- compatible = "intel,stratix10-soc-fpga-mgr";
+ compatible = "intel,agilex-soc-fpga-mgr";
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 7c2b79dda3d7..31b9217bb5bf 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -2244,6 +2244,10 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
+ interconnects = <&aggre2_noc MASTER_USB3 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_USB3>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xe000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
index 5938f8b2aa2f..70466cc4b405 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi
@@ -633,6 +633,11 @@ ap_ts_i2c: &i2c14 {
status = "okay";
};
+&mss_pil {
+ iommus = <&apps_smmu 0x780 0x1>,
+ <&apps_smmu 0x724 0x3>;
+};
+
&pm8998_pwrkey {
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 7cce6f1b7c9e..8eb5a31346d2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -3136,6 +3136,10 @@
resets = <&gcc GCC_USB30_PRIM_BCR>;
+ interconnects = <&aggre2_noc MASTER_USB3_0 &mem_noc SLAVE_EBI1>,
+ <&gladiator_noc MASTER_APPSS_PROC &config_noc SLAVE_USB3_0>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
usb_1_dwc3: dwc3@a600000 {
compatible = "snps,dwc3";
reg = <0 0x0a600000 0 0xcd00>;
@@ -3180,6 +3184,10 @@
resets = <&gcc GCC_USB30_SEC_BCR>;
+ interconnects = <&aggre2_noc MASTER_USB3_1 &mem_noc SLAVE_EBI1>,
+ <&gladiator_noc MASTER_APPSS_PROC &config_noc SLAVE_USB3_1>;
+ interconnect-names = "usb-ddr", "apps-usb";
+
usb_2_dwc3: dwc3@a800000 {
compatible = "snps,dwc3";
reg = <0 0x0a800000 0 0xcd00>;
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index b263e239cb59..a45366c3909b 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/psci.h>
+#include <linux/stddef.h>
#include <asm/cputype.h>
#include <asm/io.h>
@@ -31,14 +32,14 @@
* is therefore used to delimit the MADT GICC structure minimum length
* appropriately.
*/
-#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \
+#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \
struct acpi_madt_generic_interrupt, efficiency_class)
#define BAD_MADT_GICC_ENTRY(entry, end) \
(!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
(unsigned long)(entry) + (entry)->header.length > (end))
-#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \
+#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \
spe_interrupt) + sizeof(u16))
/* Basic configuration for ACPI */
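
The acpi.h change swaps ACPICA's private ACPI_OFFSET() for the standard offsetof() from <linux/stddef.h> (hence the added include); both yield the byte offset of a member, which the MADT code uses as the minimum valid structure length. A self-contained illustration with a stand-in struct:

    #include <stddef.h>
    #include <stdio.h>

    struct gicc_like {      /* stand-in for acpi_madt_generic_interrupt */
            unsigned int flags;
            unsigned int efficiency_class;
    };

    int main(void)
    {
            /* minimum length = offset of the first optional field */
            printf("min length = %zu\n",
                   offsetof(struct gicc_like, efficiency_class));
            return 0;
    }
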
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 9543b5e0534d..a08890da696c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -101,8 +101,8 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define ATOMIC_INIT(i) { (i) }
-#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) __READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) __WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
@@ -225,6 +225,6 @@ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* __ASM_ATOMIC_H */
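Replacing the asm-generic/atomic-instrumented.h include with ARCH_ATOMIC hands the KASAN/KCSAN instrumentation over to the common atomic_*() wrappers, which is also why arch_atomic_read/set above drop to the uninstrumented __READ_ONCE()/__WRITE_ONCE(). A sketch of the shape of the generic wrapper that then sits on top (the real code is generated into include/asm-generic/atomic-instrumented.h):

static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN hook */
	return arch_atomic_read(v);		/* uninstrumented arch op */
}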
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 7d9cc5ec4971..fb4c27506ef4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -76,8 +76,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
#define __smp_store_release(p, v) \
do { \
typeof(p) __p = (p); \
- union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
+ union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force __unqual_scalar_typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
kasan_check_write(__p, sizeof(*p)); \
switch (sizeof(*p)) { \
@@ -110,7 +110,7 @@ do { \
#define __smp_load_acquire(p) \
({ \
- union { typeof(*p) __val; char __c[1]; } __u; \
+ union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u; \
typeof(p) __p = (p); \
compiletime_assert_atomic_type(*p); \
kasan_check_read(__p, sizeof(*p)); \
@@ -136,33 +136,33 @@ do { \
: "Q" (*__p) : "memory"); \
break; \
} \
- __u.__val; \
+ (typeof(*p))__u.__val; \
})
#define smp_cond_load_relaxed(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = READ_ONCE(*__PTR); \
if (cond_expr) \
break; \
__cmpwait_relaxed(__PTR, VAL); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#define smp_cond_load_acquire(ptr, cond_expr) \
({ \
typeof(ptr) __PTR = (ptr); \
- typeof(*ptr) VAL; \
+ __unqual_scalar_typeof(*ptr) VAL; \
for (;;) { \
VAL = smp_load_acquire(__PTR); \
if (cond_expr) \
break; \
__cmpwait_relaxed(__PTR, VAL); \
} \
- VAL; \
+ (typeof(*ptr))VAL; \
})
#include <asm-generic/barrier.h>
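__unqual_scalar_typeof() strips const/volatile from scalar types, so VAL in the cond-load loops above becomes an ordinary register-allocatable variable; the (typeof(*ptr)) cast on the result restores the original type for the caller. A minimal sketch of the trick, assuming C11 _Generic (the kernel's full macro in include/linux/compiler_types.h covers more type cases):

#define __unqual_scalar_typeof(x) typeof(		\
	_Generic((x),					\
		 char:		(char)0,		\
		 int:		(int)0,			\
		 unsigned int:	(unsigned int)0,	\
		 long:		(long)0,		\
		 unsigned long:	(unsigned long)0,	\
		 default:	(x)))

volatile int flag;
__unqual_scalar_typeof(flag) tmp;	/* plain int: reloads of tmp can stay in a register */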
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index ce50c1f1f1ea..9384fd8fc13c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -94,20 +94,7 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
kick_all_cpus_sync();
}
-
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_page(struct vm_area_struct *vma,
- unsigned long user_addr, unsigned long pfn)
-{
-}
-
-static inline void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
+#define flush_icache_range flush_icache_range
/*
* Cache maintenance functions used by the DMA API. Not to be used directly.
@@ -123,12 +110,7 @@ extern void __dma_flush_area(const void *, size_t);
*/
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
unsigned long, void *, const void *, unsigned long);
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- } while (0)
-
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define copy_to_user_page copy_to_user_page
/*
* flush_dcache_page is used when the kernel has written to the page
@@ -154,29 +136,11 @@ static __always_inline void __flush_icache_all(void)
dsb(ish);
}
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-/*
- * We don't appear to need to do anything here. In fact, if we did, we'd
- * duplicate cache flushing elsewhere performed by flush_dcache_page().
- */
-#define flush_icache_page(vma,page) do { } while (0)
-
-/*
- * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
- */
-static inline void flush_cache_vmap(unsigned long start, unsigned long end)
-{
-}
-
-static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
-{
-}
-
int set_memory_valid(unsigned long addr, int numpages, int enable);
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
-#endif
+#include <asm-generic/cacheflush.h>
+
+#endif /* __ASM_CACHEFLUSH_H */
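The self-referential defines (#define flush_icache_range flush_icache_range and friends) are how <asm-generic/cacheflush.h> detects an arch-specific override; every stub deleted above comes back as a generic no-op fallback. A sketch of the pattern used by the generic header:

#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
	/* no-op fallback, compiled in unless the arch #defines its own */
}
#endif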
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 4f00d50585a4..8d1c8dcb87fd 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -96,7 +96,28 @@
*/
#define elf_check_arch(x) ((x)->e_machine == EM_AARCH64)
-#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X)
+/*
+ * An executable for which elf_read_implies_exec() returns TRUE will
+ * have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the results is:
+ *
+ *               CPU*: |   arm32    |   arm64    |
+ * ELF:                 |            |            |
+ * ---------------------|------------|------------|
+ * missing PT_GNU_STACK | exec-all   | exec-none  |
+ * PT_GNU_STACK == RWX  | exec-stack | exec-stack |
+ * PT_GNU_STACK == RW   | exec-none  | exec-none  |
+ *
+ * exec-all : all PROT_READ user mappings are executable, except when
+ * backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *all arm64 CPUs support NX, so there is no "lacks NX" column.
+ *
+ */
+#define compat_elf_read_implies_exec(ex, stk) (stk == EXSTACK_DEFAULT)
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
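The personality flag is consulted exactly once, at execve() time; a simplified sketch of the consumer in fs/binfmt_elf.c (compat_elf_read_implies_exec() above feeds the same hook for 32-bit binaries):

if (elf_read_implies_exec(loc->elf_ex, executable_stack))
	current->personality |= READ_IMPLIES_EXEC;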
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 6facd1308e7c..ff50dd731852 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -9,11 +9,11 @@
#define __ASM_IO_H
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index a6e5da755359..3bf626f6fe0c 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -8,7 +8,7 @@
#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/sparsemem.h>
/*
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 0c9b5fc4ba0a..352aaebf4198 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to a loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s) \
+ ({ \
+ typeof(s) *addr; \
+ asm("adrp %0, %1\n" \
+ "add %0, %0, :lo12:%1\n" \
+ : "=r" (addr) : "S" (&s)); \
+ addr; \
+ })
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
#define __hyp_this_cpu_ptr(sym) \
({ \
- void *__ptr = hyp_symbol_addr(sym); \
+ void *__ptr; \
+ __verify_pcpu_ptr(&sym); \
+ __ptr = hyp_symbol_addr(sym); \
__ptr += read_sysreg(tpidr_el2); \
- (typeof(&sym))__ptr; \
+ (typeof(sym) __kernel __force *)__ptr; \
})
#define __hyp_this_cpu_read(sym) \
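hyp_symbol_addr() matters because HYP runs under its own VA map: a compiler-generated literal-pool load would produce the symbol's kernel VA, while the adrp/add pair computes an address relative to the PC actually executing at HYP. A usage sketch (some_hyp_state and example() are hypothetical, for illustration only):

static unsigned long some_hyp_state;	/* hypothetical symbol */

static void example(void)
{
	unsigned long *p = hyp_symbol_addr(some_hyp_state); /* PC-relative adrp/add */
	/* a plain &some_hyp_state may instead be materialized as an absolute
	 * kernel VA, which is the wrong address under the HYP mapping. */
	(void)p;
}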
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6ea53e6e8b26..4d0f8ea600ba 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
- if (vcpu_has_ptrauth(vcpu))
- vcpu_ptrauth_disable(vcpu);
-}
-
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index abbdf9703e20..c3e6fcc664b1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- /* Pointer to host CPU context */
- struct kvm_cpu_context *host_cpu_context;
-
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
@@ -404,8 +401,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
* CP14 and CP15 live in the same array, as they are backed by the
* same system registers.
*/
-#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
struct kvm_vm_stat {
ulong remote_tlb_flush;
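The CPx_BIAS XOR exists because the 32-bit copro[] view aliases the 64-bit sys_regs[] storage: on a little-endian host the low 32-bit half of a u64 sits at the even index, on a big-endian host at the odd one. A standalone demonstration of the aliasing (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	union { uint64_t r64; uint32_t r32[2]; } u = { .r64 = 0x1111111122222222ULL };

	/* Prints 0 on little-endian, 1 on big-endian -- hence copro[(r) ^ CPx_BIAS]. */
	printf("low half lives at index %d\n", u.r32[0] == 0x22222222 ? 0 : 1);
	return 0;
}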
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index f1a74163d764..b12bfc1f051a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -83,11 +83,11 @@ alternative_cb_end
#else
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -108,26 +108,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
- ({ \
- typeof(s) *addr; \
- asm("adrp %0, %1\n" \
- "add %0, %0, :lo12:%1\n" \
- : "=r" (addr) : "S" (&s)); \
- addr; \
- })
-
-/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
*/
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index ab46187c6300..b0bd9b55594c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -14,13 +14,13 @@
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
-#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 1f3218fc52fc..6dbd267ab931 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -340,7 +340,7 @@ static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
#ifdef CONFIG_NUMA_BALANCING
/*
- * See the comment in include/asm-generic/pgtable.h
+ * See the comment in include/linux/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
@@ -506,15 +506,13 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return __pmd_to_phys(pmd);
}
-static inline void pte_unmap(pte_t *pte) { }
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_page_paddr(pmd));
+}
/* Find an entry in the third-level page table. */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
#define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
-#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
@@ -568,11 +566,13 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
return __pud_to_phys(pud);
}
-/* Find an entry in the second-level page table. */
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return (unsigned long)__va(pud_page_paddr(pud));
+}
+/* Find an entry in the second-level page table. */
#define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
-#define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
#define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
@@ -626,11 +626,13 @@ static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
return __p4d_to_phys(p4d);
}
-/* Find an entry in the frst-level page table. */
-#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+static inline unsigned long p4d_page_vaddr(p4d_t p4d)
+{
+ return (unsigned long)__va(p4d_page_paddr(p4d));
+}
+/* Find an entry in the first-level page table. */
#define pud_offset_phys(dir, addr) (p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
-#define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr))))
#define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(p4d, addr) pud_set_fixmap(pud_offset_phys(p4d, addr))
@@ -657,16 +659,6 @@ static inline phys_addr_t p4d_page_paddr(p4d_t p4d)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
-
-#define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr)))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
#define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap() clear_fixmap(FIX_PGD)
@@ -853,8 +845,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
extern int kern_addr_valid(unsigned long addr);
-#include <asm-generic/pgtable.h>
-
/*
* On AArch64, the cache coherency is handled via the set_pte_at() function.
*/
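With pte_index(), pte_offset_kernel(), pmd_offset(), pud_offset() and pgd_offset() gone from the arch header, the generic definitions in <linux/pgtable.h> take over, built on the *_page_vaddr() helpers added above. A sketch of what the generic third-level lookup reduces to here:

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}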
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 5017b531a415..fc7613023c19 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -64,7 +64,8 @@ struct stackframe {
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
-extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl);
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 9a364aeae5fb..b767904f28b1 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -9,7 +9,7 @@
#define __ARM64_S2_PGTABLE_H_
#include <linux/hugetlb.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h
index 0cc6636e3f15..894e031b28d2 100644
--- a/arch/arm64/include/asm/vmap_stack.h
+++ b/arch/arm64/include/asm/vmap_stack.h
@@ -7,8 +7,8 @@
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/thread_info.h>
/*
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 46ec402e97ed..a7586a4db142 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -24,12 +24,12 @@
#include <linux/of_fdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
+#include <linux/pgtable.h>
#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
-#include <asm/pgtable.h>
#include <asm/smp_plat.h>
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 15e80c876d46..5df49366e9ab 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -130,7 +130,7 @@ static int clear_os_lock(unsigned int cpu)
return 0;
}
-static int debug_monitors_init(void)
+static int __init debug_monitors_init(void)
{
return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
"arm64/debug_monitors:starting",
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 8618faa82e6d..86a5cf9bc19a 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -69,7 +69,8 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
if (addr == FTRACE_ADDR)
return &plt[FTRACE_PLT_IDX];
- if (addr == FTRACE_REGS_ADDR && IS_ENABLED(CONFIG_FTRACE_WITH_REGS))
+ if (addr == FTRACE_REGS_ADDR &&
+ IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
return &plt[FTRACE_REGS_PLT_IDX];
#endif
return NULL;
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 632702146813..037421c66b14 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/pgtable.h>
#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
@@ -26,7 +27,6 @@
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index a8a4b55f3a09..68e14152d6e9 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -32,7 +32,6 @@
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
@@ -189,7 +188,7 @@ static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
pmd_t *pmdp;
pte_t *ptep;
- pgdp = pgd_offset_raw(trans_pgd, dst_addr);
+ pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
pudp = (void *)get_safe_page(GFP_ATOMIC);
if (!pudp)
@@ -491,7 +490,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
unsigned long addr = start;
pgd_t *src_pgdp = pgd_offset_k(start);
- dst_pgdp = pgd_offset_raw(dst_pgdp, start);
+ dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
do {
next = pgd_addr_end(addr, end);
if (pgd_none(READ_ONCE(*src_pgdp)))
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 91a83104c6e8..07c4c8cc4a67 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -10,6 +10,7 @@
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <asm/archrandom.h>
#include <asm/cacheflush.h>
@@ -17,7 +18,6 @@
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
enum kaslr_status {
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 570988c7a7ff..1006ed2d7c60 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -117,7 +117,7 @@ pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
struct device *dev = &root->device->dev;
struct resource *bus_res = &root->secondary;
u16 seg = root->segment;
- struct pci_ecam_ops *ecam_ops;
+ const struct pci_ecam_ops *ecam_ops;
struct resource cfgres;
struct acpi_device *adev;
struct pci_config_window *cfg;
@@ -185,7 +185,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
root_ops->release_info = pci_acpi_generic_release_info;
root_ops->prepare_resources = pci_acpi_root_prepare_resources;
- root_ops->pci_ops = &ri->cfg->ops->pci_ops;
+ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
if (!bus)
return NULL;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index eade7807e819..6089638c7d43 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -306,7 +306,7 @@ void __show_regs(struct pt_regs *regs)
void show_regs(struct pt_regs * regs)
{
__show_regs(regs);
- dump_backtrace(regs, NULL);
+ dump_backtrace(regs, NULL, KERN_DEFAULT);
}
static void tls_thread_flush(void)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 76790a5f2a0d..68b7f34a08f5 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -34,7 +34,6 @@
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
-#include <asm/pgtable.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3fd2c11c09fc..93b3844cf442 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -319,6 +319,10 @@ void __init setup_arch(char **cmdline_p)
xen_early_init();
efi_init();
+
+ if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
+ pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!\n");
+
arm64_memblock_init();
paging_init();
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4b6f4999d06a..e43a8ff19f0f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -43,7 +43,6 @@
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 9405d1b7f4b0..c1dee9066ff9 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -3,13 +3,13 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
-#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d332590f5978..50cc30acf106 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -53,9 +53,9 @@ static const char *handler[]= {
int show_unhandled_signals = 0;
-static void dump_backtrace_entry(unsigned long where)
+static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
- printk(" %pS\n", (void *)where);
+ printk("%s %pS\n", loglvl, (void *)where);
}
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
@@ -83,7 +83,8 @@ static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
printk("%sCode: %s\n", lvl, str);
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
struct stackframe frame;
int skip = 0;
@@ -115,11 +116,11 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
thread_saved_pc(tsk));
}
- printk("Call trace:\n");
+ printk("%sCall trace:\n", loglvl);
do {
/* skip until specified stack frame */
if (!skip) {
- dump_backtrace_entry(frame.pc);
+ dump_backtrace_entry(frame.pc, loglvl);
} else if (frame.fp == regs->regs[29]) {
skip = 0;
/*
@@ -129,16 +130,16 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
* at which an exception has taken place, use regs->pc
* instead.
*/
- dump_backtrace_entry(regs->pc);
+ dump_backtrace_entry(regs->pc, loglvl);
}
} while (!unwind_frame(tsk, &frame));
put_task_stack(tsk);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
- dump_backtrace(NULL, tsk);
+ dump_backtrace(NULL, tsk, loglvl);
barrier();
}
@@ -447,12 +448,12 @@ void arm64_notify_segfault(unsigned long addr)
{
int code;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
if (find_vma(current->mm, addr) == NULL)
code = SEGV_MAPERR;
else
code = SEGV_ACCERR;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
force_signal_inject(SIGSEGV, code, addr);
}
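mmap_read_lock()/mmap_read_unlock() (and the write/killable variants in the vdso and KVM hunks below) belong to the new mmap locking API; at this stage of the series they are one-line wrappers over the old semaphore. A sketch of the initial include/linux/mmap_lock.h implementation:

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);	/* the field is later renamed mmap_lock */
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}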
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d51a898fd60f..4e016574bd91 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -340,7 +340,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
ret = aarch32_kuser_helpers_setup(mm);
@@ -357,7 +357,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
#endif /* CONFIG_COMPAT_VDSO */
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
#endif /* CONFIG_COMPAT */
@@ -398,7 +398,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
struct mm_struct *mm = current->mm;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
ret = __setup_additional_pages(VDSO_ABI_AA64,
@@ -406,7 +406,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
bprm,
uses_interp);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 3964738ebbde..7ea1e827e505 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -105,6 +105,14 @@ VDSO_CFLAGS += -D__uint128_t='void*'
VDSO_CFLAGS += $(call cc32-disable-warning,shift-count-overflow)
VDSO_CFLAGS += -Wno-int-to-pointer-cast
+# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
+# unreliable.
+ifeq ($(CONFIG_THUMB2_COMPAT_VDSO), y)
+VDSO_CFLAGS += -mthumb -fomit-frame-pointer
+else
+VDSO_CFLAGS += -marm
+endif
+
VDSO_AFLAGS := $(VDSO_CAFLAGS)
VDSO_AFLAGS += -D__ASSEMBLY__
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3be632177631..6827da7f3aa5 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,7 +13,6 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "image.h"
diff --git a/arch/arm64/kvm/aarch32.c b/arch/arm64/kvm/aarch32.c
index 0a356aa91aa1..40a62a99fbf8 100644
--- a/arch/arm64/kvm/aarch32.c
+++ b/arch/arm64/kvm/aarch32.c
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ if (vcpu->arch.sysregs_loaded_on_cpu) {
+ kvm_arch_vcpu_put(vcpu);
+ return true;
+ }
+
+ preempt_enable();
+ return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+ if (loaded) {
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ preempt_enable();
+ }
+}
+
/*
* When an exception is taken, most CPSR fields are left unchanged in the
* handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
+ bool loaded = pre_fault_synchronize(vcpu);
+
prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+ post_fault_synchronize(vcpu, loaded);
}
/*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
u32 vect_offset;
u32 *far, *fsr;
bool is_lpae;
+ bool loaded;
+
+ loaded = pre_fault_synchronize(vcpu);
if (is_pabt) {
vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
/* no need to shuffle FS[4] into DFSR[10] as its 0 */
*fsr = DFSR_FSC_EXTABT_nLPAE;
}
+
+ post_fault_synchronize(vcpu, loaded);
}
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
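Note the asymmetric preemption handling in the helpers above: pre_fault_synchronize() returns with preemption still disabled when it had to kvm_arch_vcpu_put() the sysregs, and post_fault_synchronize() re-enables it only on that path, so the vcpu cannot migrate CPUs while its register state sits in memory. The calling pattern the hunks establish, as a sketch:

bool loaded = pre_fault_synchronize(vcpu);	/* sysregs now in vcpu->arch.ctxt */
/* ... fabricate the AArch32 exception state ... */
post_fault_synchronize(vcpu, loaded);		/* reload regs, re-enable preemption */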
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7a57381c05e8..90cb90561446 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -144,11 +144,6 @@ out_fail_alloc:
return ret;
}
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
@@ -340,10 +335,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
- kvm_host_data_t *cpu_data;
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
- cpu_data = this_cpu_ptr(&kvm_host_data);
/*
* We might get preempted before the vCPU actually runs, but
@@ -355,7 +348,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
@@ -370,7 +362,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
else
vcpu_set_wfx_traps(vcpu);
- vcpu_ptrauth_setup_lazy(vcpu);
+ if (vcpu_has_ptrauth(vcpu))
+ vcpu_ptrauth_disable(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -990,11 +983,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* Ensure a rebooted VM will fault in RAM pages and detect if the
* guest MMU is turned off and flush the caches as needed.
*
- * S2FWB enforces all memory accesses to RAM being cacheable, we
- * ensure that the cache is always coherent.
+ * S2FWB enforces all memory accesses to RAM being cacheable,
+ * ensuring that the data side is always coherent. We still
+ * need to invalidate the I-cache though, as FWB does *not*
+ * imply CTR_EL0.DIC.
*/
- if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
- stage2_unmap_vm(vcpu->kvm);
+ if (vcpu->arch.has_run_once) {
+ if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+ stage2_unmap_vm(vcpu->kvm);
+ else
+ __flush_icache_all();
+ }
vcpu_reset_hcr(vcpu);
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index eb194696ef62..5a02d4c90559 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
-#define __ptrauth_save_key(regs, key) \
-({ \
- regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
- regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
-})
-
-/*
- * Handle the guest trying to use a ptrauth instruction, or trying to access a
- * ptrauth register.
- */
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpu_context *ctxt;
-
- if (vcpu_has_ptrauth(vcpu)) {
- vcpu_ptrauth_enable(vcpu);
- ctxt = vcpu->arch.host_cpu_context;
- __ptrauth_save_key(ctxt->sys_regs, APIA);
- __ptrauth_save_key(ctxt->sys_regs, APIB);
- __ptrauth_save_key(ctxt->sys_regs, APDA);
- __ptrauth_save_key(ctxt->sys_regs, APDB);
- __ptrauth_save_key(ctxt->sys_regs, APGA);
- } else {
- kvm_inject_undefined(vcpu);
- }
-}
-
/*
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
- * a NOP).
+ * a NOP). If we get here, it is because we didn't fix up ptrauth on exit, and all
+ * that we can do is give the guest an UNDEF.
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- kvm_arm_vcpu_ptrauth_trap(vcpu);
+ kvm_inject_undefined(vcpu);
return 1;
}
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 0fc9872a1467..e95af204fec7 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 676b6585e5ae..db1c4487d95d 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
return true;
}
+static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+{
+ u32 ec = ESR_ELx_EC(esr);
+
+ if (ec == ESR_ELx_EC_PAC)
+ return true;
+
+ if (ec != ESR_ELx_EC_SYS64)
+ return false;
+
+ switch (esr_sys64_to_sysreg(esr)) {
+ case SYS_APIAKEYLO_EL1:
+ case SYS_APIAKEYHI_EL1:
+ case SYS_APIBKEYLO_EL1:
+ case SYS_APIBKEYHI_EL1:
+ case SYS_APDAKEYLO_EL1:
+ case SYS_APDAKEYHI_EL1:
+ case SYS_APDBKEYLO_EL1:
+ case SYS_APDBKEYHI_EL1:
+ case SYS_APGAKEYLO_EL1:
+ case SYS_APGAKEYHI_EL1:
+ return true;
+ }
+
+ return false;
+}
+
+#define __ptrauth_save_key(regs, key) \
+({ \
+ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *ctxt;
+ u64 val;
+
+ if (!vcpu_has_ptrauth(vcpu) ||
+ !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+ return false;
+
+ ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+ __ptrauth_save_key(ctxt->sys_regs, APIA);
+ __ptrauth_save_key(ctxt->sys_regs, APIB);
+ __ptrauth_save_key(ctxt->sys_regs, APDA);
+ __ptrauth_save_key(ctxt->sys_regs, APDB);
+ __ptrauth_save_key(ctxt->sys_regs, APGA);
+
+ vcpu_ptrauth_enable(vcpu);
+
+ val = read_sysreg(hcr_el2);
+ val |= (HCR_API | HCR_APK);
+ write_sysreg(val, hcr_el2);
+
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (__hyp_handle_fpsimd(vcpu))
return true;
+ if (__hyp_handle_ptrauth(vcpu))
+ return true;
+
if (!__populate_fault_info(vcpu))
return true;
@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = vcpu->arch.host_cpu_context;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu = kern_hyp_va(vcpu);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
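__ptrauth_save_key() builds the register names by token pasting, so each invocation saves one LO/HI key pair. For example, __ptrauth_save_key(ctxt->sys_regs, APIA) expands to (a sketch of the preprocessor output):

ctxt->sys_regs[APIAKEYLO_EL1] = read_sysreg_s(SYS_APIAKEYLO_EL1);
ctxt->sys_regs[APIAKEYHI_EL1] = read_sysreg_s(SYS_APIAKEYHI_EL1);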
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index ea5d22fbdacf..cc7e957f5b2c 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -39,7 +39,6 @@ static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
- ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
@@ -123,7 +122,6 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
isb();
}
- write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
@@ -267,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
@@ -303,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 290154e32c0b..8c0035cab6b6 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1084,7 +1084,7 @@ void stage2_unmap_vm(struct kvm *kvm)
int idx;
idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -1092,7 +1092,7 @@ void stage2_unmap_vm(struct kvm *kvm)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -1848,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
/* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, hva, hva + 1);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
@@ -1879,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (vma_pagesize == PMD_SIZE ||
(vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
/* We need minimum second+third level pages */
ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2456,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
(kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -2515,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index e71d00bb5271..b5ae3a5d509e 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 80985439bfb2..baf5ce9225ce 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,6 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
switch (reg) {
case CSSELR_EL1: *val = read_sysreg_s(SYS_CSSELR_EL1); break;
case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
- case ACTLR_EL1: *val = read_sysreg_s(SYS_ACTLR_EL1); break;
case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
@@ -118,7 +117,6 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
switch (reg) {
case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); break;
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
- case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); break;
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
@@ -1034,16 +1032,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- kvm_arm_vcpu_ptrauth_trap(vcpu);
-
/*
- * Return false for both cases as we never skip the trapped
- * instruction:
- *
- * - Either we re-execute the same key register access instruction
- * after enabling ptrauth.
- * - Or an UNDEF is injected as ptrauth is not supported/enabled.
+ * If we land here, it is because we didn't fix up the access on exit
+ * by allowing the PtrAuth sysregs. The only way this can happen is when
+ * the guest does not have PtrAuth support enabled.
*/
+ kvm_inject_undefined(vcpu);
+
return false;
}
@@ -1319,10 +1314,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
+ int reg = r->reg;
+
+ /* See the 32bit mapping in kvm_host.h */
+ if (p->is_aarch32)
+ reg = r->reg / 2;
+
if (p->is_write)
- vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+ vcpu_write_sys_reg(vcpu, p->regval, reg);
else
- p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+ p->regval = vcpu_read_sys_reg(vcpu, reg);
return true;
}
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 9cb6b4c8355a..aa9d356451eb 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -27,6 +27,14 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
return ignore_write(vcpu, p);
p->regval = vcpu_read_sys_reg(vcpu, ACTLR_EL1);
+
+ if (p->is_aarch32) {
+ if (r->Op2 & 2)
+ p->regval = upper_32_bits(p->regval);
+ else
+ p->regval = lower_32_bits(p->regval);
+ }
+
return true;
}
@@ -47,6 +55,8 @@ static const struct sys_reg_desc genericv8_cp15_regs[] = {
/* ACTLR */
{ Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
access_actlr },
+ { Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b011),
+ access_actlr },
};
static struct kvm_sys_reg_target_table genericv8_target_table = {
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 60eccae2abad..78b87a64ca0a 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -14,7 +14,11 @@ static u64 accumulate(u64 sum, u64 data)
return tmp + (tmp >> 64);
}
-unsigned int do_csum(const unsigned char *buff, int len)
+/*
+ * We over-read the buffer and this makes KASAN unhappy. Instead, disable
+ * instrumentation and call kasan explicitly.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
{
unsigned int offset, shift, sum;
const u64 *ptr;
@@ -42,7 +46,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
* odd/even alignment, and means we can ignore it until the very end.
*/
shift = offset * 8;
- data = READ_ONCE_NOCHECK(*ptr++);
+ data = *ptr++;
#ifdef __LITTLE_ENDIAN
data = (data >> shift) << shift;
#else
@@ -58,10 +62,10 @@ unsigned int do_csum(const unsigned char *buff, int len)
while (unlikely(len > 64)) {
__uint128_t tmp1, tmp2, tmp3, tmp4;
- tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
- tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
- tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
- tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+ tmp1 = *(__uint128_t *)ptr;
+ tmp2 = *(__uint128_t *)(ptr + 2);
+ tmp3 = *(__uint128_t *)(ptr + 4);
+ tmp4 = *(__uint128_t *)(ptr + 6);
len -= 64;
ptr += 8;
@@ -85,7 +89,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
__uint128_t tmp;
sum64 = accumulate(sum64, data);
- tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+ tmp = *(__uint128_t *)ptr;
len -= 16;
ptr += 2;
@@ -100,7 +104,7 @@ unsigned int do_csum(const unsigned char *buff, int len)
}
if (len > 0) {
sum64 = accumulate(sum64, data);
- data = READ_ONCE_NOCHECK(*ptr);
+ data = *ptr;
len -= 8;
}
/*
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 0da020c563e6..0b8da1cc1c07 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -22,7 +22,6 @@
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/memory.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptdump.h>
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index df8ae73d950b..8afb238ff335 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -36,7 +36,6 @@
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
@@ -498,11 +497,11 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -511,7 +510,7 @@ retry:
might_sleep();
#ifdef CONFIG_DEBUG_VM
if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
goto no_context;
}
#endif
@@ -533,7 +532,7 @@ retry:
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Handle the "normal" (no error) case first.
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 2339811f317b..7291b26ce788 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -18,7 +18,6 @@
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -191,7 +190,7 @@ void __init kasan_copy_shadow(pgd_t *pgdir)
pgdp = pgd_offset_k(KASAN_SHADOW_START);
pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
- pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+ pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
do {
set_pgd(pgdp_new, READ_ONCE(*pgdp));
} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e7fbc6275329..990929c8837e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -341,7 +341,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
int flags)
{
unsigned long addr, end, next;
- pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
+ pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);
/*
* If the virtual and physical address don't have the same offset
@@ -663,13 +663,13 @@ static void __init map_kernel(pgd_t *pgdp)
&vmlinux_initdata, 0, VM_NO_GUARD);
map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
- if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
+ if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
/*
* The fixmap falls in a separate pgd to the kernel, and doesn't
* live in the carveout for the swapper_pg_dir. We can simply
* re-use the existing dir for the fixmap.
*/
- set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
+ set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
READ_ONCE(*pgd_offset_k(FIXADDR_START)));
} else if (CONFIG_PGTABLE_LEVELS > 3) {
pgd_t *bm_pgdp;
@@ -682,7 +682,7 @@ static void __init map_kernel(pgd_t *pgdp)
* entry instead.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
+ bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 4175bcb8ccb3..23f648c2a199 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -8,7 +8,6 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
-#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b7bebb12a56d..796e47a571e6 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -9,11 +9,11 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
-#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index e65e8d82442a..6444ebfd06a6 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -11,6 +11,7 @@ config C6X
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/c6x/include/asm/cacheflush.h b/arch/c6x/include/asm/cacheflush.h
index 4540b40475e6..10922d528de6 100644
--- a/arch/c6x/include/asm/cacheflush.h
+++ b/arch/c6x/include/asm/cacheflush.h
@@ -17,21 +17,6 @@
#include <asm/string.h>
/*
- * virtually-indexed cache management (our cache is physically indexed)
- */
-#define flush_cache_all() do {} while (0)
-#define flush_cache_mm(mm) do {} while (0)
-#define flush_cache_dup_mm(mm) do {} while (0)
-#define flush_cache_range(mm, start, end) do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
-#define flush_cache_vmap(start, end) do {} while (0)
-#define flush_cache_vunmap(start, end) do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do {} while (0)
-#define flush_dcache_mmap_lock(mapping) do {} while (0)
-#define flush_dcache_mmap_unlock(mapping) do {} while (0)
-
-/*
* physically-indexed cache management
*/
#define flush_icache_range(s, e) \
@@ -49,14 +34,12 @@ do { \
(unsigned long) page_address(page) + PAGE_SIZE)); \
} while (0)
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_range((unsigned) (dst), (unsigned) (dst) + (len)); \
} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_C6X_CACHEFLUSH_H */
diff --git a/arch/c6x/include/asm/pgtable.h b/arch/c6x/include/asm/pgtable.h
index 197c473b796a..8a91ceda39fa 100644
--- a/arch/c6x/include/asm/pgtable.h
+++ b/arch/c6x/include/asm/pgtable.h
@@ -26,7 +26,6 @@
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *)0)
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
@@ -64,6 +63,4 @@ extern unsigned long empty_zero_page;
*/
#define pgprot_writecombine pgprot_noncached
-#include <asm-generic/pgtable.h>
-
#endif /* _ASM_C6X_PGTABLE_H */
diff --git a/arch/c6x/kernel/traps.c b/arch/c6x/kernel/traps.c
index ec61034fdf56..2b9121c755be 100644
--- a/arch/c6x/kernel/traps.c
+++ b/arch/c6x/kernel/traps.c
@@ -344,12 +344,13 @@ asmlinkage int process_exception(struct pt_regs *regs)
static int kstack_depth_to_print = 48;
-static void show_trace(unsigned long *stack, unsigned long *endstack)
+static void show_trace(unsigned long *stack, unsigned long *endstack,
+ const char *loglvl)
{
unsigned long addr;
int i;
- pr_debug("Call trace:");
+ printk("%sCall trace:", loglvl);
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
@@ -364,16 +365,17 @@ static void show_trace(unsigned long *stack, unsigned long *endstack)
if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
if (i % 5 == 0)
- pr_debug("\n ");
+ printk("%s\n ", loglvl);
#endif
- pr_debug(" [<%08lx>] %pS\n", addr, (void *)addr);
+ printk("%s [<%08lx>] %pS\n", loglvl, addr, (void *)addr);
i++;
}
}
- pr_debug("\n");
+ printk("%s\n", loglvl);
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
unsigned long *p, *endstack;
int i;
@@ -398,7 +400,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
pr_cont(" %08lx", *p++);
}
pr_cont("\n");
- show_trace(stack, endstack);
+ show_trace(stack, endstack, loglvl);
}
int is_valid_bugaddr(unsigned long addr)
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index 332f51bc68fb..e909587f24c5 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -4,7 +4,7 @@
#ifndef __ASM_CSKY_IO_H
#define __ASM_CSKY_IO_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/types.h>
#include <linux/version.h>
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 9ab4a445ad99..2002cb7f1053 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -32,13 +32,6 @@
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-/* Find an entry in the third-level page table.. */
-#define __pte_offset_t(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- (pmd_page_vaddr(*(dir)) + __pte_offset_t(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep) set_pte((ptep), \
(((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
@@ -54,8 +47,6 @@
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
_CACHE_MASK)
-#define pte_unmap(pte) ((void)(pte))
-
#define __swp_type(x) (((x).val >> 4) & 0xff)
#define __swp_offset(x) ((x).val >> 12)
#define __swp_entry(type, offset) ((swp_entry_t) {((type) << 4) | \
@@ -229,15 +220,6 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
-#define __pgd_offset(address) pgd_index(address)
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -281,19 +263,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
(pgprot_val(newprot)));
}
-/* to find an entry in a page-table-directory */
-static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
-{
- return mm->pgd + pgd_index(address);
-}
-
-/* Find an entry in the third-level page table.. */
-static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
-{
- return (pte_t *) (pmd_page_vaddr(*dir)) +
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-}
-
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
@@ -306,6 +275,4 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#include <asm-generic/pgtable.h>
-
#endif /* __ASM_CSKY_PGTABLE_H */
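[Editor's note: the macros removed here are not lost; this series consolidates them in <linux/pgtable.h>, so any architecture that defines PGDIR_SHIFT, PTRS_PER_PGD and pmd_page_vaddr() gets the lookup helpers for free. A rough paraphrase of the generic definitions, not the verbatim header text:]

	/* Generic helpers that replace the deleted csky macros (paraphrased). */
	#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
	#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
	#define pgd_offset_k(address)	pgd_offset(&init_mm, (address))

	#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
	#define pte_offset_kernel(pmd, address) \
		((pte_t *)pmd_page_vaddr(*(pmd)) + pte_index(address))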
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
index b5ad7d9de18c..6cd82d69c655 100644
--- a/arch/csky/kernel/module.c
+++ b/arch/csky/kernel/module.c
@@ -10,7 +10,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_CPU_CK810
#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index 5a82230bddf9..944ca2fdcdd9 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -18,7 +18,6 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
@@ -344,7 +343,7 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
trace_sys_exit(regs, syscall_get_return_value(current, regs));
}
-extern void show_stack(struct task_struct *task, unsigned long *stack);
+extern void show_stack(struct task_struct *task, unsigned long *stack, const char *loglvl);
void show_regs(struct pt_regs *fp)
{
unsigned long *sp;
@@ -420,6 +419,6 @@ void show_regs(struct pt_regs *fp)
}
pr_cont("\n");
- show_stack(NULL, (unsigned long *)fp->regs[4]);
+ show_stack(NULL, (unsigned long *)fp->regs[4], KERN_INFO);
return;
}
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index 92809e1da723..16ae20a0af34 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -91,14 +91,14 @@ static void notrace walk_stackframe(struct task_struct *task,
static bool print_trace_address(unsigned long pc, void *arg)
{
- print_ip_sym(pc);
+ print_ip_sym((const char *)arg, pc);
return false;
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, NULL);
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}
static bool save_wchan(unsigned long pc, void *arg)
diff --git a/arch/csky/kernel/vdso.c b/arch/csky/kernel/vdso.c
index 60ff7adfad1d..abc3dbc658d4 100644
--- a/arch/csky/kernel/vdso.c
+++ b/arch/csky/kernel/vdso.c
@@ -50,7 +50,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long addr;
struct mm_struct *mm = current->mm;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
addr = get_unmapped_area(NULL, STACK_TOP, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(addr)) {
@@ -70,7 +70,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm->context.vdso = (void *)addr;
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
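[Editor's note: all the down_read()/down_write() calls on mm->mmap_sem become mmap_read_lock()/mmap_write_lock() calls. At this point in the series the wrappers are thin veneers over the renamed rw_semaphore, approximately as below (paraphrased from <linux/mmap_lock.h>). The indirection exists so the lock implementation can change later without touching hundreds of call sites.]

	/* Approximate shape of the <linux/mmap_lock.h> wrappers; not verbatim. */
	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);
	}

	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}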
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 4e6dc68f3258..0b9cbf2cf6a9 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
- int offset = __pgd_offset(address);
+ int offset = pgd_index(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
@@ -120,7 +120,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -170,7 +170,7 @@ good_area:
address);
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -178,7 +178,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -217,7 +217,7 @@ out_of_memory:
do_sigbus:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 3b3f622f5ae9..89ec32e602a1 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -92,7 +92,7 @@ static void __init kmap_pages_init(void)
vaddr = PKMAP_BASE;
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
- pgd = swapper_pg_dir + __pgd_offset(vaddr);
+ pgd = swapper_pg_dir + pgd_index(vaddr);
pud = (pud_t *)pgd;
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index cb64d8647a78..af627128314f 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -24,7 +24,6 @@
#include <asm/setup.h>
#include <asm/cachectl.h>
#include <asm/dma.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
@@ -158,9 +157,9 @@ void __init fixrange_init(unsigned long start, unsigned long end,
unsigned long vaddr;
vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
+ i = pgd_index(vaddr);
+ j = pud_index(vaddr);
+ k = pmd_index(vaddr);
pgd = pgd_base + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
diff --git a/arch/csky/mm/tlb.c b/arch/csky/mm/tlb.c
index eb3ba6c9c927..ed1512381112 100644
--- a/arch/csky/mm/tlb.c
+++ b/arch/csky/mm/tlb.c
@@ -7,7 +7,6 @@
#include <linux/sched.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
/*
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index ec800e9d5aad..d11666d538fe 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -13,7 +13,6 @@ config H8300
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
select GENERIC_CLOCKEVENTS
- select CLKDEV_LOOKUP
select COMMON_CLK
select ARCH_WANT_FRAME_POINTERS
select OF
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
index 9e2701069bbe..5942793f77a0 100644
--- a/arch/h8300/boot/compressed/Makefile
+++ b/arch/h8300/boot/compressed/Makefile
@@ -18,7 +18,7 @@ CONFIG_MEMORY_START ?= 0x00400000
CONFIG_BOOT_LINK_OFFSET ?= 0x00280000
IMAGE_OFFSET := $(shell printf "0x%08x" $$(($(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET))))
-LIBGCC := $(shell $(CROSS-COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC := $(shell $(CROSS-COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null)
LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup -T $(obj)/vmlinux.lds \
--defsym output=$(CONFIG_MEMORY_START)
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index f00828720dc4..ea833a5d8bcf 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -2,7 +2,6 @@
#ifndef _H8300_PGTABLE_H
#define _H8300_PGTABLE_H
#include <asm-generic/pgtable-nopud.h>
-#include <asm-generic/pgtable.h>
extern void paging_init(void);
#define PAGE_NONE __pgprot(0) /* these mean nothing to NO_MM */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to NO_MM */
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index e35cdf092e07..0ef55e3052c9 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -45,7 +45,6 @@
#include <linux/uaccess.h>
#include <asm/traps.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 23a979a85f14..28ac88358a89 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -31,7 +31,6 @@
#include <asm/setup.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/page.h>
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index ef7489b7c459..38d335488a54 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -43,7 +43,6 @@
#include <asm/setup.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index e47a9e0dc278..5d8b969cd8f3 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -115,7 +115,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
static int kstack_depth_to_print = 24;
-void show_stack(struct task_struct *task, unsigned long *esp)
+void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl)
{
unsigned long *stack, addr;
int i;
@@ -125,17 +125,17 @@ void show_stack(struct task_struct *task, unsigned long *esp)
stack = esp;
- pr_info("Stack from %08lx:", (unsigned long)stack);
+ printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (((unsigned long)stack & (THREAD_SIZE - 1)) >=
THREAD_SIZE-4)
break;
if (i % 8 == 0)
- pr_info(" ");
+ printk("%s ", loglvl);
pr_cont(" %08lx", *stack++);
}
- pr_info("\nCall Trace:\n");
+ printk("%s\nCall Trace:\n", loglvl);
i = 0;
stack = esp;
while (((unsigned long)stack & (THREAD_SIZE - 1)) < THREAD_SIZE-4) {
@@ -150,10 +150,10 @@ void show_stack(struct task_struct *task, unsigned long *esp)
*/
if (check_kernel_text(addr)) {
if (i % 4 == 0)
- pr_info(" ");
+ printk("%s ", loglvl);
pr_cont(" [<%08lx>]", addr);
i++;
}
}
- pr_info("\n");
+ printk("%s\n", loglvl);
}
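[Editor's note: forwarding loglvl through "%s" works because the KERN_* constants are ordinary string literals, an ASCII SOH byte followed by a severity digit, so they can travel as a const char * argument. A userspace illustration, with the values paraphrased from <linux/kern_levels.h>:]

	/* KERN_* levels are just 2-byte strings, so they compose via "%s". */
	#include <stdio.h>

	#define KERN_SOH	"\001"
	#define KERN_INFO	KERN_SOH "6"

	static void show(const char *loglvl, const char *msg)
	{
		printf("%s%s\n", loglvl, msg);	/* prefix chosen by the caller */
	}

	int main(void)
	{
		show(KERN_INFO, "Stack from 00001000:");
		return 0;
	}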
diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
index fabffb83930a..d4bc9c16f2df 100644
--- a/arch/h8300/mm/fault.c
+++ b/arch/h8300/mm/fault.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/ptrace.h>
-#include <asm/pgtable.h>
void die(const char *str, struct pt_regs *fp, unsigned long err);
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 27a0020e3771..1f3b345d68b9 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -36,7 +36,6 @@
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
/*
diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c
index 3785f72bf3fc..4a60e2b5eb96 100644
--- a/arch/h8300/mm/memory.c
+++ b/arch/h8300/mm/memory.c
@@ -26,7 +26,6 @@
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/io.h>
diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
index 4c5858b80f0e..c168c6980d05 100644
--- a/arch/hexagon/Makefile
+++ b/arch/hexagon/Makefile
@@ -30,7 +30,7 @@ TIR_NAME := r19
KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
+LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null)
libs-y += $(LIBGCC)
head-y := arch/hexagon/kernel/head.o
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index fb447de45d54..6eff0730e6ef 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -25,29 +25,17 @@
#define LINESIZE 32
#define LINEBITS 5
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
/*
* Flush Dcache range through current map.
*/
extern void flush_dcache_range(unsigned long start, unsigned long end);
+#define flush_dcache_range flush_dcache_range
/*
* Flush Icache range through current map.
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
/*
* Memory-management related flushes are there to ensure in non-physically
@@ -78,6 +66,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
+#define copy_to_user_page copy_to_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
@@ -85,4 +74,6 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
+#include <asm-generic/cacheflush.h>
+
#endif
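[Editor's note: the deleted wall of no-op defines moves into <asm-generic/cacheflush.h>, which now supplies the fallbacks itself. The generic header compiles each stub only when the architecture has not claimed the symbol, which is why hexagon adds self-referential defines such as "#define flush_icache_range flush_icache_range" above. Roughly, paraphrased rather than verbatim:]

	/* Guard pattern in <asm-generic/cacheflush.h> (rough shape). */
	#ifndef flush_icache_range
	static inline void flush_icache_range(unsigned long start,
					      unsigned long end)
	{
		/* generic no-op for architectures with a coherent icache */
	}
	#endif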
diff --git a/arch/hexagon/include/asm/fixmap.h b/arch/hexagon/include/asm/fixmap.h
index 97b1b062e750..920660a04aa4 100644
--- a/arch/hexagon/include/asm/fixmap.h
+++ b/arch/hexagon/include/asm/fixmap.h
@@ -15,8 +15,4 @@
#include <asm-generic/fixmap.h>
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
- (vaddr)), (vaddr)), (vaddr)), (vaddr))
-
#endif
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 2a17d4eb2fa4..dbb22b80b8c4 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -206,33 +206,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_val(*ptep) = _NULL_PTE;
}
-#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
-/**
- * pmd_index - returns the index of the entry in the PMD page
- * which would control the given virtual address
- */
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#endif
-
-/**
- * pgd_index - returns the index of the entry in the PGD page
- * which would control the given virtual address
- *
- * This returns the *index* for the address in the pgd_t
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset - find an offset in a page-table-directory
- */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/*
- * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/**
* pmd_none - check if pmd_entry is mapped
* @pmd_entry: pmd entry
@@ -403,31 +376,14 @@ static inline int pte_exec(pte_t pte)
*/
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
-/*
- * May need to invoke the virtual machine as well...
- */
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-/*
- * pte_offset_map - returns the linear address of the page table entry
- * corresponding to an address
- */
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-
-#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
-
-/* pte_offset_kernel - kernel version of pte_offset */
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
- + __pte_offset(address))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
+}
/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
-#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
/*
* Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
* interpreted as swap information. The remaining free bits are interpreted as
@@ -460,7 +416,4 @@ static inline int pte_exec(pte_t pte)
((type << 1) | \
((offset & 0x7ffff0) << 9) | ((offset & 0xf) << 6)) })
-/* Oh boy. There are a lot of possible arch overrides found in this file. */
-#include <asm-generic/pgtable.h>
-
#endif
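[Editor's note: with pte_offset_map()/pte_offset_kernel() gone from the arch header, hexagon only has to provide pmd_page_vaddr(), the one arch-specific ingredient the generic lookup needs. A hedged sketch of how generic code then reaches a PTE; lookup_pte() is a made-up name for illustration:]

	/* Walking to a PTE with only the common helpers (sketch). */
	static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d = p4d_offset(pgd, addr);
		pud_t *pud = pud_offset(p4d, addr);
		pmd_t *pmd = pmd_offset(pud, addr);

		if (pmd_none(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);	/* uses pmd_page_vaddr() */
	}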
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 69c623b14ddd..904134b37232 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -79,7 +79,7 @@ static const char *ex_name(int ex)
}
static void do_show_stack(struct task_struct *task, unsigned long *fp,
- unsigned long ip)
+ unsigned long ip, const char *loglvl)
{
int kstack_depth_to_print = 24;
unsigned long offset, size;
@@ -93,9 +93,8 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
if (task == NULL)
task = current;
- printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n",
- raw_smp_processor_id(), task->comm,
- task_pid_nr(task));
+ printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(),
+ task->comm, task_pid_nr(task));
if (fp == NULL) {
if (task == current) {
@@ -108,7 +107,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
}
if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
- printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp);
+ printk("%s-- Corrupt frame pointer %p\n", loglvl, fp);
return;
}
@@ -125,8 +124,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);
- printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name,
- offset);
+ printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset);
if (((unsigned long) fp < low) || (high < (unsigned long) fp))
printk(KERN_CONT " (FP out of bounds!)");
if (modname)
@@ -136,8 +134,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
newfp = (unsigned long *) *fp;
if (((unsigned long) newfp) & 0x3) {
- printk(KERN_INFO "-- Corrupt frame pointer %p\n",
- newfp);
+ printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp);
break;
}
@@ -147,7 +144,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
+ 8);
if (regs->syscall_nr != -1) {
- printk(KERN_INFO "-- trap0 -- syscall_nr: %ld",
+ printk("%s-- trap0 -- syscall_nr: %ld", loglvl,
regs->syscall_nr);
printk(KERN_CONT " psp: %lx elr: %lx\n",
pt_psp(regs), pt_elr(regs));
@@ -155,7 +152,7 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
} else {
/* really want to see more ... */
kstack_depth_to_print += 6;
- printk(KERN_INFO "-- %s (0x%lx) badva: %lx\n",
+ printk("%s-- %s (0x%lx) badva: %lx\n", loglvl,
ex_name(pt_cause(regs)), pt_cause(regs),
pt_badva(regs));
}
@@ -178,10 +175,10 @@ static void do_show_stack(struct task_struct *task, unsigned long *fp,
}
}
-void show_stack(struct task_struct *task, unsigned long *fp)
+void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
{
/* Saved link reg is one word above FP */
- do_show_stack(task, fp, 0);
+ do_show_stack(task, fp, 0, loglvl);
}
int die(const char *str, struct pt_regs *regs, long err)
@@ -207,7 +204,7 @@ int die(const char *str, struct pt_regs *regs, long err)
print_modules();
show_regs(regs);
- do_show_stack(current, &regs->r30, pt_elr(regs));
+ do_show_stack(current, &regs->r30, pt_elr(regs), KERN_EMERG);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c
index 25a1d9cfd4cc..b70970ac809f 100644
--- a/arch/hexagon/kernel/vdso.c
+++ b/arch/hexagon/kernel/vdso.c
@@ -52,7 +52,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long vdso_base;
struct mm_struct *mm = current->mm;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
/* Try to get it loaded right near ld.so/glibc. */
@@ -76,7 +76,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm->context.vdso = (void *)vdso_base;
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index f8ddc35cf159..650bca92f0b7 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -11,7 +11,7 @@
*/
#include <linux/types.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
/*
* For clear_user(), exploit previously defined copy_to_user function
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 72334b26317a..cd3808f96b93 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -11,7 +11,6 @@
* exceptions.
*/
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
@@ -55,7 +54,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -107,11 +106,11 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Handle copyin/out exception cases */
if (!user_mode(regs))
@@ -138,7 +137,7 @@ good_area:
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 32240000dc0c..f817f3d5e758 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -40,7 +40,7 @@ $(error Sorry, you need a newer version of the assembler, one that is built from
endif
quiet_cmd_gzip = GZIP $@
-cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
+cmd_gzip = cat $(real-prereqs) | $(_GZIP) -n -f -9 > $@
quiet_cmd_objcopy = OBJCOPY $@
cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index 6d3478f8abc8..708c0fa5d975 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -12,44 +12,22 @@
#include <asm/page.h>
-/*
- * Cache flushing routines. This is the kind of stuff that can be very expensive, so try
- * to avoid them whenever possible.
- */
-
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma,page) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &(page)->flags); \
} while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-extern void flush_icache_range (unsigned long start, unsigned long end);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
extern void clflush_cache_range(void *addr, int size);
-
-#define flush_icache_user_range(vma, page, user_addr, len) \
+#define flush_icache_user_page(vma, page, user_addr, len) \
do { \
unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK); \
flush_icache_range(_addr, _addr + (len)); \
} while (0)
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 787b0a91d255..10850897a91c 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -364,44 +364,13 @@ pgd_index (unsigned long address)
return (region << (PAGE_SHIFT - 6)) | l1index;
}
-
-/* The offset in the 1-level directory is given by the 3 region bits
- (61..63) and the level-1 bits. */
-static inline pgd_t*
-pgd_offset (const struct mm_struct *mm, unsigned long address)
-{
- return mm->pgd + pgd_index(address);
-}
-
-/* In the kernel's mapped region we completely ignore the region number
- (since we know it's in region number 5). */
-#define pgd_offset_k(addr) \
- (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+#define pgd_index pgd_index
/* Look up a pgd entry in the gate area. On IA-64, the gate-area
resides in the kernel-mapped segment, hence we use pgd_offset_k()
here. */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
-#if CONFIG_PGTABLE_LEVELS == 4
-/* Find an entry in the second-level page table.. */
-#define pud_offset(dir,addr) \
- ((pud_t *) p4d_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pmd_offset(dir,addr) \
- ((pmd_t *) pud_page_vaddr(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-/*
- * Find an entry in the third-level page table. This looks more complicated than it
- * should be because some platforms place page tables in high memory.
- */
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_unmap(pte) do { } while (0)
-
/* atomic versions of the some PTE manipulations: */
static inline int
@@ -583,6 +552,5 @@ extern struct page *zero_page_memmap_ptr;
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable-nop4d.h>
-#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */
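[Editor's note: ia64 keeps its unusual region-based pgd_index() as a real C function, and the "#define pgd_index pgd_index" line is the convention that tells <linux/pgtable.h> not to emit its generic version. The shape of the idiom, with a stand-in body:]

	/* Self-#define convention: mark "the arch provides this" (sketch). */
	static inline unsigned long pgd_index(unsigned long address)
	{
		return address >> PGDIR_SHIFT;	/* stand-in for the region math */
	}
	#define pgd_index pgd_index	/* suppress the generic fallback */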
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 7ff574d56429..b3aa46090101 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -114,7 +114,6 @@ static inline long regs_return_value(struct pt_regs *regs)
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
- extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 5c7e79eccaee..8aa473a4b0f4 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -37,7 +37,7 @@
#include <linux/page-flags.h>
#include <asm/intrinsics.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/extable.h>
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index a54eacbc61a9..f932b25fb817 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -37,7 +37,6 @@
#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca.h>
#include <asm/setup.h>
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 2ac926331500..c5efac285bc3 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -38,12 +38,12 @@
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index e6f45170a4b9..30f1ef760136 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -21,18 +21,19 @@
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/fpu.h>
#include <asm/kregs.h>
#include <asm/mmu_context.h>
#include <asm/asm-offsets.h>
#include <asm/pal.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/mca_asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/export.h>
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index e7862e4cb1e7..6fff934150eb 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -16,6 +16,7 @@
*/
#include <linux/module.h>
+#include <linux/pgtable.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
@@ -37,7 +38,6 @@
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 1efcbe5f0c78..d6d4229b28db 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -48,11 +48,11 @@
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index a6d6a0556f08..7a7df944d798 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -17,8 +17,8 @@
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/exception.h>
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6fb54dfa1350..2703f7795672 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1631,7 +1631,7 @@ default_monarch_init_process(struct notifier_block *self, unsigned long val, voi
if (read_trylock(&tasklist_lock)) {
do_each_thread (g, t) {
printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
- show_stack(t, NULL);
+ show_stack(t, NULL, KERN_DEFAULT);
} while_each_thread (g, t);
read_unlock(&tasklist_lock);
}
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 086cfa4999fd..0d6b8cf9d1d0 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -25,9 +25,9 @@
* Use per cpu MCA/INIT stacks for all data.
*/
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <asm/mca.h>
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index df257002950e..971f166873aa 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2260,13 +2260,13 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
* now we atomically find some area in the address space and
* remap the buffer in it.
*/
- down_write(&task->mm->mmap_sem);
+ mmap_write_lock(task->mm);
/* find some free area in address space, must have mmap sem held */
vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
if (IS_ERR_VALUE(vma->vm_start)) {
DPRINT(("Cannot find unmapped area for size %ld\n", size));
- up_write(&task->mm->mmap_sem);
+ mmap_write_unlock(task->mm);
goto error;
}
vma->vm_end = vma->vm_start + size;
@@ -2277,7 +2277,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
/* can only be applied to current task, need to have the mm semaphore held when called */
if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
DPRINT(("Can't remap buffer\n"));
- up_write(&task->mm->mmap_sem);
+ mmap_write_unlock(task->mm);
goto error;
}
@@ -2288,7 +2288,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
insert_vm_struct(mm, vma);
vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
- up_write(&task->mm->mmap_sem);
+ mmap_write_unlock(task->mm);
/*
* keep track of user level virtual address
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 10cb9382ab76..96dfb9e4b16f 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -64,12 +64,13 @@ EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
-void
+static void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
unsigned long ip, sp, bsp;
+ const char *loglvl = arg;
- printk("\nCall Trace:\n");
+ printk("%s\nCall Trace:\n", loglvl);
do {
unw_get_ip(info, &ip);
if (ip == 0)
@@ -77,22 +78,22 @@ ia64_do_show_stack (struct unw_frame_info *info, void *arg)
unw_get_sp(info, &sp);
unw_get_bsp(info, &bsp);
- printk(" [<%016lx>] %pS\n"
+ printk("%s [<%016lx>] %pS\n"
" sp=%016lx bsp=%016lx\n",
- ip, (void *)ip, sp, bsp);
+ loglvl, ip, (void *)ip, sp, bsp);
} while (unw_unwind(info) >= 0);
}
void
-show_stack (struct task_struct *task, unsigned long *sp)
+show_stack (struct task_struct *task, unsigned long *sp, const char *loglvl)
{
if (!task)
- unw_init_running(ia64_do_show_stack, NULL);
+ unw_init_running(ia64_do_show_stack, (void *)loglvl);
else {
struct unw_frame_info info;
unw_init_from_blocked_task(&info, task);
- ia64_do_show_stack(&info, NULL);
+ ia64_do_show_stack(&info, (void *)loglvl);
}
}
@@ -150,7 +151,7 @@ show_regs (struct pt_regs *regs)
((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
}
} else
- show_stack(NULL, NULL);
+ show_stack(NULL, NULL, KERN_DEFAULT);
}
/* local support for deprecated console_print */
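[Editor's note: here the unwinder only offers a void * cookie, so the level string rides through it and is cast back inside the callback; csky's print_trace_address() earlier in this patch does the same. A userspace sketch of the idiom; walk() and print_entry() are hypothetical names:]

	/* Passing the log level through an opaque callback cookie (sketch). */
	#include <stdio.h>

	static void walk(void (*fn)(unsigned long ip, void *arg), void *arg)
	{
		unsigned long ips[] = { 0xa0001000, 0xa0002000 };
		for (int i = 0; i < 2; i++)
			fn(ips[i], arg);
	}

	static void print_entry(unsigned long ip, void *arg)
	{
		const char *loglvl = arg;	/* recover the prefix from the cookie */
		printf("%s [<%08lx>]\n", loglvl, ip);
	}

	int main(void)
	{
		walk(print_entry, (void *)"INFO: ");
		return 0;
	}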
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index bf9c24d9ce84..82aaacf64583 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/elf.h>
#include <linux/tracehook.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 7124fe7bec7c..527a7b896a6e 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -8,10 +8,10 @@
* Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
* Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
*/
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mca_asm.h>
/* Must be relocatable PIC code callable as a C function
@@ -319,5 +319,3 @@ GLOBAL_ENTRY(ia64_dump_cpu_regs)
;;
br.ret.sptk.many rp
END(ia64_dump_cpu_regs)
-
-
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4009383453f7..d2d440fe855b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -25,6 +25,7 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <linux/acpi.h>
#include <linux/console.h>
@@ -56,7 +57,6 @@
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index de35c54f033d..bbfd421e6deb 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -40,7 +40,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6501d9a9a21b..016683b743c2 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -50,7 +50,6 @@
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 3776ef225125..0750f367837d 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -19,9 +19,9 @@
#include <linux/nmi.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/pal.h>
-#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 6b5652ee76f9..d259690eb91a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/pgtable.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/thread_info.h>
#define EMITS_PT_NOTE
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 8786fa5c7612..d7d31c718d2d 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -22,7 +22,6 @@
#include <asm/meminit.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 12242aa0dad1..3a4dec334cc5 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -15,7 +15,6 @@
#include <linux/prefetch.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>
@@ -75,8 +74,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
- /* mmap_sem is performance critical.... */
- prefetchw(&mm->mmap_sem);
+ /* mmap_lock is performance critical.... */
+ prefetchw(&mm->mmap_lock);
/*
* If we're in an interrupt or have no user context, we must not take the fault..
@@ -87,7 +86,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
#ifdef CONFIG_VIRTUAL_MEM_MAP
/*
* If fault is in region 5 and we are in the kernel, we may already
- * have the mmap_sem (pfn_valid macro is called during mmap). There
+ * have the mmap_lock (pfn_valid macro is called during mmap). There
* is no vma for region 5 addr's anyway, so skip getting the semaphore
* and go directly to the exception handling code.
*/
@@ -107,7 +106,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (mask & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma && !prev_vma )
@@ -174,7 +173,7 @@ retry:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -183,7 +182,7 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
check_expansion:
@@ -214,7 +213,7 @@ retry:
goto good_area;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
bad_area_no_up:
#endif
@@ -280,7 +279,7 @@ retry:
return;
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
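[Editor's note: the fault path reads more clearly after the conversion; on VM_FAULT_RETRY the core has already dropped the lock, as the rewritten comment notes, so the handler loops back to mmap_read_lock() without unlocking first. A heavily simplified skeleton of that protocol, with vma validation and error handling omitted:]

	/* Simplified sketch of the retry locking protocol above. */
	static void fault_skeleton(struct mm_struct *mm, unsigned long address,
				   unsigned int flags)
	{
		struct vm_area_struct *vma;
		vm_fault_t fault;

	retry:
		mmap_read_lock(mm);
		vma = find_vma(mm, address);
		fault = handle_mm_fault(vma, address, flags);
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;	/* mmap_lock was released for us */
		}
		mmap_read_unlock(mm);
	}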
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ca760f6cb18f..0b3fb4c7af29 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -118,13 +118,13 @@ ia64_init_addr_space (void)
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -136,13 +136,13 @@ ia64_init_addr_space (void)
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
}
}
diff --git a/arch/m68k/68000/m68EZ328.c b/arch/m68k/68000/m68EZ328.c
index 6a309a3cfbfc..05f137dc257e 100644
--- a/arch/m68k/68000/m68EZ328.c
+++ b/arch/m68k/68000/m68EZ328.c
@@ -17,7 +17,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68EZ328.h>
#ifdef CONFIG_UCSIMM
diff --git a/arch/m68k/68000/m68VZ328.c b/arch/m68k/68000/m68VZ328.c
index 81b5491685a4..ada87b23afdc 100644
--- a/arch/m68k/68000/m68VZ328.c
+++ b/arch/m68k/68000/m68VZ328.c
@@ -22,8 +22,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/rtc.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
#include <asm/bootstd.h>
diff --git a/arch/m68k/68000/timers.c b/arch/m68k/68000/timers.c
index 1c8e8a83c325..e8dfdd2556a5 100644
--- a/arch/m68k/68000/timers.c
+++ b/arch/m68k/68000/timers.c
@@ -22,7 +22,6 @@
#include <linux/clocksource.h>
#include <linux/rtc.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/MC68VZ328.h>
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 60ac1cd8b96f..bd2d29c22a10 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -28,7 +28,7 @@ config COLDFIRE
select CPU_HAS_NO_MULDIV64
select GENERIC_CSUM
select GPIOLIB
- select HAVE_CLK
+ select HAVE_LEGACY_CLK
endchoice
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 5d9288384096..ce6db5e5a5a3 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -135,10 +135,10 @@ vmlinux.gz: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
- gzip -9c vmlinux.tmp >vmlinux.gz
+ $(_GZIP) -9c vmlinux.tmp >vmlinux.gz
rm vmlinux.tmp
else
- gzip -9c vmlinux >vmlinux.gz
+ $(_GZIP) -9c vmlinux >vmlinux.gz
endif
bzImage: vmlinux.bz2
@@ -148,10 +148,10 @@ vmlinux.bz2: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
- bzip2 -1c vmlinux.tmp >vmlinux.bz2
+ $(_BZIP2) -1c vmlinux.tmp >vmlinux.bz2
rm vmlinux.tmp
else
- bzip2 -1c vmlinux >vmlinux.bz2
+ $(_BZIP2) -1c vmlinux >vmlinux.bz2
endif
archclean:
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 4eb911d64e8d..8f23b2fab64c 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -32,7 +32,6 @@
#include <asm/bootinfo-amiga.h>
#include <asm/byteorder.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/irq.h>
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 7d168e6dfb01..762da5d7a415 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -13,7 +13,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-apollo.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/apollohw.h>
#include <asm/irq.h>
#include <asm/machdep.h>
diff --git a/arch/m68k/atari/atasound.c b/arch/m68k/atari/atasound.c
index 1c1181ebb947..a8724d998c39 100644
--- a/arch/m68k/atari/atasound.c
+++ b/arch/m68k/atari/atasound.c
@@ -26,7 +26,6 @@
#include <asm/atarihw.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/atariints.h>
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 6152f9f631d2..ce79b322a99c 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -26,7 +26,6 @@
#include <asm/setup.h>
#include <asm/machdep.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/atarihw.h>
#include <asm/atari_stram.h>
#include <asm/io.h>
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 8ebaabc931cd..50f4d01363df 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -31,7 +31,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/coldfire/pci.c b/arch/m68k/coldfire/pci.c
index 62b0eb6cf69a..84eab0f5e00a 100644
--- a/arch/m68k/coldfire/pci.c
+++ b/arch/m68k/coldfire/pci.c
@@ -216,8 +216,10 @@ static int __init mcf_pci_init(void)
/* Keep a virtual mapping to IO/config space active */
iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE);
- if (iospace == 0)
+ if (iospace == 0) {
+ pci_free_host_bridge(bridge);
return -ENODEV;
+ }
pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n",
(u32) iospace);
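[Editor's note: the coldfire change is an instance of the general unwind-on-failure rule; once the host bridge has been allocated, every later failure exit must release it. A generic userspace illustration of the rule; the names are hypothetical and merely stand in for pci_alloc_host_bridge()/ioremap():]

	/* Unwind-on-failure: release everything acquired so far (sketch). */
	#include <stdlib.h>

	static int init_two_resources(void)
	{
		void *bridge, *window;

		bridge = malloc(64);		/* stands in for the host bridge */
		if (!bridge)
			return -1;

		window = malloc(64);		/* stands in for the io mapping */
		if (!window) {
			free(bridge);		/* the step the patch adds */
			return -1;
		}

		free(window);
		free(bridge);
		return 0;
	}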
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index 27fa9465d19d..2b746f55f419 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -48,7 +48,6 @@ CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_ROM=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 1e2544ecaf88..1ac55e7b47f0 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -254,9 +254,11 @@ static inline void __flush_page_to_ram(void *vaddr)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
-extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);
+extern void flush_icache_user_range(unsigned long address,
+ unsigned long endaddr);
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
@@ -264,7 +266,7 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
{
flush_cache_page(vma, vaddr, page_to_pfn(page));
memcpy(dst, src, len);
- flush_icache_user_range(vma, page, vaddr, len);
+ flush_icache_user_page(vma, page, vaddr, len);
}
static inline void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index 11e9a9dcbfb2..2731f07e7be8 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -9,25 +9,8 @@
#include <asm/mcfsim.h>
#define flush_cache_all() __flush_cache_all()
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_dcache_range(start, len) __flush_dcache_all()
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, len) __flush_icache_all()
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
void mcf_cache_push(void);
@@ -98,4 +81,6 @@ static inline void cache_clear(unsigned long paddr, int len)
__clear_cache_all();
}
+#include <asm-generic/cacheflush.h>
+
#endif /* _M68KNOMMU_CACHEFLUSH_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 0031cd387b75..8d4ec05996c5 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -170,7 +170,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
}
#define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK))
-#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
+#define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd)))
static inline int pte_none(pte_t pte)
{
@@ -311,64 +311,6 @@ static inline pte_t pte_mkcache(pte_t pte)
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
/*
- * Find an entry in a pagetable directory.
- */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/*
- * Find an entry in a kernel pagetable directory.
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * Find an entry in the third-level pagetable.
- */
-#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
-
-/*
- * Disable caching for page at given kernel virtual address.
- */
-static inline void nocache_page(void *vaddr)
-{
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- unsigned long addr = (unsigned long) vaddr;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mknocache(*ptep);
-}
-
-/*
- * Enable caching for page at given kernel virtual address.
- */
-static inline void cache_page(void *vaddr)
-{
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- unsigned long addr = (unsigned long) vaddr;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mkcache(*ptep);
-}
-
-/*
* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
*/
#define __swp_type(x) ((x).val & 0xFF)
@@ -380,9 +322,6 @@ static inline void cache_page(void *vaddr)
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
- __pte_offset(addr))
-#define pte_unmap(pte) ((void) 0)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index c66e42917912..b4fc3b4f6bb3 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -18,6 +18,12 @@ extern void init_pointer_table(void *table, int type);
extern void *get_pointer_table(int type);
extern int free_pointer_table(void *table, int type);
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
+
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
return get_pointer_table(TABLE_PTE);
@@ -82,7 +88,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_set(pmd, page);
}
-#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 48f19f0ab1e7..8076467eff4b 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -128,7 +128,7 @@ static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
}
#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
-#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
+#define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK))
@@ -192,91 +192,9 @@ static inline pte_t pte_mkcache(pte_t pte)
return pte;
}
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* to find an entry in a page-table-directory */
-static inline pgd_t *pgd_offset(const struct mm_struct *mm,
- unsigned long address)
-{
- return mm->pgd + pgd_index(address);
-}
-
#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];
-static inline pgd_t *pgd_offset_k(unsigned long address)
-{
- return kernel_pg_dir + (address >> PGDIR_SHIFT);
-}
-
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
-{
- return (pmd_t *)pud_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
-}
-
-/* Find an entry in the third-level page table.. */
-static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
-{
- return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-}
-
-#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_unmap(pte) ((void)0)
-
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-
-/* Prior to calling these routines, the page should have been flushed
- * from both the cache and ATC, or the CPU might not notice that the
- * cache setting for the page has been changed. -jskov
- */
-static inline void nocache_page(void *vaddr)
-{
- unsigned long addr = (unsigned long)vaddr;
-
- if (CPU_IS_040_OR_060) {
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mknocache(*ptep);
- }
-}
-
-static inline void cache_page(void *vaddr)
-{
- unsigned long addr = (unsigned long)vaddr;
-
- if (CPU_IS_040_OR_060) {
- pgd_t *dir;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- dir = pgd_offset_k(addr);
- p4dp = p4d_offset(dir, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset_kernel(pmdp, addr);
- *ptep = pte_mkcache(*ptep);
- }
-}
-
/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
#define __swp_type(x) (((x).val >> 4) & 0xff)
#define __swp_offset(x) ((x).val >> 12)
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index f0e5167de834..aca22c2c1ee2 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -176,7 +176,6 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot);
#define pgprot_dmacoherent(prot) pgprot_dmacoherent(prot)
#endif /* CONFIG_COLDFIRE */
-#include <asm-generic/pgtable.h>
#endif /* !__ASSEMBLY__ */
#endif /* _M68K_PGTABLE_H */
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index ccc4568299e5..87151d67d91e 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -53,6 +53,4 @@ extern void paging_init(void);
#define KMAP_START 0
#define KMAP_END 0xffffffff
-#include <asm-generic/pgtable.h>
-
#endif /* _M68KNOMMU_PGTABLE_H */
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index 0caa18a08437..5b24283a0a42 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -112,8 +112,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_page(pte) \
((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
-#define __pmd_page(pmd) \
-((unsigned long) __va (pmd_val (pmd) & PAGE_MASK))
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
+}
static inline int pte_none (pte_t pte) { return !pte_val (pte); }
static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; }
@@ -127,7 +130,7 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
#define pte_page(pte) virt_to_page(__pte_page(pte))
-#define pmd_page(pmd) virt_to_page(__pmd_page(pmd))
+#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
@@ -171,21 +174,6 @@ static inline pte_t pte_mkcache(pte_t pte) { return pte; }
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
-/* Find an entry in a pagetable directory. */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, address) \
-((mm)->pgd + pgd_index(address))
-
-/* Find an entry in a kernel pagetable directory. */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the third-level pagetable. */
-#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
-#define pte_offset_map(pmd, address) ((pte_t *)page_address(pmd_page(*pmd)) + pte_index(address))
-#define pte_unmap(pte) do { } while (0)
-
/* Macros to (de)construct the fake PTEs representing swap pages. */
#define __swp_type(x) ((x).val & 0x7F)
#define __swp_offset(x) (((x).val) >> 7)
diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h
index ef04c43acd13..93f2a8431c0e 100644
--- a/arch/m68k/include/asm/sun3xflop.h
+++ b/arch/m68k/include/asm/sun3xflop.h
@@ -10,8 +10,8 @@
#ifndef __ASM_SUN3X_FLOPPY_H
#define __ASM_SUN3X_FLOPPY_H
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/sun3x.h>
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index a24cfe4a0d32..dcfb69361408 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -42,7 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
__put_user_asm(__pu_err, __pu_val, ptr, l); \
break; \
case 8: \
- memcpy(ptr, &__pu_val, sizeof (*(ptr))); \
+ memcpy((void __force *)ptr, &__pu_val, sizeof(*(ptr))); \
break; \
default: \
__pu_err = __put_user_bad(); \
@@ -60,7 +60,7 @@ extern int __put_user_bad(void);
* aliasing issues.
*/
-#define __ptr(x) ((unsigned long *)(x))
+#define __ptr(x) ((unsigned long __user *)(x))
#define __put_user_asm(err,x,ptr,bwl) \
__asm__ ("move" #bwl " %0,%1" \
@@ -85,7 +85,7 @@ extern int __put_user_bad(void);
u64 l; \
__typeof__(*(ptr)) t; \
} __gu_val; \
- memcpy(&__gu_val.l, ptr, sizeof(__gu_val.l)); \
+ memcpy(&__gu_val.l, (const void __force *)ptr, sizeof(__gu_val.l)); \
(x) = __gu_val.t; \
break; \
} \
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index a54788458ca3..29de2b3108ea 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -255,6 +255,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/bootinfo-amiga.h>
#include <asm/bootinfo-atari.h>
@@ -264,7 +265,6 @@
#include <asm/bootinfo-vme.h>
#include <asm/setup.h>
#include <asm/entry.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_MAC
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 8f0d9140700f..90ae376b7ab1 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -36,7 +36,6 @@
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
asmlinkage void ret_from_fork(void);
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 748c63bd0081..94b3b274186d 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -23,7 +23,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
/*
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index a63483de7a42..e779b19e0193 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -38,7 +38,6 @@
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
unsigned long memory_start;
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 05610e6924c1..b3ff39588f36 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -47,7 +47,6 @@
#include <asm/setup.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 18a4de7d5934..1c235d8f53f3 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -399,7 +399,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
* Verify that the specified address region actually belongs
* to this process.
*/
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, addr);
if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
goto out_unlock;
@@ -450,7 +450,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
}
}
out_unlock:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
return ret;
}
@@ -472,7 +472,7 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
spinlock_t *ptl;
unsigned long mem_value;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
@@ -501,11 +501,11 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
__put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return mem_value;
bad_access:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* This is not necessarily a bad access; we can get here if
memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page stuff, then re-iterate.
@@ -545,13 +545,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
struct mm_struct *mm = current->mm;
unsigned long mem_value;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return mem_value;
}
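[Editor's note] The hunks above are part of the mmap locking API conversion: open-coded down_read()/up_read() on mm->mmap_sem become mmap_read_lock()/mmap_read_unlock(). At this point in the series the wrappers are thin; roughly (simplified from <linux/mmap_lock.h>):

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}

Routing every acquisition through one API is what later lets the underlying rwsem be renamed and instrumented without touching callers again.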
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 344f93d36a9a..df6fc782754f 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -811,13 +811,13 @@ asmlinkage void buserr_c(struct frame *fp)
static int kstack_depth_to_print = 48;
-void show_trace(unsigned long *stack)
+static void show_trace(unsigned long *stack, const char *loglvl)
{
unsigned long *endstack;
unsigned long addr;
int i;
- pr_info("Call Trace:");
+ printk("%sCall Trace:", loglvl);
addr = (unsigned long)stack + THREAD_SIZE - 1;
endstack = (unsigned long *)(addr & -THREAD_SIZE);
i = 0;
@@ -916,7 +916,7 @@ void show_registers(struct pt_regs *regs)
default:
pr_cont("\n");
}
- show_stack(NULL, (unsigned long *)addr);
+ show_stack(NULL, (unsigned long *)addr, KERN_INFO);
pr_info("Code:");
set_fs(KERNEL_DS);
@@ -935,7 +935,8 @@ void show_registers(struct pt_regs *regs)
pr_cont("\n");
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
unsigned long *p;
unsigned long *endstack;
@@ -949,7 +950,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
- pr_info("Stack from %08lx:", (unsigned long)stack);
+ printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
if (p + 1 > endstack)
@@ -959,7 +960,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
pr_cont(" %08lx", *p++);
}
pr_cont("\n");
- show_trace(stack);
+ show_trace(stack, loglvl);
}
/*
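[Editor's note] Context for the loglvl changes above: pr_info() bakes KERN_INFO into the format string at compile time, so a dumper that should print at the caller's severity has to take the level as a runtime string and prepend it with %s. Illustrative callers (assumptions, not from this patch):

/* Oops path: make the trace as loud as the rest of the report. */
static void report_oops(void)
{
	show_stack(NULL, NULL, KERN_EMERG);	/* NULL task/sp => current */
}

/* Debugging path: keep the trace at a quiet level. */
static void report_debug(void)
{
	show_stack(current, NULL, KERN_DEBUG);
}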
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
index 1b4c562753da..928dbd33fc4a 100644
--- a/arch/m68k/kernel/uboot.c
+++ b/arch/m68k/kernel/uboot.c
@@ -26,7 +26,6 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index d0126ab01360..5c9f3a2d6538 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -36,7 +36,6 @@
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/macintosh.h>
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index 079e64898e6a..5ecb3310e874 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -73,7 +73,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-void flush_icache_range(unsigned long address, unsigned long endaddr)
+void flush_icache_user_range(unsigned long address, unsigned long endaddr)
{
if (CPU_IS_COLDFIRE) {
unsigned long start, end;
@@ -104,9 +104,18 @@ void flush_icache_range(unsigned long address, unsigned long endaddr)
: "di" (FLUSH_I));
}
}
+
+void flush_icache_range(unsigned long address, unsigned long endaddr)
+{
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ flush_icache_user_range(address, endaddr);
+ set_fs(old_fs);
+}
EXPORT_SYMBOL(flush_icache_range);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
if (CPU_IS_COLDFIRE) {
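[Editor's note] The flush_icache_range()/flush_icache_user_range() split above follows the series' new convention: the *_user_* variant may touch user mappings and honours the current address limit, while flush_icache_range() is for kernel text, so it forces KERNEL_DS around the shared implementation. A hypothetical kernel-text caller:

/* Illustrative only: publish freshly written kernel text, e.g. after
 * applying module relocations; the KERNEL_DS wrapper above makes the
 * underlying page-table walk safe for kernel addresses. */
static void publish_kernel_text(void *text, size_t len)
{
	unsigned long start = (unsigned long)text;

	flush_icache_range(start, start + len);
}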
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 3bfb5c8ac3c7..a94a814ad6ad 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -86,7 +86,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -165,7 +165,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -174,7 +174,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return 0;
/*
@@ -182,7 +182,7 @@ good_area:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
@@ -211,6 +211,6 @@ acc_err:
current->thread.faddr = address;
send_sig:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return send_fault_sig(regs);
}
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 6d3147662ff2..53040857a9ed 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -141,7 +141,7 @@ static inline void init_pointer_tables(void)
if (!pmd_present(*pmd))
continue;
- pte_dir = (pte_t *)__pmd_page(*pmd);
+ pte_dir = (pte_t *)pmd_page_vaddr(*pmd);
init_pointer_table(pte_dir, TABLE_PTE);
}
}
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 80064e6d064f..29f47923aa46 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -17,7 +17,6 @@
#include <asm/setup.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 904c2a663977..2bb006bdc31c 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -45,6 +45,31 @@ unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
+/* Prior to calling these routines, the page should have been flushed
+ * from both the cache and ATC, or the CPU might not notice that the
+ * cache setting for the page has been changed. -jskov
+ */
+static inline void nocache_page(void *vaddr)
+{
+ unsigned long addr = (unsigned long)vaddr;
+
+ if (CPU_IS_040_OR_060) {
+ pte_t *ptep = virt_to_kpte(addr);
+
+ *ptep = pte_mknocache(*ptep);
+ }
+}
+
+static inline void cache_page(void *vaddr)
+{
+ unsigned long addr = (unsigned long)vaddr;
+
+ if (CPU_IS_040_OR_060) {
+ pte_t *ptep = virt_to_kpte(addr);
+
+ *ptep = pte_mkcache(*ptep);
+ }
+}
/*
* Motorola 680x0 user's manual recommends using uncached memory for address
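[Editor's note] virt_to_kpte() used above is the helper this series introduces in <linux/pgtable.h>; it replaces the five-level walk that the removed header copies of nocache_page()/cache_page() open-coded. Roughly:

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}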
diff --git a/arch/m68k/mm/sun3kmap.c b/arch/m68k/mm/sun3kmap.c
index ae03555449b8..4f2a7ef8348b 100644
--- a/arch/m68k/mm/sun3kmap.c
+++ b/arch/m68k/mm/sun3kmap.c
@@ -15,7 +15,6 @@
#include <linux/vmalloc.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/sun3mmu.h>
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 5d8d956d9329..dad494224497 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -21,7 +21,6 @@
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/io.h>
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 545a1fe0e119..490700aa2212 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -29,7 +29,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 9bc2da69f80c..5b86d10e0f84 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -32,7 +32,6 @@
#include <asm/bootinfo.h>
#include <asm/bootinfo-vme.h>
#include <asm/byteorder.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index f31890078197..4627de3c0603 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -29,7 +29,6 @@
#include <asm/io.h>
#include <asm/bootinfo.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/traps.h>
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 229ea37dfe1b..7204c0ea0dc7 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -22,7 +22,6 @@
#include <asm/setup.h>
#include <asm/contregs.h>
#include <asm/movs.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/sun3-head.h>
#include <asm/sun3mmu.h>
diff --git a/arch/m68k/sun3/dvma.c b/arch/m68k/sun3/dvma.c
index a2c1c9304895..f15ff16b9997 100644
--- a/arch/m68k/sun3/dvma.c
+++ b/arch/m68k/sun3/dvma.c
@@ -14,7 +14,6 @@
#include <linux/memblock.h>
#include <linux/list.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sun3mmu.h>
#include <asm/dvma.h>
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 582a1284059a..7aa879b7c7ff 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -22,7 +22,6 @@
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sun3mmu.h>
#include <asm/segment.h>
#include <asm/oplib.h>
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 399f3d06125f..4b560f4d3960 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -16,7 +16,6 @@
#include <linux/list.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/dvma.h>
#undef DVMA_DEBUG
diff --git a/arch/m68k/sun3x/dvma.c b/arch/m68k/sun3x/dvma.c
index c4b8aa1d80f4..fef52d222d46 100644
--- a/arch/m68k/sun3x/dvma.c
+++ b/arch/m68k/sun3x/dvma.c
@@ -22,7 +22,6 @@
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
/* IOMMU support */
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c
index be14c899ab7d..74d2fe57524b 100644
--- a/arch/m68k/sun3x/prom.c
+++ b/arch/m68k/sun3x/prom.c
@@ -10,7 +10,6 @@
#include <linux/string.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/sun3xprom.h>
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h
index 11f56c85056b..39f8fb6768d8 100644
--- a/arch/microblaze/include/asm/cacheflush.h
+++ b/arch/microblaze/include/asm/cacheflush.h
@@ -57,9 +57,6 @@ void microblaze_cache_init(void);
#define invalidate_icache() mbc->iin();
#define invalidate_icache_range(start, end) mbc->iinr(start, end);
-#define flush_icache_user_range(vma, pg, adr, len) flush_icache();
-#define flush_icache_page(vma, pg) do { } while (0)
-
#define enable_dcache() mbc->de();
#define disable_dcache() mbc->dd();
/* FIXME for LL-temac driver */
@@ -77,27 +74,9 @@ do { \
flush_dcache_range((unsigned) (addr), (unsigned) (addr) + PAGE_SIZE); \
} while (0);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-
#define flush_cache_page(vma, vmaddr, pfn) \
flush_dcache_range(pfn << PAGE_SHIFT, (pfn << PAGE_SHIFT) + PAGE_SIZE);
-/* MS: kgdb code use this macro, wrong len with FLASH */
-#if 0
-#define flush_cache_range(vma, start, len) { \
- flush_icache_range((unsigned) (start), (unsigned) (start) + (len)); \
- flush_dcache_range((unsigned) (start), (unsigned) (start) + (len)); \
-}
-#endif
-
-#define flush_cache_range(vma, start, len) do { } while (0)
-
static inline void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr,
void *dst, void *src, int len)
@@ -109,12 +88,8 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
flush_dcache_range(addr, addr + PAGE_SIZE);
}
}
+#define copy_to_user_page copy_to_user_page
-static inline void copy_from_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr,
- void *dst, void *src, int len)
-{
- memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */
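[Editor's note] The "#define copy_to_user_page copy_to_user_page" line above is the asm-generic override convention: <asm-generic/cacheflush.h> supplies a default only when the arch has not already claimed the name. Simplified sketch of the generic side:

/* Simplified from <asm-generic/cacheflush.h>; the real header guards
 * every helper the same way. */
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_page(vma, page, vaddr, len);	\
	} while (0)
#endif

#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
#endif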
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 1d7a91252d03..ebb6b7939bb8 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -12,11 +12,11 @@
#include <linux/kernel.h> /* For min/max macros */
#include <linux/highmem.h>
+#include <linux/pgtable.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cache.h>
-#include <asm/pgtable.h>
#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
#include <asm-generic/pgalloc.h>
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 6b056f6545d8..3fa1df90925e 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -21,7 +21,6 @@ extern int mem_init_done;
#define pgd_bad(pgd) (0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr) (1)
-#define pmd_offset(a, b) ((void *) 0)
#define PAGE_NONE __pgprot(0) /* these mean nothing to non MMU */
#define PAGE_SHARED __pgprot(0) /* these mean nothing to non MMU */
@@ -438,27 +437,15 @@ static inline void ptep_mkdirty(struct mm_struct *mm,
/* Convert pmd entry to page */
/* our pmd entry is the effective address of a pte table */
/* returns the effective address of the pte table */
-#define pmd_page_kernel(pmd) ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
+}
/* returns struct *page of the pmd entry*/
#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-
-#define pte_unmap(pte) kunmap_atomic(pte)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
@@ -507,8 +494,6 @@ void __init *early_get_page(void);
#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__
-#include <asm-generic/pgtable.h>
-
extern unsigned long ioremap_bot, ioremap_base;
void setup_memory(void);
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 070ba6139a62..6723c56ec378 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -12,7 +12,7 @@
#include <asm/mmu.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/extable.h>
#include <linux/string.h>
diff --git a/arch/microblaze/include/asm/unwind.h b/arch/microblaze/include/asm/unwind.h
index c327d673622a..3db81777a887 100644
--- a/arch/microblaze/include/asm/unwind.h
+++ b/arch/microblaze/include/asm/unwind.h
@@ -20,7 +20,8 @@ extern struct trap_handler_info microblaze_trap_handlers;
extern const char _hw_exception_handler;
extern const char ex_handler_unhandled;
-void microblaze_unwind(struct task_struct *task, struct stack_trace *trace);
+void microblaze_unwind(struct task_struct *task, struct stack_trace *trace,
+ const char *loglvl);
#endif /* __MICROBLAZE_UNWIND_H */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 95558f32d60a..54411de22fa6 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -68,9 +68,9 @@
#include <asm/entry.h>
#include <asm/current.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/registers.h>
#include <asm/asm-offsets.h>
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index d9a2014a222f..9f12e3c2bb42 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -11,8 +11,8 @@
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index dd121e33b8e3..2310daff1f8a 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -18,6 +18,7 @@
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/of_fdt.h>
+#include <linux/pgtable.h>
#include <asm/setup.h>
#include <asm/sections.h>
@@ -33,7 +34,6 @@
#include <asm/entry.h>
#include <asm/cpuinfo.h>
-#include <asm/pgtable.h>
DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index c9125c328949..bdd6d0c86e16 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -35,7 +35,6 @@
#include <asm/entry.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
@@ -160,9 +159,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
int err = 0, sig = ksig->sig;
unsigned long address = 0;
#ifdef CONFIG_MMU
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
#endif
@@ -198,10 +194,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
address = ((unsigned long)frame->tramp);
#ifdef CONFIG_MMU
- pgdp = pgd_offset(current->mm, address);
- p4dp = p4d_offset(pgdp, address);
- pudp = pud_offset(p4dp, address);
- pmdp = pmd_offset(pudp, address);
+ pmdp = pmd_off(current->mm, address);
preempt_disable();
ptep = pte_offset_map(pmdp, address);
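[Editor's note] pmd_off() is another walker helper added by this series alongside virt_to_kpte(); it collapses the pgd/p4d/pud hops the removed lines spelled out. Roughly:

static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}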
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c
index b4debe283a79..b266c4d6ed9d 100644
--- a/arch/microblaze/kernel/stacktrace.c
+++ b/arch/microblaze/kernel/stacktrace.c
@@ -20,12 +20,12 @@ void save_stack_trace(struct stack_trace *trace)
{
/* Exclude our helper functions from the trace*/
trace->skip += 2;
- microblaze_unwind(NULL, trace);
+ microblaze_unwind(NULL, trace, "");
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- microblaze_unwind(tsk, trace);
+ microblaze_unwind(tsk, trace, "");
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index 45bbba9d919f..94b6fe93147d 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -31,7 +31,7 @@ static int __init kstack_setup(char *s)
}
__setup("kstack=", kstack_setup);
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
unsigned long words_to_show;
u32 fp = (u32) sp;
@@ -50,7 +50,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print))
words_to_show = kstack_depth_to_print;
- pr_info("Kernel Stack:\n");
+ printk("%sKernel Stack:\n", loglvl);
/*
* Make the first line an 'odd' size if necessary to get
@@ -65,11 +65,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
words_to_show -= line1_words;
}
}
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
+ print_hex_dump(loglvl, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
words_to_show << 2, 0);
- pr_info("\n\nCall Trace:\n");
- microblaze_unwind(task, NULL);
- pr_info("\n");
+ printk("%s\n\nCall Trace:\n", loglvl);
+ microblaze_unwind(task, NULL, loglvl);
+ printk("%s\n", loglvl);
if (!task)
task = current;
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
index 34c270cb11fc..778a761af0a7 100644
--- a/arch/microblaze/kernel/unwind.c
+++ b/arch/microblaze/kernel/unwind.c
@@ -154,7 +154,8 @@ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
static void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
- struct stack_trace *trace);
+ struct stack_trace *trace,
+ const char *loglvl);
/**
* unwind_trap - Unwind through a system trap, that stored previous state
@@ -162,16 +163,18 @@ static void microblaze_unwind_inner(struct task_struct *task,
*/
#ifdef CONFIG_MMU
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
- unsigned long fp, struct stack_trace *trace)
+ unsigned long fp, struct stack_trace *trace,
+ const char *loglvl)
{
/* To be implemented */
}
#else
static inline void unwind_trap(struct task_struct *task, unsigned long pc,
- unsigned long fp, struct stack_trace *trace)
+ unsigned long fp, struct stack_trace *trace,
+ const char *loglvl)
{
const struct pt_regs *regs = (const struct pt_regs *) fp;
- microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace);
+ microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace, loglvl);
}
#endif
@@ -184,11 +187,13 @@ static inline void unwind_trap(struct task_struct *task, unsigned long pc,
* the caller's return address.
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
+ * @loglvl : Used for printk log level if (trace == NULL).
*/
static void microblaze_unwind_inner(struct task_struct *task,
unsigned long pc, unsigned long fp,
unsigned long leaf_return,
- struct stack_trace *trace)
+ struct stack_trace *trace,
+ const char *loglvl)
{
int ofs = 0;
@@ -214,11 +219,11 @@ static void microblaze_unwind_inner(struct task_struct *task,
const struct pt_regs *regs =
(const struct pt_regs *) fp;
#endif
- pr_info("HW EXCEPTION\n");
+ printk("%sHW EXCEPTION\n", loglvl);
#ifndef CONFIG_MMU
microblaze_unwind_inner(task, regs->r17 - 4,
fp + EX_HANDLER_STACK_SIZ,
- regs->r15, trace);
+ regs->r15, trace, loglvl);
#endif
return;
}
@@ -228,8 +233,8 @@ static void microblaze_unwind_inner(struct task_struct *task,
if ((return_to >= handler->start_addr)
&& (return_to <= handler->end_addr)) {
if (!trace)
- pr_info("%s\n", handler->trap_name);
- unwind_trap(task, pc, fp, trace);
+ printk("%s%s\n", loglvl, handler->trap_name);
+ unwind_trap(task, pc, fp, trace, loglvl);
return;
}
}
@@ -248,13 +253,13 @@ static void microblaze_unwind_inner(struct task_struct *task,
} else {
/* Have we reached userland? */
if (unlikely(pc == task_pt_regs(task)->pc)) {
- pr_info("[<%p>] PID %lu [%s]\n",
- (void *) pc,
+ printk("%s[<%p>] PID %lu [%s]\n",
+ loglvl, (void *) pc,
(unsigned long) task->pid,
task->comm);
break;
} else
- print_ip_sym(pc);
+ print_ip_sym(loglvl, pc);
}
/* Stop when we reach anything not part of the kernel */
@@ -282,14 +287,16 @@ static void microblaze_unwind_inner(struct task_struct *task,
* @task : Task whose stack we are to unwind (NULL == current)
* @trace : Where to store stack backtrace (PC values).
* NULL == print backtrace to kernel log
+ * @loglvl : Used for printk log level if (trace == NULL).
*/
-void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
+void microblaze_unwind(struct task_struct *task, struct stack_trace *trace,
+ const char *loglvl)
{
if (task) {
if (task == current) {
const struct pt_regs *regs = task_pt_regs(task);
microblaze_unwind_inner(task, regs->pc, regs->r1,
- regs->r15, trace);
+ regs->r15, trace, loglvl);
} else {
struct thread_info *thread_info =
(struct thread_info *)(task->stack);
@@ -299,7 +306,8 @@ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
microblaze_unwind_inner(task,
(unsigned long) &_switch_to,
cpu_context->r1,
- cpu_context->r15, trace);
+ cpu_context->r15,
+ trace, loglvl);
}
} else {
unsigned long pc, fp;
@@ -314,7 +322,7 @@ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace)
);
/* Since we are not a leaf function, use leaf_return = 0 */
- microblaze_unwind_inner(current, pc, fp, 0, trace);
+ microblaze_unwind_inner(current, pc, fp, 0, trace, loglvl);
}
}
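[Editor's note] microblaze_unwind() keeps its two modes, per the kernel-doc above: with a non-NULL trace it only records PCs and never prints (so callers pass "" for loglvl), and with a NULL trace it prints each frame at loglvl. An illustrative caller of both modes (assumption, not from this patch):

static unsigned long entries[16];

static void unwind_both_ways(void)
{
	struct stack_trace trace = {
		.max_entries = ARRAY_SIZE(entries),
		.entries     = entries,
	};

	microblaze_unwind(NULL, &trace, "");          /* collect PCs */
	microblaze_unwind(NULL, NULL, KERN_WARNING);  /* print frames */
}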
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 3248141f8ed5..a2bfe587b491 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -30,7 +30,6 @@
#include <linux/interrupt.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
@@ -125,7 +124,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
@@ -137,12 +136,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
vma = find_vma(mm, address);
@@ -239,7 +238,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -248,7 +247,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* keep track of tlb+htab misses that are good addrs but
@@ -259,7 +258,7 @@ good_area:
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
pte_errors++;
@@ -278,7 +277,7 @@ bad_area_nosemaphore:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
@@ -286,7 +285,7 @@ out_of_memory:
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
return;
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index d943f69784b1..521b59ba716c 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -50,15 +50,6 @@ unsigned long lowmem_size;
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
-
- return pte_offset_kernel(pmd_offset(pud, vaddr), vaddr);
-}
-
static void __init highmem_init(void)
{
pr_debug("%x\n", (u32)PKMAP_BASE);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 68c26cacd930..38ccb909bc9d 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -32,8 +32,8 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 9dc08ee3d6b9..26c63e8161f0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -184,7 +184,7 @@ config AR7
select SYS_SUPPORTS_ZBOOT_UART16550
select GPIOLIB
select VLYNQ
- select HAVE_CLK
+ select HAVE_LEGACY_CLK
help
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
@@ -212,9 +212,7 @@ config ATH79
select DMA_NONCOHERENT
select GPIOLIB
select PINCTRL
- select HAVE_CLK
select COMMON_CLK
- select CLKDEV_LOOKUP
select IRQ_MIPS_CPU
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_EARLY_PRINTK
@@ -301,9 +299,9 @@ config BCM63XX
select SYS_HAS_EARLY_PRINTK
select SWAP_IO_SPACE
select GPIOLIB
- select HAVE_CLK
select MIPS_L1_CACHE_SHIFT_4
select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
help
Support for BCM63XX based boards
@@ -424,6 +422,7 @@ config LANTIQ
select SWAP_IO_SPACE
select BOOT_RAW
select CLKDEV_LOOKUP
+ select HAVE_LEGACY_CLK
select USE_OF
select PINCTRL
select PINCTRL_LANTIQ
@@ -1404,6 +1403,7 @@ config CPU_LOONGSON64
select MIPS_L1_CACHE_SHIFT_6
select GPIOLIB
select SWIOTLB
+ select HAVE_KVM
help
The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor
cores implements the MIPS64R2 instruction set with many extensions,
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 89fa6e62a3b3..da0712ad85f5 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -22,7 +22,6 @@
#include <asm/sgialib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/bootinfo.h>
#undef DEBUG
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index caecbae4b599..724dfddcab92 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -682,6 +682,9 @@
#ifndef cpu_guest_has_htw
#define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
#endif
+#ifndef cpu_guest_has_ldpte
+#define cpu_guest_has_ldpte (cpu_data[0].guest.options & MIPS_CPU_LDPTE)
+#endif
#ifndef cpu_guest_has_mvh
#define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH)
#endif
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 1784d4348c36..743535be7528 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -69,9 +69,6 @@ enum fixed_addresses {
#include <asm-generic/fixmap.h>
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), (vaddr))
-
/*
* Called from pgtable_init()
*/
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e28b5a946e26..363e7a89d173 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -23,6 +23,8 @@
#include <asm/inst.h>
#include <asm/mipsregs.h>
+#include <kvm/iodev.h>
+
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
@@ -66,9 +68,11 @@
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
+#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
+#define KVM_REG_MIPS_CP0_DIAG MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
@@ -78,8 +82,8 @@
#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
-#define KVM_MAX_VCPUS 8
-#define KVM_USER_MEM_SLOTS 8
+#define KVM_MAX_VCPUS 16
+#define KVM_USER_MEM_SLOTS 16
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0
@@ -171,6 +175,9 @@ struct kvm_vcpu_stat {
u64 vz_ghfc_exits;
u64 vz_gpa_exits;
u64 vz_resvd_exits;
+#ifdef CONFIG_CPU_LOONGSON64
+ u64 vz_cpucfg_exits;
+#endif
#endif
u64 halt_successful_poll;
u64 halt_attempted_poll;
@@ -183,11 +190,39 @@ struct kvm_vcpu_stat {
struct kvm_arch_memory_slot {
};
+#ifdef CONFIG_CPU_LOONGSON64
+struct ipi_state {
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ uint64_t buf[4];
+};
+
+struct loongson_kvm_ipi;
+
+struct ipi_io_device {
+ int node_id;
+ struct loongson_kvm_ipi *ipi;
+ struct kvm_io_device device;
+};
+
+struct loongson_kvm_ipi {
+ spinlock_t lock;
+ struct kvm *kvm;
+ struct ipi_state ipistate[16];
+ struct ipi_io_device dev_ipi[4];
+};
+#endif
+
struct kvm_arch {
/* Guest physical mm */
struct mm_struct gpa_mm;
/* Mask of CPUs needing GPA ASID flush */
cpumask_t asid_flush_mask;
+#ifdef CONFIG_CPU_LOONGSON64
+ struct loongson_kvm_ipi ipi;
+#endif
};
#define N_MIPS_COPROC_REGS 32
@@ -225,6 +260,7 @@ struct mips_coproc {
#define MIPS_CP0_WATCH_LO 18
#define MIPS_CP0_WATCH_HI 19
#define MIPS_CP0_TLB_XCONTEXT 20
+#define MIPS_CP0_DIAG 22
#define MIPS_CP0_ECC 26
#define MIPS_CP0_CACHE_ERR 27
#define MIPS_CP0_TAG_LO 28
@@ -276,8 +312,12 @@ enum emulation_result {
#define MIPS3_PG_SHIFT 6
#define MIPS3_PG_FRAME 0x3fffffc0
+#if defined(CONFIG_64BIT)
+#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
+#else
#define VPN2_MASK 0xffffe000
-#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
+#endif
+#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
@@ -892,6 +932,10 @@ void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
unsigned int count);
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void);
+void kvm_loongson_clear_guest_ftlb(void);
+#endif
#endif
void kvm_mips_suspend_mm(int cpu);
@@ -1131,6 +1175,8 @@ extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
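[Editor's note] On 64-bit, VPN2_MASK above now tracks the probed virtual-address width (cpu_vmbits) instead of assuming a 32-bit layout; GENMASK(h, l) builds a mask with bits h..l set, inclusive. A small userspace check of the shape (the kernel's GENMASK is equivalent for unsigned long):

#include <stdio.h>

#define BITS_PER_LONG 64
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* e.g. cpu_vmbits == 48: VPN2 covers bits 47..13 */
	printf("%#lx\n", GENMASK(47, 13)); /* prints 0xffffffffe000 */
	return 0;
}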
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
index e3f446d54827..e0c9cd41f9b9 100644
--- a/arch/mips/include/asm/mach-generic/floppy.h
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -21,7 +21,6 @@
#include <asm/floppy.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/pgtable.h>
/*
* How to access the FDC's registers.
diff --git a/arch/mips/include/asm/mach-jazz/floppy.h b/arch/mips/include/asm/mach-jazz/floppy.h
index 095000c290e5..294ebb834632 100644
--- a/arch/mips/include/asm/mach-jazz/floppy.h
+++ b/arch/mips/include/asm/mach-jazz/floppy.h
@@ -15,7 +15,6 @@
#include <asm/addrspace.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
-#include <asm/pgtable.h>
static inline unsigned char fd_inb(unsigned int base, unsigned int reg)
{
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 796dbb86575b..20d6d40c59a4 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1038,6 +1038,8 @@
/* Disable Branch Return Cache */
#define R10K_DIAG_D_BRC (_ULCAST_(1) << 22)
+/* Flush BTB */
+#define LOONGSON_DIAG_BTB (_ULCAST_(1) << 1)
/* Flush ITLB */
#define LOONGSON_DIAG_ITLB (_ULCAST_(1) << 2)
/* Flush DTLB */
@@ -2874,7 +2876,9 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
+__BUILD_SET_C0(config6)
__BUILD_SET_C0(config7)
+__BUILD_SET_C0(diag)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 1945c8970141..a950fc1ddb4d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -195,28 +195,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_unmap(pte) ((void)(pte))
-
#if defined(CONFIG_CPU_R3K_TLB)
/* Swap entries must have VALID bit cleared. */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index ee5dc0c145b9..1e7d6ce9d8d6 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -172,8 +172,6 @@
extern pte_t invalid_pte_table[PTRS_PER_PTE];
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-
#ifndef __PAGETABLE_PUD_FOLDED
/*
* For 4-level pagetables we define these ourselves; for 3-level the
@@ -222,11 +220,6 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
- return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
-}
-
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
*p4d = p4dval;
@@ -320,15 +313,6 @@ static inline void pud_clear(pud_t *pudp)
#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
@@ -337,24 +321,8 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
#define pud_phys(pud) virt_to_phys((void *)pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
-{
- return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
-}
#endif
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_unmap(pte) ((void)(pte))
-
/*
* Initialize a new pgd / pmd table with invalid pointers.
*/
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 32760b41aa31..dd7a0f552cac 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -736,8 +736,6 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own get_unmapped_area() to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 98f97c85e059..43d1faa02933 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -1012,6 +1012,16 @@ struct loongson3_lsdc2_format { /* Loongson-3 overridden ldc2/sdc2 Load/Store fo
;))))))
};
+struct loongson3_lscsr_format { /* Loongson-3 CPUCFG&CSR read/write format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
/*
* MIPS16e instruction formats (16-bit length)
*/
@@ -1114,6 +1124,7 @@ union mips_instruction {
struct mm16_r5_format mm16_r5_format;
struct loongson3_lswc2_format loongson3_lswc2_format;
struct loongson3_lsdc2_format loongson3_lsdc2_format;
+ struct loongson3_lscsr_format loongson3_lscsr_format;
};
union mips16e_instruction {
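[Editor's note] The new loongson3_lscsr_format above relies on __BITFIELD_FIELD, which declares instruction fields most-significant-first regardless of host endianness by reversing the member order on little-endian builds. Roughly, from <asm/bitfield.h>:

#ifdef __MIPSEB__
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#elif defined(__MIPSEL__)
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#else
#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
#endif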
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 04b9c4068493..495ba7cc56ec 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -14,12 +14,12 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
+#include <linux/pgtable.h>
#include <asm/irq_cpu.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/jazz.h>
-#include <asm/pgtable.h>
#include <asm/tlbmisc.h>
static DEFINE_RAW_SPINLOCK(r4030_lock);
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index c64a297e82b3..014773f0bfcd 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -24,7 +24,6 @@
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/jazzdma.h>
-#include <asm/pgtable.h>
/*
* Set this to one to enable additional vdma debug code.
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 1b5e121c3f0d..04aab419a0fc 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -17,11 +17,11 @@
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/dma-mapping.h>
+#include <linux/pgtable.h>
#include <asm/jazz.h>
#include <asm/jazzdma.h>
#include <asm/reboot.h>
-#include <asm/pgtable.h>
#include <asm/tlbmisc.h>
extern asmlinkage void jazz_handle_int(void);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6b93162d7c5a..def1659fe262 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -2017,8 +2017,10 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
if (cfg2 & LOONGSON_CFG2_LEXT2)
c->ases |= MIPS_ASE_LOONGSON_EXT2;
- if (cfg2 & LOONGSON_CFG2_LSPW)
+ if (cfg2 & LOONGSON_CFG2_LSPW) {
c->options |= MIPS_CPU_LDPTE;
+ c->guest.options |= MIPS_CPU_LDPTE;
+ }
if (cfg3 & LOONGSON_CFG3_LCAMP)
c->ases |= MIPS_ASE_LOONGSON_CAM;
@@ -2074,6 +2076,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
c->writecombine = _CACHE_UNCACHED_ACCELERATED;
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
break;
case PRID_IMP_LOONGSON_64G:
c->cputype = CPU_LOONGSON64;
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 8713b69c5048..3c0c3d1260c1 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/jump_label.h>
-#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
struct mips_hi16 *next;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b2a797557825..ff5320b79100 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -42,7 +42,6 @@
#include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/msa.h>
-#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 414b6e9c900b..2a61641c680b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -39,7 +39,6 @@
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/syscall.h>
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 2525eca9c962..afcf27a877cb 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -30,7 +30,6 @@
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/reg.h>
#include <asm/syscall.h>
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 9058e9dcf080..2f513506a3d5 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -28,7 +28,6 @@
#include <linux/kexec.h>
#include <asm/time.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index e5a2a6ab71ac..9c1a2019113b 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -52,7 +52,7 @@ static void sysrq_handle_tlbdump(int key)
#endif
}
-static struct sysrq_key_op sysrq_tlbdump_op = {
+static const struct sysrq_key_op sysrq_tlbdump_op = {
.handler = sysrq_handle_tlbdump,
.help_msg = "show-tlbs(x)",
.action_msg = "Show TLB entries",
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 22f805a73921..7c32c956156a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -57,7 +57,6 @@
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
@@ -108,26 +107,26 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
-static void show_raw_backtrace(unsigned long reg29)
+static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
- printk("Call Trace:");
+ printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
- printk("\n");
+ printk("%s\n", loglvl);
#endif
while (!kstack_end(sp)) {
unsigned long __user *p =
(unsigned long __user *)(unsigned long)sp++;
if (__get_user(addr, p)) {
- printk(" (Bad stack address)");
+ printk("%s (Bad stack address)", loglvl);
break;
}
if (__kernel_text_address(addr))
- print_ip_sym(addr);
+ print_ip_sym(loglvl, addr);
}
- printk("\n");
+ printk("%s\n", loglvl);
}
#ifdef CONFIG_KALLSYMS
@@ -140,7 +139,8 @@ static int __init set_raw_show_trace(char *str)
__setup("raw_show_trace", set_raw_show_trace);
#endif
-static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
@@ -150,12 +150,12 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
task = current;
if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
- show_raw_backtrace(sp);
+ show_raw_backtrace(sp, loglvl);
return;
}
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
- print_ip_sym(pc);
+ print_ip_sym(loglvl, pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
pr_cont("\n");
@@ -166,19 +166,19 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
- const struct pt_regs *regs)
+ const struct pt_regs *regs, const char *loglvl)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
- printk("Stack :");
+ printk("%sStack :", loglvl);
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0)) {
pr_cont("\n");
- printk(" ");
+ printk("%s ", loglvl);
}
if (i > 39) {
pr_cont(" ...");
@@ -194,10 +194,10 @@ static void show_stacktrace(struct task_struct *task,
i++;
}
pr_cont("\n");
- show_backtrace(task, regs);
+ show_backtrace(task, regs, loglvl);
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
@@ -221,7 +221,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
* the stack in the kernel (not user) address space.
*/
set_fs(KERNEL_DS);
- show_stacktrace(task, &regs);
+ show_stacktrace(task, &regs, loglvl);
set_fs(old_fs);
}
@@ -373,7 +373,7 @@ void show_registers(struct pt_regs *regs)
if (!user_mode(regs))
/* Necessary for getting the correct stack content */
set_fs(KERNEL_DS);
- show_stacktrace(current, regs);
+ show_stacktrace(current, regs, KERN_DEFAULT);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
set_fs(old_fs);
@@ -793,13 +793,13 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 1;
case SIGSEGV:
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)fault_addr);
if (vma && (vma->vm_start <= (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
force_sig_fault(SIGSEGV, si_code, fault_addr);
return 1;
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 3adb7354bc01..242dc5e83847 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -94,7 +94,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct vm_area_struct *vma;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
@@ -187,6 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
ret = 0;
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index b91d145aa2d5..d697752a5723 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
select EXPORT_UASM
select PREEMPT_NOTIFIERS
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select HAVE_KVM_EVENTFD
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
select MMU_NOTIFIER
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 01affc1d21c5..506c4ac0ba1c 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,7 +2,7 @@
# Makefile for KVM support for MIPS
#
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
@@ -13,6 +13,9 @@ kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
fpu.o
kvm-objs += hypcall.o
kvm-objs += mmu.o
+ifdef CONFIG_CPU_LOONGSON64
+kvm-objs += loongson_ipi.o
+endif
ifdef CONFIG_KVM_MIPS_VZ
kvm-objs += vz.o
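[Editor's note] The emulate.c hunks below add MMIO emulation for the unaligned store instructions (swl/swr and their 64-bit twins sdl/sdr): each case merges the relevant bytes of the source register into an aligned word, choosing the merge by the low address bits (imme). A minimal userspace sketch of the OP_SWL merge, matching the first switch below:

#include <assert.h>
#include <stdint.h>

/* Sketch of the OP_SWL merge (big-endian semantics): store the
 * most-significant bytes of 'reg' into the word, starting at byte
 * offset 'imme' = addr & 3. Mirrors the switch on 'imme' below. */
static uint32_t swl_merge(uint32_t word, uint32_t reg, unsigned imme)
{
	switch (imme) {
	case 0:  return (word & 0xffffff00) | (reg >> 24);
	case 1:  return (word & 0xffff0000) | (reg >> 16);
	case 2:  return (word & 0xff000000) | (reg >> 8);
	default: return reg;                  /* imme == 3 */
	}
}

int main(void)
{
	assert(swl_merge(0x11223344, 0xaabbccdd, 1) == 0x1122aabb);
	return 0;
}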
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 7ccf9b096783..5ae82d925197 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1600,9 +1600,11 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ int r;
enum emulation_result er;
u32 rt;
void *data = run->mmio.data;
+ unsigned int imme;
unsigned long curr_pc;
/*
@@ -1660,15 +1662,229 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
vcpu->arch.gprs[rt], *(u8 *)data);
break;
+ case swl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff00) |
+ (vcpu->arch.gprs[rt] >> 24);
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff0000) |
+ (vcpu->arch.gprs[rt] >> 16);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xff000000) |
+ (vcpu->arch.gprs[rt] >> 8);
+ break;
+ case 3:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case swr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ *(u32 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u32 *)data = ((*(u32 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u32 *)data = ((*(u32 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u32 *)data = ((*(u32 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+
+ case sdl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) |
+ ((vcpu->arch.gprs[rt] >> 56) & 0xff);
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) |
+ ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) |
+ ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) |
+ ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) |
+ ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) |
+ ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) |
+ ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
+ break;
+ case 7:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+
+ case sdr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ *(u64 *)data = vcpu->arch.gprs[rt];
+ break;
+ case 1:
+ *(u64 *)data = ((*(u64 *)data) & 0xff) |
+ (vcpu->arch.gprs[rt] << 8);
+ break;
+ case 2:
+ *(u64 *)data = ((*(u64 *)data) & 0xffff) |
+ (vcpu->arch.gprs[rt] << 16);
+ break;
+ case 3:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffff) |
+ (vcpu->arch.gprs[rt] << 24);
+ break;
+ case 4:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffff) |
+ (vcpu->arch.gprs[rt] << 32);
+ break;
+ case 5:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) |
+ (vcpu->arch.gprs[rt] << 40);
+ break;
+ case 6:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) |
+ (vcpu->arch.gprs[rt] << 48);
+ break;
+ case 7:
+ *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) |
+ (vcpu->arch.gprs[rt] << 56);
+ break;
+ default:
+ break;
+ }
+
+ kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case sdc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden sdc2 instructions.
+ * opcode1 instruction
+ * 0x0 gssbx: store 1 byte from GPR
+ * 0x1 gsshx: store 2 bytes from GPR
+ * 0x2 gsswx: store 4 bytes from GPR
+ * 0x3 gssdx: store 8 bytes from GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ *(u8 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u8 *)data);
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ *(u16 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u16 *)data);
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ *(u32 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u32 *)data);
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ *(u64 *)data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(u64 *)data);
+ break;
+ default:
+ kvm_err("Godson Exteneded GS-Store not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
default:
kvm_err("Store not yet supported (inst=0x%08x)\n",
inst.word);
goto out_fail;
}
- run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
+ run->mmio.is_write = 1;
vcpu->mmio_is_write = 1;
+
+ r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, data);
+
+ if (!r) {
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
out_fail:
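
For reference, the swl/swr/sdl/sdr cases added above reproduce the MIPS unaligned-store byte-lane merges in C. The big-endian SWL merge can be modeled as a standalone userspace program (an illustrative sketch only; swl_merge and the sample values are not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Merge the upper bytes of the register into the lower lanes of the
     * aligned memory word, selected by badvaddr & 0x3 (big-endian SWL). */
    static uint32_t swl_merge(uint32_t mem_word, uint32_t gpr, unsigned int imme)
    {
            switch (imme) {
            case 0:  return (mem_word & 0xffffff00) | (gpr >> 24);
            case 1:  return (mem_word & 0xffff0000) | (gpr >> 16);
            case 2:  return (mem_word & 0xff000000) | (gpr >> 8);
            default: return gpr;                    /* imme == 3: whole word */
            }
    }

    int main(void)
    {
            /* Offset 1: the top three register bytes land in the low
             * three lanes of the aligned word; prints 1122aabb. */
            printf("%08x\n", swl_merge(0x11223344, 0xaabbccdd, 1));
            return 0;
    }
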
@@ -1681,9 +1897,11 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
u32 cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ int r;
enum emulation_result er;
unsigned long curr_pc;
u32 op, rt;
+ unsigned int imme;
rt = inst.i_format.rt;
op = inst.i_format.opcode;
@@ -1736,6 +1954,162 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
run->mmio.len = 1;
break;
+ case lwl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 3; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 4; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 5; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 6; /* 4 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case lwr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x3);
+
+ run->mmio.len = 4;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x3;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 7; /* 4 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 8; /* 3 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 9; /* 2 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 10; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case ldl_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 11; /* 1 byte */
+ break;
+ case 1:
+ vcpu->mmio_needed = 12; /* 2 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 13; /* 3 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 14; /* 4 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 15; /* 5 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 16; /* 6 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 17; /* 7 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 18; /* 8 bytes */
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case ldr_op:
+ run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
+ vcpu->arch.host_cp0_badvaddr) & (~0x7);
+
+ run->mmio.len = 8;
+ imme = vcpu->arch.host_cp0_badvaddr & 0x7;
+ switch (imme) {
+ case 0:
+ vcpu->mmio_needed = 19; /* 8 bytes */
+ break;
+ case 1:
+ vcpu->mmio_needed = 20; /* 7 bytes */
+ break;
+ case 2:
+ vcpu->mmio_needed = 21; /* 6 bytes */
+ break;
+ case 3:
+ vcpu->mmio_needed = 22; /* 5 bytes */
+ break;
+ case 4:
+ vcpu->mmio_needed = 23; /* 4 bytes */
+ break;
+ case 5:
+ vcpu->mmio_needed = 24; /* 3 bytes */
+ break;
+ case 6:
+ vcpu->mmio_needed = 25; /* 2 bytes */
+ break;
+ case 7:
+ vcpu->mmio_needed = 26; /* 1 byte */
+ break;
+ default:
+ break;
+ }
+ break;
+
+#ifdef CONFIG_CPU_LOONGSON64
+ case ldc2_op:
+ rt = inst.loongson3_lsdc2_format.rt;
+ switch (inst.loongson3_lsdc2_format.opcode1) {
+ /*
+ * Loongson-3 overridden ldc2 instructions.
+ * opcode1 instruction
+ * 0x0 gslbx: load 1 byte to GPR
+ * 0x1 gslhx: load 2 bytes to GPR
+ * 0x2 gslwx: load 4 bytes to GPR
+ * 0x3 gsldx: load 8 bytes to GPR
+ */
+ case 0x0:
+ run->mmio.len = 1;
+ vcpu->mmio_needed = 27; /* signed */
+ break;
+ case 0x1:
+ run->mmio.len = 2;
+ vcpu->mmio_needed = 28; /* signed */
+ break;
+ case 0x2:
+ run->mmio.len = 4;
+ vcpu->mmio_needed = 29; /* signed */
+ break;
+ case 0x3:
+ run->mmio.len = 8;
+ vcpu->mmio_needed = 30; /* signed */
+ break;
+ default:
+ kvm_err("Godson Exteneded GS-Load for float not yet supported (inst=0x%08x)\n",
+ inst.word);
+ break;
+ }
+ break;
+#endif
+
default:
kvm_err("Load not yet supported (inst=0x%08x)\n",
inst.word);
@@ -1745,6 +2119,16 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
+
+ r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
+ run->mmio.phys_addr, run->mmio.len, run->mmio.data);
+
+ if (!r) {
+ kvm_mips_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ return EMULATE_DONE;
+ }
+
return EMULATE_DO_MMIO;
}
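
The lwl/lwr/ldl/ldr cases added above fold both the instruction and the address offset into vcpu->mmio_needed (values 3..26) so the completion path can reassemble the register. A userspace sketch of the inverse mapping (decode() is illustrative, not a kernel helper; the ranges simply restate the switch statements in the diff):

    #include <stdio.h>

    static const char *decode(int needed, int *offset)
    {
            if (needed >= 3 && needed <= 6)   { *offset = needed - 3;  return "lwl"; }
            if (needed >= 7 && needed <= 10)  { *offset = needed - 7;  return "lwr"; }
            if (needed >= 11 && needed <= 18) { *offset = needed - 11; return "ldl"; }
            if (needed >= 19 && needed <= 26) { *offset = needed - 19; return "ldr"; }
            return "plain load";
    }

    int main(void)
    {
            int off;
            const char *op = decode(14, &off);
            printf("%s, badvaddr offset %d\n", op, off); /* ldl, offset 3 */
            return 0;
    }
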
@@ -2591,28 +2975,125 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
switch (run->mmio.len) {
case 8:
- *gpr = *(s64 *)run->mmio.data;
+ switch (vcpu->mmio_needed) {
+ case 11:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xff) << 56);
+ break;
+ case 12:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffff) << 48);
+ break;
+ case 13:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffff) << 40);
+ break;
+ case 14:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffff) << 32);
+ break;
+ case 15:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24);
+ break;
+ case 16:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16);
+ break;
+ case 17:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8);
+ break;
+ case 18:
+ case 19:
+ *gpr = *(s64 *)run->mmio.data;
+ break;
+ case 20:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff);
+ break;
+ case 21:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff);
+ break;
+ case 22:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
+ ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff);
+ break;
+ case 23:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
+ ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff);
+ break;
+ case 24:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
+ ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff);
+ break;
+ case 25:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
+ ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff);
+ break;
+ case 26:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
+ ((((*(s64 *)run->mmio.data)) >> 56) & 0xff);
+ break;
+ default:
+ *gpr = *(s64 *)run->mmio.data;
+ }
break;
case 4:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s32 *)run->mmio.data;
- else
+ switch (vcpu->mmio_needed) {
+ case 1:
*gpr = *(u32 *)run->mmio.data;
+ break;
+ case 2:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 3:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
+ (((*(s32 *)run->mmio.data) & 0xff) << 24);
+ break;
+ case 4:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
+ (((*(s32 *)run->mmio.data) & 0xffff) << 16);
+ break;
+ case 5:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
+ (((*(s32 *)run->mmio.data) & 0xffffff) << 8);
+ break;
+ case 6:
+ case 7:
+ *gpr = *(s32 *)run->mmio.data;
+ break;
+ case 8:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
+ ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff);
+ break;
+ case 9:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
+ ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff);
+ break;
+ case 10:
+ *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
+ ((((*(s32 *)run->mmio.data)) >> 24) & 0xff);
+ break;
+ default:
+ *gpr = *(s32 *)run->mmio.data;
+ }
break;
case 2:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s16 *) run->mmio.data;
- else
+ if (vcpu->mmio_needed == 1)
*gpr = *(u16 *)run->mmio.data;
+ else
+ *gpr = *(s16 *)run->mmio.data;
break;
case 1:
- if (vcpu->mmio_needed == 2)
- *gpr = *(s8 *) run->mmio.data;
+ if (vcpu->mmio_needed == 1)
+ *gpr = *(u8 *)run->mmio.data;
else
- *gpr = *(u8 *) run->mmio.data;
+ *gpr = *(s8 *)run->mmio.data;
break;
}
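
The 64-bit completion cases above mirror the sdl/sdr merges: each mmio_needed value selects how many freshly read bytes replace the stale GPR contents. A worked example for mmio_needed == 14 (ldl with badvaddr offset 3, i.e. 4 bytes), as a standalone sketch with arbitrary sample values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t old  = 0x1111222233334444ULL;  /* stale GPR contents */
            int64_t  data = 0x55667788;             /* 4 bytes read back */
            uint64_t gpr  = (old & 0xffffffffULL) |
                            (((uint64_t)data & 0xffffffffULL) << 32);

            /* Prints 0x5566778833334444: top half replaced, bottom kept. */
            printf("0x%016llx\n", (unsigned long long)gpr);
            return 0;
    }
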
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 16e1c93b484f..fd716942e302 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -56,6 +56,7 @@
#define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1
#define C0_BADINSTRP 8, 2
+#define C0_PGD 9, 7
#define C0_ENTRYHI 10, 0
#define C0_GUESTCTL1 10, 4
#define C0_STATUS 12, 0
@@ -307,7 +308,10 @@ static void *kvm_mips_build_enter_guest(void *addr)
#ifdef CONFIG_KVM_MIPS_VZ
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
- UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
+ if (cpu_has_ldpte)
+ UASM_i_MFC0(&p, K0, C0_PWBASE);
+ else
+ UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);
/*
@@ -469,8 +473,10 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
u32 *p = addr;
struct uasm_label labels[2];
struct uasm_reloc relocs[2];
+#ifndef CONFIG_CPU_LOONGSON64
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
+#endif
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
@@ -490,6 +496,16 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
*/
preempt_disable();
+#ifdef CONFIG_CPU_LOONGSON64
+ UASM_i_MFC0(&p, K1, C0_PGD);
+ uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+#endif
+ uasm_i_ldpte(&p, K1, 0); /* even */
+ uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_tlbwr(&p);
+#else
/*
* Now for the actual refill bit. A lot of this can be common with the
* Linux TLB refill handler, however we don't need to handle so many
@@ -512,6 +528,7 @@ void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
build_get_ptep(&p, K0, K1);
build_update_entries(&p, K0, K1);
build_tlb_write_entry(&p, &l, &r, tlb_random);
+#endif
preempt_enable();
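
On Loongson-3 the refill above leans on the LDDIR/LDPTE hardware page-table walk (one directory lookup per level, then the even/odd PTE pair) instead of the generic uasm walker. The index arithmetic those steps perform can be sketched in userspace, assuming 4 KiB pages and 512-entry tables at each level (both assumptions are for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PTBITS     9            /* assumed 512-entry tables */

    int main(void)
    {
            uint64_t badv = 0x123456000ULL;   /* arbitrary faulting address */
            unsigned pgd = (badv >> (PAGE_SHIFT + 2 * PTBITS)) & 0x1ff;
            unsigned pmd = (badv >> (PAGE_SHIFT + PTBITS)) & 0x1ff;
            unsigned pte = (badv >> PAGE_SHIFT) & 0x1ff;

            /* lddir level 3 ~ pgd step, level 1 ~ pmd step; ldpte 0/1
             * fetch the even/odd entries of the final pair. */
            printf("pgd %u, pmd %u, pte pair %u/%u\n",
                   pgd, pmd, pte & ~1u, (pte & ~1u) + 1);
            return 0;
    }
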
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index 7257e8b6f5a9..d28c2c9c343e 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -61,27 +61,8 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
* the EXC code will be set when we are actually
* delivering the interrupt:
*/
- switch (intr) {
- case 2:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- /* Queue up an INT exception for the core */
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));
+ kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -89,26 +70,8 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
{
int intr = (int)irq->irq;
- switch (intr) {
- case -2:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));
+ kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
/* Deliver the interrupt of the corresponding priority, if possible. */
@@ -116,50 +79,20 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
int allowed = 0;
- u32 exccode;
+ u32 exccode, ie;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0;
- switch (priority) {
- case MIPS_EXC_INT_TIMER:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IO:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_1:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
-
- case MIPS_EXC_INT_IPI_2:
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
- break;
+ if (priority == MIPS_EXC_MAX)
+ return 0;
- default:
- break;
+ ie = 1 << (kvm_priority_to_irq[priority] + 8);
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & ie)) {
+ allowed = 1;
+ exccode = EXCCODE_INT;
}
/* Are we allowed to deliver the interrupt ??? */
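
The rewrite above works because Cause.IP0..IP7 occupy bits 8..15, so a kernel irq number n always maps to bit n + 8; the deleted per-irq switch (C_IRQ0 for 2, C_IRQ1 for 3, ...) was just this shift spelled out. A quick standalone check:

    #include <stdio.h>

    int main(void)
    {
            /* intr 2 -> 0x00000400 (C_IRQ0), intr 3 -> 0x00000800 (C_IRQ1),
             * intr 4 -> 0x00001000 (C_IRQ2), matching the deleted switch. */
            for (int intr = 2; intr <= 7; intr++)
                    printf("intr %d -> cause bit 0x%08x\n", intr, 1u << (intr + 8));
            return 0;
    }
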
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 3bf0a49725e8..c3e878ca3e07 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -21,11 +21,12 @@
#define MIPS_EXC_NMI 5
#define MIPS_EXC_MCHK 6
#define MIPS_EXC_INT_TIMER 7
-#define MIPS_EXC_INT_IO 8
-#define MIPS_EXC_EXECUTE 9
-#define MIPS_EXC_INT_IPI_1 10
-#define MIPS_EXC_INT_IPI_2 11
-#define MIPS_EXC_MAX 12
+#define MIPS_EXC_INT_IO_1 8
+#define MIPS_EXC_INT_IO_2 9
+#define MIPS_EXC_EXECUTE 10
+#define MIPS_EXC_INT_IPI_1 11
+#define MIPS_EXC_INT_IPI_2 12
+#define MIPS_EXC_MAX 13
/* XXXSL More to follow */
#define C_TI (_ULCAST_(1) << 30)
@@ -38,6 +39,9 @@
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
#endif
+extern u32 *kvm_priority_to_irq;
+u32 kvm_irq_to_priority(u32 irq);
+
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
new file mode 100644
index 000000000000..3681fc8fba38
--- /dev/null
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson-3 Virtual IPI interrupt support.
+ *
+ * Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved.
+ *
+ * Authors: Chen Zhu <zhuchen@loongson.cn>
+ *          Huacai Chen <chenhc@lemote.com>
+ */
+
+#include <linux/kvm_host.h>
+
+#define IPI_BASE 0x3ff01000ULL
+
+#define CORE0_STATUS_OFF 0x000
+#define CORE0_EN_OFF 0x004
+#define CORE0_SET_OFF 0x008
+#define CORE0_CLEAR_OFF 0x00c
+#define CORE0_BUF_20 0x020
+#define CORE0_BUF_28 0x028
+#define CORE0_BUF_30 0x030
+#define CORE0_BUF_38 0x038
+
+#define CORE1_STATUS_OFF 0x100
+#define CORE1_EN_OFF 0x104
+#define CORE1_SET_OFF 0x108
+#define CORE1_CLEAR_OFF 0x10c
+#define CORE1_BUF_20 0x120
+#define CORE1_BUF_28 0x128
+#define CORE1_BUF_30 0x130
+#define CORE1_BUF_38 0x138
+
+#define CORE2_STATUS_OFF 0x200
+#define CORE2_EN_OFF 0x204
+#define CORE2_SET_OFF 0x208
+#define CORE2_CLEAR_OFF 0x20c
+#define CORE2_BUF_20 0x220
+#define CORE2_BUF_28 0x228
+#define CORE2_BUF_30 0x230
+#define CORE2_BUF_38 0x238
+
+#define CORE3_STATUS_OFF 0x300
+#define CORE3_EN_OFF 0x304
+#define CORE3_SET_OFF 0x308
+#define CORE3_CLEAR_OFF 0x30c
+#define CORE3_BUF_20 0x320
+#define CORE3_BUF_28 0x328
+#define CORE3_BUF_30 0x330
+#define CORE3_BUF_38 0x338
+
+static int loongson_vipi_read(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t offset = addr & 0xff;
+ void *pbuf;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ *(uint64_t *)val = s->status;
+ break;
+
+ case CORE0_EN_OFF:
+ *(uint64_t *)val = s->en;
+ break;
+
+ case CORE0_SET_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_CLEAR_OFF:
+ *(uint64_t *)val = 0;
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)val = *(uint64_t *)pbuf;
+ else /* Assume len == 4 */
+ *(uint32_t *)val = *(uint32_t *)pbuf;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
+ gpa_t addr, int len, const void *val)
+{
+ uint32_t core = (addr >> 8) & 3;
+ uint32_t node = (addr >> 44) & 3;
+ uint32_t id = core + node * 4;
+ uint64_t data, offset = addr & 0xff;
+ void *pbuf;
+ struct kvm *kvm = ipi->kvm;
+ struct kvm_mips_interrupt irq;
+ struct ipi_state *s = &(ipi->ipistate[id]);
+
+ data = *(uint64_t *)val;
+ BUG_ON(offset & (len - 1));
+
+ switch (offset) {
+ case CORE0_STATUS_OFF:
+ break;
+
+ case CORE0_EN_OFF:
+ s->en = data;
+ break;
+
+ case CORE0_SET_OFF:
+ s->status |= data;
+ irq.cpu = id;
+ irq.irq = 6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ break;
+
+ case CORE0_CLEAR_OFF:
+ s->status &= ~data;
+ if (!s->status) {
+ irq.cpu = id;
+ irq.irq = -6;
+ kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ }
+ break;
+
+ case CORE0_BUF_20 ... CORE0_BUF_38:
+ pbuf = (void *)s->buf + (offset - 0x20);
+ if (len == 8)
+ *(uint64_t *)pbuf = (uint64_t)data;
+ else /* Assume len == 4 */
+ *(uint32_t *)pbuf = (uint32_t)data;
+ break;
+
+ default:
+ pr_notice("%s with unknown addr %llx\n", __func__, addr);
+ break;
+ }
+
+ return 0;
+}
+
+static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_read(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ unsigned long flags;
+ struct loongson_kvm_ipi *ipi;
+ struct ipi_io_device *ipi_device;
+
+ ipi_device = container_of(dev, struct ipi_io_device, device);
+ ipi = ipi_device->ipi;
+
+ spin_lock_irqsave(&ipi->lock, flags);
+ loongson_vipi_write(ipi, addr, len, val);
+ spin_unlock_irqrestore(&ipi->lock, flags);
+
+ return 0;
+}
+
+static const struct kvm_io_device_ops kvm_ipi_ops = {
+ .read = kvm_ipi_read,
+ .write = kvm_ipi_write,
+};
+
+void kvm_init_loongson_ipi(struct kvm *kvm)
+{
+ int i;
+ unsigned long addr;
+ struct loongson_kvm_ipi *s;
+ struct kvm_io_device *device;
+
+ s = &kvm->arch.ipi;
+ s->kvm = kvm;
+ spin_lock_init(&s->lock);
+
+ /*
+ * Initialize IPI device
+ */
+ for (i = 0; i < 4; i++) {
+ device = &s->dev_ipi[i].device;
+ kvm_iodevice_init(device, &kvm_ipi_ops);
+ addr = (((unsigned long)i) << 44) + IPI_BASE;
+ mutex_lock(&kvm->slots_lock);
+ kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device);
+ mutex_unlock(&kvm->slots_lock);
+ s->dev_ipi[i].ipi = s;
+ s->dev_ipi[i].node_id = i;
+ }
+}
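
In the new device above, bits 8..9 of the guest physical address select the core and bits 44..45 the node, while offset = addr & 0xff folds every core's register block onto the CORE0_* values (which is why only those appear in the switch). Decoding a sample access, as a standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define IPI_BASE 0x3ff01000ULL

    int main(void)
    {
            /* Write to node 1, core 1, SET register (offset 0x108). */
            uint64_t addr = (1ULL << 44) + IPI_BASE + 0x108;
            uint32_t core = (addr >> 8) & 3;
            uint32_t node = (addr >> 44) & 3;

            printf("vcpu id %u, folded offset 0x%02llx\n",
                   core + node * 4, (unsigned long long)(addr & 0xff));
            /* -> vcpu id 5, folded offset 0x08 (CORE0_SET_OFF) */
            return 0;
    }
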
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 3b0148c99c0d..521bd5891e84 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -19,13 +19,13 @@
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <linux/kvm_host.h>
@@ -67,6 +67,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("vz_ghfc", vz_ghfc_exits),
VCPU_STAT("vz_gpa", vz_gpa_exits),
VCPU_STAT("vz_resvd", vz_resvd_exits),
+ VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
#endif
VCPU_STAT("halt_successful_poll", halt_successful_poll),
VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
@@ -129,6 +130,8 @@ int kvm_arch_check_processor_compat(void *opaque)
return 0;
}
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
@@ -148,6 +151,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.gpa_mm.pgd)
return -ENOMEM;
+#ifdef CONFIG_CPU_LOONGSON64
+ kvm_init_loongson_ipi(kvm);
+#endif
+
return 0;
}
@@ -490,7 +497,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
int intr = (int)irq->irq;
struct kvm_vcpu *dvcpu = NULL;
- if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+ if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
+ intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
+ intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
(int)intr);
@@ -499,10 +509,10 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
else
dvcpu = vcpu->kvm->vcpus[irq->cpu];
- if (intr == 2 || intr == 3 || intr == 4) {
+ if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
- } else if (intr == -2 || intr == -3 || intr == -4) {
+ } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
} else {
kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
@@ -1620,6 +1630,34 @@ static struct notifier_block kvm_mips_csr_die_notifier = {
.notifier_call = kvm_mips_csr_die_notify,
};
+static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_2] = C_IRQ2,
+};
+
+static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
+ [MIPS_EXC_INT_TIMER] = C_IRQ5,
+ [MIPS_EXC_INT_IO_1] = C_IRQ0,
+ [MIPS_EXC_INT_IO_2] = C_IRQ1,
+ [MIPS_EXC_INT_IPI_1] = C_IRQ4,
+};
+
+u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
+
+u32 kvm_irq_to_priority(u32 irq)
+{
+ int i;
+
+ for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
+ if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
+ return i;
+ }
+
+ return MIPS_EXC_MAX;
+}
+
static int __init kvm_mips_init(void)
{
int ret;
@@ -1638,6 +1676,9 @@ static int __init kvm_mips_init(void)
if (ret)
return ret;
+ if (boot_cpu_type() == CPU_LOONGSON64)
+ kvm_priority_to_irq = kvm_loongson3_priority_to_irq;
+
register_die_notifier(&kvm_mips_csr_die_notifier);
return 0;
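
kvm_irq_to_priority() is the inverse of the table lookup: it scans kvm_priority_to_irq for the priority whose Cause bit equals 1 << (irq + 8), returning MIPS_EXC_MAX when no mapping exists. A standalone copy of the lookup against the default table (the initializers restate C_IRQ0/1/2/5 as raw bits, and the indices use the renumbered priorities; illustrative only):

    #include <stdio.h>

    #define MIPS_EXC_INT_TIMER 7
    #define MIPS_EXC_MAX       13

    static unsigned int prio_to_irq[MIPS_EXC_MAX] = {
            [7]  = 1u << 15,        /* timer: C_IRQ5 */
            [8]  = 1u << 10,        /* IO 1:  C_IRQ0 */
            [11] = 1u << 11,        /* IPI 1: C_IRQ1 */
            [12] = 1u << 12,        /* IPI 2: C_IRQ2 */
    };

    static unsigned int irq_to_priority(unsigned int irq)
    {
            for (unsigned int i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++)
                    if (prio_to_irq[i] == (1u << (irq + 8)))
                            return i;
            return MIPS_EXC_MAX;
    }

    int main(void)
    {
            printf("irq 2 -> priority %u\n", irq_to_priority(2)); /* 8: INT_IO_1 */
            printf("irq 7 -> priority %u\n", irq_to_priority(7)); /* 7: timer   */
            return 0;
    }
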
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7dad7a293eae..49bd160f4d85 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -168,7 +168,7 @@ static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
clear_page(new_pte);
pmd_populate_kernel(NULL, pmd, new_pte);
}
- return pte_offset(pmd, addr);
+ return pte_offset_kernel(pmd, addr);
}
/* Caller must hold kvm->mm_lock */
@@ -187,8 +187,8 @@ static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
unsigned long end_gpa)
{
- int i_min = __pte_offset(start_gpa);
- int i_max = __pte_offset(end_gpa);
+ int i_min = pte_index(start_gpa);
+ int i_max = pte_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
@@ -215,7 +215,7 @@ static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
if (!pmd_present(pmd[i]))
continue;
- pte = pte_offset(pmd + i, 0);
+ pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gpa;
@@ -312,8 +312,8 @@ static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
- int i_min = __pte_offset(start); \
- int i_max = __pte_offset(end); \
+ int i_min = pte_index(start); \
+ int i_max = pte_index(end); \
int i; \
pte_t old, new; \
\
@@ -346,7 +346,7 @@ static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
if (!pmd_present(pmd[i])) \
continue; \
\
- pte = pte_offset(pmd + i, 0); \
+ pte = pte_offset_kernel(pmd + i, 0); \
if (i == i_max) \
cur_end = end; \
\
@@ -842,8 +842,8 @@ void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
unsigned long end_gva)
{
- int i_min = __pte_offset(start_gva);
- int i_max = __pte_offset(end_gva);
+ int i_min = pte_index(start_gva);
+ int i_max = pte_index(end_gva);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
@@ -877,7 +877,7 @@ static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
if (!pmd_present(pmd[i]))
continue;
- pte = pte_offset(pmd + i, 0);
+ pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gva;
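
The conversions above replace the MIPS-private __pte_offset()/pte_offset() helpers with the generic pte_index()/pte_offset_kernel(); the index arithmetic itself is unchanged. For reference (4 KiB pages and 512 PTEs per table assumed for the example):

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PTRS_PER_PTE 512     /* assumed for the example */

    static unsigned long pte_index(unsigned long address)
    {
            return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    }

    int main(void)
    {
            printf("%#lx\n", pte_index(0x12345000UL));  /* 0x145 */
            return 0;
    }
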
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 5d436c5216cc..1c1fbce3f566 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -20,8 +20,8 @@
#include <asm/cpu.h>
#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>
@@ -622,6 +622,46 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
+#ifdef CONFIG_CPU_LOONGSON64
+void kvm_loongson_clear_guest_vtlb(void)
+{
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ write_gc0_index(0);
+ guest_tlbinvf();
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb);
+
+void kvm_loongson_clear_guest_ftlb(void)
+{
+ int i;
+ int idx = read_gc0_index();
+
+ /* Set root GuestID for root probe and write of guest TLB entry */
+ set_root_gid_to_guest_gid();
+
+ for (i = current_cpu_data.tlbsizevtlb;
+ i < (current_cpu_data.tlbsizevtlb +
+ current_cpu_data.tlbsizeftlbsets);
+ i++) {
+ write_gc0_index(i);
+ guest_tlbinvf();
+ }
+ write_gc0_index(idx);
+
+ clear_root_gid();
+ set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
+}
+EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
+#endif
+
#endif
/**
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 5a11e83dffe6..34ad0b46e610 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -529,6 +529,9 @@ static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MIPS_TE:
r = 1;
break;
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
default:
r = 0;
break;
@@ -594,7 +597,7 @@ static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
pmd_va = pud_va | (k << PMD_SHIFT);
if (pmd_va >= end)
break;
- pte = pte_offset(pmd + k, 0);
+ pte = pte_offset_kernel(pmd + k, 0);
pte_free_kernel(NULL, pte);
}
pmd_free(NULL, pmd);
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 51f51009a53f..d9c462c14163 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -29,6 +29,7 @@
#include <linux/kvm_host.h>
#include "interrupt.h"
+#include "loongson_regs.h"
#include "trace.h"
@@ -126,6 +127,11 @@ static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
return mask;
}
+static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
+{
+ return MIPS_CONF6_LOONGSON_INTIMER | MIPS_CONF6_LOONGSON_EXTIMER;
+}
+
/*
* VZ optionally allows these additional Config bits to be written by root:
* Config: M, [MT]
@@ -180,6 +186,12 @@ static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}
+static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
+{
+ return kvm_vz_config6_guest_wrmask(vcpu) |
+ MIPS_CONF6_LOONGSON_SFBEN | MIPS_CONF6_LOONGSON_FTLBDIS;
+}
+
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
/* VZ guest has already converted gva to gpa */
@@ -225,23 +237,7 @@ static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case 2:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case 3:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case 4:
- kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
@@ -253,44 +249,22 @@ static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
* interrupts are asynchronous to vcpu execution therefore defer guest
* cp0 accesses
*/
- switch (intr) {
- case -2:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
- break;
-
- case -3:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
- break;
-
- case -4:
- kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
- break;
-
- default:
- break;
- }
-
+ kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}
-static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
- [MIPS_EXC_INT_TIMER] = C_IRQ5,
- [MIPS_EXC_INT_IO] = C_IRQ0,
- [MIPS_EXC_INT_IPI_1] = C_IRQ1,
- [MIPS_EXC_INT_IPI_2] = C_IRQ2,
-};
-
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
set_gc0_cause(C_TI);
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
if (cpu_has_guestctl2)
@@ -311,7 +285,7 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
u32 cause)
{
u32 irq = (priority < MIPS_EXC_MAX) ?
- kvm_vz_priority_to_irq[priority] : 0;
+ kvm_priority_to_irq[priority] : 0;
switch (priority) {
case MIPS_EXC_INT_TIMER:
@@ -329,7 +303,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
}
break;
- case MIPS_EXC_INT_IO:
+ case MIPS_EXC_INT_IO_1:
+ case MIPS_EXC_INT_IO_2:
case MIPS_EXC_INT_IPI_1:
case MIPS_EXC_INT_IPI_2:
/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
@@ -966,7 +941,8 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(sel == 2 || /* SRSCtl */
sel == 3)) || /* SRSMap */
(rd == MIPS_CP0_CONFIG &&
- (sel == 7)) || /* Config7 */
+ (sel == 6 || /* Config6 */
+ sel == 7)) || /* Config7 */
(rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
@@ -974,6 +950,11 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
(rd == MIPS_CP0_ERRCTL &&
(sel == 0))) { /* ErrCtl */
val = cop0->reg[rd][sel];
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ val = cop0->reg[rd][sel];
+#endif
} else {
val = 0;
er = EMULATE_FAIL;
@@ -1036,9 +1017,40 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
kvm_write_maari(vcpu, val);
+ } else if (rd == MIPS_CP0_CONFIG &&
+ (sel == 6)) {
+ cop0->reg[rd][sel] = (int)val;
} else if (rd == MIPS_CP0_ERRCTL &&
(sel == 0)) { /* ErrCtl */
/* ignore the written value */
+#ifdef CONFIG_CPU_LOONGSON64
+ } else if (rd == MIPS_CP0_DIAG &&
+ (sel == 0)) { /* Diag */
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (val & LOONGSON_DIAG_BTB) {
+ /* Flush BTB */
+ set_c0_diag(LOONGSON_DIAG_BTB);
+ }
+ if (val & LOONGSON_DIAG_ITLB) {
+ /* Flush ITLB */
+ set_c0_diag(LOONGSON_DIAG_ITLB);
+ }
+ if (val & LOONGSON_DIAG_DTLB) {
+ /* Flush DTLB */
+ set_c0_diag(LOONGSON_DIAG_DTLB);
+ }
+ if (val & LOONGSON_DIAG_VTLB) {
+ /* Flush VTLB */
+ kvm_loongson_clear_guest_vtlb();
+ }
+ if (val & LOONGSON_DIAG_FTLB) {
+ /* Flush FTLB */
+ kvm_loongson_clear_guest_ftlb();
+ }
+ local_irq_restore(flags);
+#endif
} else {
er = EMULATE_FAIL;
}
@@ -1129,6 +1141,77 @@ static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
return EMULATE_FAIL;
}
+#ifdef CONFIG_CPU_LOONGSON64
+static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned int rs, rd;
+ unsigned int hostcfg;
+ unsigned long curr_pc;
+ enum emulation_result er = EMULATE_DONE;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to roll back the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rs = inst.loongson3_lscsr_format.rs;
+ rd = inst.loongson3_lscsr_format.rd;
+ switch (inst.loongson3_lscsr_format.fr) {
+ case 0x8: /* Read CPUCFG */
+ ++vcpu->stat.vz_cpucfg_exits;
+ hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
+
+ switch (vcpu->arch.gprs[rs]) {
+ case LOONGSON_CFG0:
+ vcpu->arch.gprs[rd] = 0x14c000;
+ break;
+ case LOONGSON_CFG1:
+ hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
+ LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
+ LOONGSON_CFG1_SFBP);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG2:
+ hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
+ LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ case LOONGSON_CFG3:
+ vcpu->arch.gprs[rd] = hostcfg;
+ break;
+ default:
+ /* Don't export any other advanced features to guest */
+ vcpu->arch.gprs[rd] = 0;
+ break;
+ }
+ break;
+
+ default:
+ kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
+ inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ /* Roll back the PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL) {
+ kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
+ curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
+
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+#endif
+
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
@@ -1158,6 +1241,11 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
break;
#endif
+#ifdef CONFIG_CPU_LOONGSON64
+ case lwc2_op:
+ er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu);
+ break;
+#endif
case spec3_op:
switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
@@ -1652,6 +1740,7 @@ static u64 kvm_vz_get_one_regs[] = {
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG4,
KVM_REG_MIPS_CP0_CONFIG5,
+ KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXT,
#endif
@@ -1706,7 +1795,7 @@ static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
- if (cpu_guest_has_htw)
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
@@ -1755,7 +1844,7 @@ static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
}
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
sizeof(kvm_vz_get_one_regs_htw)))
return -EFAULT;
@@ -1878,17 +1967,17 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = read_gc0_segctl2();
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwbase();
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwfield();
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwsize();
break;
@@ -1896,7 +1985,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
*v = (long)read_gc0_wired();
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwctl();
break;
@@ -1979,6 +2068,9 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
return -EINVAL;
*v = read_gc0_config5();
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ *v = kvm_read_sw_gc0_config6(cop0);
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
@@ -2101,17 +2193,17 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_segctl2(v);
break;
case KVM_REG_MIPS_CP0_PWBASE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwbase(v);
break;
case KVM_REG_MIPS_CP0_PWFIELD:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwfield(v);
break;
case KVM_REG_MIPS_CP0_PWSIZE:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwsize(v);
break;
@@ -2119,7 +2211,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
change_gc0_wired(MIPSR6_WIRED_WIRED, v);
break;
case KVM_REG_MIPS_CP0_PWCTL:
- if (!cpu_guest_has_htw)
+ if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
write_gc0_pwctl(v);
break;
@@ -2248,6 +2340,14 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
write_gc0_config5(v);
}
break;
+ case KVM_REG_MIPS_CP0_CONFIG6:
+ cur = kvm_read_sw_gc0_config6(cop0);
+ change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
+ if (change) {
+ v = cur ^ change;
+ kvm_write_sw_gc0_config6(cop0, (int)v);
+ }
+ break;
case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
@@ -2580,7 +2680,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
/* restore HTW registers */
- if (cpu_guest_has_htw) {
+ if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
kvm_restore_gc0_pwbase(cop0);
kvm_restore_gc0_pwfield(cop0);
kvm_restore_gc0_pwsize(cop0);
@@ -2597,7 +2697,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* prevents a SC on the next VCPU from succeeding by matching a LL on
* the previous VCPU.
*/
- if (cpu_guest_has_rw_llb)
+ if (vcpu->kvm->created_vcpus > 1)
write_gc0_lladdr(0);
return 0;
@@ -2685,8 +2785,8 @@ static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
}
/* save HTW registers if enabled in guest */
- if (cpu_guest_has_htw &&
- kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
+ if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
+ kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
kvm_save_gc0_pwbase(cop0);
kvm_save_gc0_pwfield(cop0);
kvm_save_gc0_pwsize(cop0);
@@ -2853,8 +2953,12 @@ static int kvm_vz_hardware_enable(void)
write_c0_guestctl0(MIPS_GCTL0_CP0 |
(MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
MIPS_GCTL0_CG | MIPS_GCTL0_CF);
- if (cpu_has_guestctl0ext)
- set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ if (cpu_has_guestctl0ext) {
+ if (current_cpu_type() != CPU_LOONGSON64)
+ set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ else
+ clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
+ }
if (cpu_has_guestid) {
write_c0_guestctl1(0);
@@ -2871,6 +2975,12 @@ static int kvm_vz_hardware_enable(void)
if (cpu_has_guestctl2)
clear_c0_guestctl2(0x3f << 10);
+#ifdef CONFIG_CPU_LOONGSON64
+ /* Control guest CCA attribute */
+ if (cpu_has_csr())
+ csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
+#endif
+
return 0;
}
@@ -2927,6 +3037,9 @@ static int kvm_vz_check_extension(struct kvm *kvm, long ext)
r = 2;
break;
#endif
+ case KVM_CAP_IOEVENTFD:
+ r = 1;
+ break;
default:
r = 0;
break;
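
The new KVM_REG_MIPS_CP0_CONFIG6 setter uses the same changed-bits-under-writemask pattern as the other Config registers: XOR picks out the bits the caller wants to flip, the mask keeps only the writable ones, and a second XOR applies them. A minimal standalone model (the sample values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t cur = 0x00000005, v = 0x0000000a, wrmask = 0x3;
            uint32_t change = (cur ^ v) & wrmask;

            if (change)
                    v = cur ^ change;
            /* Prints 0x00000006: bits 0-1 taken from v, bit 2 kept from cur. */
            printf("stored 0x%08x\n", v);
            return 0;
    }
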
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 83ed37298e66..5a418ba5e75f 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -12,7 +12,6 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
void dump_tlb_regs(void)
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index b97d9c5d8323..10b4bf7f70a3 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -12,7 +12,6 @@
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
extern int r3k_have_wired_reg;
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 4baf965e6fe8..8ae181e08311 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -20,7 +20,6 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 780dd2a567c1..df6755ca1892 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -16,7 +16,6 @@
#include <linux/mm.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
@@ -240,9 +239,6 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
unsigned long kaddr = KSEG0ADDR(pfn << PAGE_SHIFT);
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -253,11 +249,8 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
if (cpu_context(smp_processor_id(), mm) == 0)
return;
- pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
/* Invalid => no such page in the cache. */
if (!(pte_val(*ptep) & _PAGE_PRESENT))
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6fb83ac7c475..49569e5666d7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -29,7 +29,6 @@
#include <asm/cpu-type.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
@@ -653,9 +652,6 @@ static inline void local_r4k_flush_cache_page(void *args)
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
int map_coherent = 0;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
void *vaddr;
@@ -668,11 +664,8 @@ static inline void local_r4k_flush_cache_page(void *args)
return;
addr &= PAGE_MASK;
- pgdp = pgd_offset(mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
- ptep = pte_offset(pmdp, addr);
+ pmdp = pmd_off(mm, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
/*
* If the page isn't marked valid, the page cannot possibly be
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 2d479cc7e66b..03dfbb40ec73 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -17,7 +17,6 @@
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/isadep.h>
#include <asm/io.h>
@@ -169,9 +168,6 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
{
int exec = vma->vm_flags & VM_EXEC;
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -183,11 +179,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
return;
page &= PAGE_MASK;
- pgdp = pgd_offset(mm, page);
- p4dp = p4d_offset(pgdp, page);
- pudp = pud_offset(p4dp, page);
- pmdp = pmd_offset(pudp, page);
- ptep = pte_offset(pmdp, page);
+ pmdp = pmd_off(mm, page);
+ ptep = pte_offset_kernel(pmdp, page);
/*
* If the page isn't marked valid, the page cannot possibly be
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index f8d62cd83b36..01b168a90434 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,7 +97,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -181,7 +181,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -190,7 +190,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -198,7 +198,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -250,14 +250,14 @@ out_of_memory:
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
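
The fault-path changes above are part of the tree-wide mmap_sem -> mmap_lock API conversion: the semantics remain a reader/writer lock, only spelled through mm-specific helpers. A userspace analogy with POSIX rwlocks (the struct and helpers below are stand-ins for illustration, not the kernel's definitions):

    #include <pthread.h>
    #include <stdio.h>

    struct mm_struct { pthread_rwlock_t mmap_lock; };

    static void mmap_read_lock(struct mm_struct *mm)
    {
            pthread_rwlock_rdlock(&mm->mmap_lock);
    }

    static void mmap_read_unlock(struct mm_struct *mm)
    {
            pthread_rwlock_unlock(&mm->mmap_lock);
    }

    int main(void)
    {
            struct mm_struct mm = { PTHREAD_RWLOCK_INITIALIZER };

            mmap_read_lock(&mm);
            puts("holding the mmap lock for read");
            mmap_read_unlock(&mm);
            return 0;
    }
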
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 8e8726992720..5fec7f45d79a 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -90,5 +90,5 @@ void __init kmap_init(void)
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_pte = virt_to_kpte(kmap_vstart);
}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 7c9f0c0a6cd3..336b58173dc7 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -40,7 +40,6 @@
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index c5578897a4fa..cd805b005509 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -20,7 +20,6 @@
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 37c7a01427d2..bd4b0656add3 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -10,7 +10,6 @@
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 6fd6e96fdebb..183ff9f9c026 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -10,7 +10,6 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index ea059cd86496..d7238687d790 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/bootinfo.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/mc.h>
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index eedad47df24f..97dc0511e63f 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/mips-cps.h>
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index c7b94c951d98..736615d68f7a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -12,7 +12,6 @@
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 50f207591b6d..a36622ebea55 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tlbmisc.h>
#include <asm/isadep.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index d7a9d5f211f0..6677dcb72580 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -21,7 +21,6 @@
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 38c204204529..14f8ba93367f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -28,11 +28,11 @@
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 35c2ebd8f094..c10d8b233ab1 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -27,18 +27,22 @@ choice
config SOC_RT288X
bool "RT288x"
select MIPS_L1_CACHE_SHIFT_4
+ select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_RT305X
bool "RT305x"
+ select HAVE_LEGACY_CLK
config SOC_RT3883
bool "RT3883"
+ select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7620
bool "MT7620/8"
select CPU_MIPSR2_IRQ_VI
+ select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7621
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 32bcb8d1dd88..a4daf8ccd16c 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -16,7 +16,6 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/sgialib.h>
#include <asm/time.h>
#include <asm/sn/agent.h>
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index c0e33632bc37..79c434fece52 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <asm/time.h>
-#include <asm/pgtable.h>
#include <asm/sgialib.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/arch.h>
diff --git a/arch/mips/sgi-ip32/ip32-memory.c b/arch/mips/sgi-ip32/ip32-memory.c
index 828ce131c228..be1b2cfc4c3e 100644
--- a/arch/mips/sgi-ip32/ip32-memory.c
+++ b/arch/mips/sgi-ip32/ip32-memory.c
@@ -14,7 +14,6 @@
#include <asm/ip32/crime.h>
#include <asm/bootinfo.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
extern void crime_init(void);
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index caddded56e77..7d6824f7c0e8 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -44,9 +44,9 @@ void invalidate_kernel_vmap_range(void *addr, int size);
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
#else
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len);
-#define flush_icache_user_range flush_icache_user_range
+#define flush_icache_user_page flush_icache_user_page
#include <asm-generic/cacheflush.h>
#endif
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h
index 5717647d14d1..fe986d0e6e3f 100644
--- a/arch/nds32/include/asm/highmem.h
+++ b/arch/nds32/include/asm/highmem.h
@@ -7,7 +7,6 @@
#include <asm/proc-fns.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
/*
* Right now we initialize only a single pte table. It can be extended
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 476cc4dd1709..419f984eef70 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -186,16 +186,10 @@ extern void paging_init(void);
#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-#define pmd_off_k(address) pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
+}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
@@ -346,12 +340,6 @@ static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot)
*
*/
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const unsigned long mask = 0xfff;
@@ -374,8 +362,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
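With pmd_page_vaddr() now a typed inline rather than a macro, nds32 can drop its private pte_index()/pte_offset_*() copies and inherit the generic ones. A minimal sketch of what the consolidated <linux/pgtable.h> supplies in their place (paraphrased; the real header wraps each definition in #ifndef guards):

/*
 * Generic PTE lookup: the architecture only provides pmd_page_vaddr();
 * the index arithmetic is shared across architectures.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}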
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index fcefb62606ca..7347f00451a9 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -3,10 +3,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/sizes.h>
#include <asm/thread_info.h>
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c
index 1e31829cbc2a..3897fd14a21d 100644
--- a/arch/nds32/kernel/module.c
+++ b/arch/nds32/kernel/module.c
@@ -5,7 +5,7 @@
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/moduleloader.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
void *module_alloc(unsigned long size)
{
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index f4d386b52622..6a9772ba7392 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -97,18 +97,19 @@ static void dump_instr(struct pt_regs *regs)
}
#define LOOP_TIMES (100)
-static void __dump(struct task_struct *tsk, unsigned long *base_reg)
+static void __dump(struct task_struct *tsk, unsigned long *base_reg,
+ const char *loglvl)
{
unsigned long ret_addr;
int cnt = LOOP_TIMES, graph = 0;
- pr_emerg("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
while (!kstack_end(base_reg)) {
ret_addr = *base_reg++;
if (__kernel_text_address(ret_addr)) {
ret_addr = ftrace_graph_ret_addr(
tsk, &graph, ret_addr, NULL);
- print_ip_sym(ret_addr);
+ print_ip_sym(loglvl, ret_addr);
}
if (--cnt < 0)
break;
@@ -124,17 +125,17 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
ret_addr = ftrace_graph_ret_addr(
tsk, &graph, ret_addr, NULL);
- print_ip_sym(ret_addr);
+ print_ip_sym(loglvl, ret_addr);
}
if (--cnt < 0)
break;
base_reg = (unsigned long *)next_fp;
}
}
- pr_emerg("\n");
+ printk("%s\n", loglvl);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
unsigned long *base_reg;
@@ -151,7 +152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
else
__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
}
- __dump(tsk, base_reg);
+ __dump(tsk, base_reg, loglvl);
barrier();
}
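The loglvl threading only works end to end because print_ip_sym() grew a matching level argument in the same series; a sketch of the helper as it lands in <linux/kallsyms.h>:

/* Level-aware symbol printer used by the call-trace dumpers above. */
static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
	printk("%s[<%px>] %pS\n", loglvl, (void *)ip, (void *)ip);
}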
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index 90bcae6f8554..e16009a07971 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -130,7 +130,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
#endif
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
addr = vdso_random_addr(vdso_mapping_len);
@@ -185,12 +185,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
up_fail:
mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
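The mmap_sem call sites convert one-to-one to the new mmap locking API, which at this stage is a thin veneer over the very same rwsem; a minimal sketch of the wrappers from <linux/mmap_lock.h> (the unlock variants are analogous):

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_sem) != 0;
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return down_write_killable(&mm->mmap_sem);	/* 0 or -EINTR */
}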
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index 254703653b6f..6eb98a7ad27d 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -35,9 +35,8 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
kunmap_atomic((void *)kaddr);
local_irq_restore(flags);
}
-EXPORT_SYMBOL(flush_icache_page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
unsigned long kaddr;
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index f331e533edc2..8fb73f6401a0 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
extern void die(const char *str, struct pt_regs *regs, long err);
@@ -127,12 +126,12 @@ void do_page_fault(unsigned long entry, unsigned long addr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!user_mode(regs) &&
!search_exception_tables(instruction_pointer(regs)))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -211,7 +210,7 @@ good_area:
/*
* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if (fault_signal_pending(fault, regs)) {
@@ -248,7 +247,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -256,7 +255,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -264,7 +263,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
@@ -324,14 +323,14 @@ no_context:
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 91147cca4b64..fa86f7b2f416 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -98,9 +98,6 @@ static pmd_t *fixmap_pmd_p;
static void __init fixedrange_init(void)
{
unsigned long vaddr;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
#ifdef CONFIG_HIGHMEM
pte_t *pte;
@@ -110,10 +107,7 @@ static void __init fixedrange_init(void)
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
+ pmd = pmd_off_k(vaddr);
fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!fixmap_pmd_p)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -126,10 +120,7 @@ static void __init fixedrange_init(void)
*/
vaddr = PKMAP_BASE;
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
+ pmd = pmd_off_k(vaddr);
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
diff --git a/arch/nds32/mm/proc.c b/arch/nds32/mm/proc.c
index 837ae7728830..848c845f5f33 100644
--- a/arch/nds32/mm/proc.c
+++ b/arch/nds32/mm/proc.c
@@ -5,7 +5,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
@@ -16,14 +15,10 @@ extern struct cache_info L1_cache_info[2];
int va_kernel_present(unsigned long addr)
{
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
- p4d = p4d_offset(pgd_offset_k(addr), addr);
- pud = pud_offset(p4d, addr);
- pmd = pmd_offset(pud, addr);
+ pmd = pmd_off_k(addr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_map(pmd, addr);
pte = *ptep;
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 47a1a3ea5734..2600d76c310c 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -102,10 +102,6 @@ static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
*pmdptr = pmdval;
}
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
-
static inline int pte_write(pte_t pte) \
{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) \
@@ -236,27 +232,17 @@ static inline void pte_clear(struct mm_struct *mm,
*/
#define mk_pte(page, prot) (pfn_pte(page_to_pfn(page), prot))
-#define pte_unmap(pte) do { } while (0)
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
-#define pmd_page_vaddr(pmd) pmd_val(pmd)
-
-#define pte_offset_map(dir, addr) \
- ((pte_t *) page_address(pmd_page(*dir)) + \
- (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-/* Get the address to the PTE for a vaddr in specific directory */
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + \
- (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return pmd_val(pmd);
+}
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", \
@@ -285,8 +271,6 @@ static inline void pte_clear(struct mm_struct *mm,
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
extern void __init paging_init(void);
extern void __init mmu_init(void);
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
index e2e3f13f98d5..76e0a42d6e36 100644
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -19,7 +19,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
/*
diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
index 4e704046a150..54f7b23df1bf 100644
--- a/arch/nios2/kernel/nios2_ksyms.c
+++ b/arch/nios2/kernel/nios2_ksyms.c
@@ -8,9 +8,9 @@
#include <linux/export.h>
#include <linux/string.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
/* string functions */
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index a42dd09c6578..d8a087cf2b42 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -252,6 +252,7 @@ static int do_signal(struct pt_regs *regs)
switch (retval) {
case ERESTART_RESTARTBLOCK:
restart = -2;
+ fallthrough;
case ERESTARTNOHAND:
case ERESTARTSYS:
case ERESTARTNOINTR:
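The added fallthrough is the pseudo-keyword from <linux/compiler_attributes.h>: it marks the drop into the next case as intentional so -Wimplicit-fallthrough does not warn. Its definition is roughly:

#if defined(__has_attribute) && __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif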
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
index 486db793923c..b172da4eb1a9 100644
--- a/arch/nios2/kernel/traps.c
+++ b/arch/nios2/kernel/traps.c
@@ -52,12 +52,13 @@ void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
}
/*
- * The show_stack is an external API which we do not use ourselves.
+ * The show_stack() is an external API which we do not use ourselves.
*/
int kstack_depth_to_print = 48;
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
unsigned long *endstack, addr;
int i;
@@ -72,16 +73,16 @@ void show_stack(struct task_struct *task, unsigned long *stack)
addr = (unsigned long) stack;
endstack = (unsigned long *) PAGE_ALIGN(addr);
- pr_emerg("Stack from %08lx:", (unsigned long)stack);
+ printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
for (i = 0; i < kstack_depth_to_print; i++) {
if (stack + 1 > endstack)
break;
if (i % 8 == 0)
- pr_emerg("\n ");
- pr_emerg(" %08lx", *stack++);
+ printk("%s\n ", loglvl);
+ printk("%s %08lx", loglvl, *stack++);
}
- pr_emerg("\nCall Trace:");
+ printk("%s\nCall Trace:", loglvl);
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
@@ -97,11 +98,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
(addr <= (unsigned long) _etext))) {
if (i % 4 == 0)
pr_emerg("\n ");
- pr_emerg(" [<%08lx>]", addr);
+ printk("%s [<%08lx>]", loglvl, addr);
i++;
}
}
- pr_emerg("\n");
+ printk("%s\n", loglvl);
}
void __init trap_init(void)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 964eac1a21d0..4112ef0e247e 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -83,11 +83,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs) && !search_exception_tables(regs->ea))
goto bad_area_nosemaphore;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
vma = find_vma(mm, address);
@@ -160,7 +160,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -169,7 +169,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -177,7 +177,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -215,14 +215,14 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 9afca77d10b1..61862dbb0e32 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -29,7 +29,6 @@
#include <asm/setup.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
@@ -110,14 +109,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct mm_struct *mm = current->mm;
int ret;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/* Map kuser helpers to user space address */
ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYEXEC, kuser_page);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
index 61e24a25f71a..9b587fd592dd 100644
--- a/arch/nios2/mm/pgtable.c
+++ b/arch/nios2/mm/pgtable.c
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
/* pteaddr:
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index 7fea59e53f94..f90ac35f05f3 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -16,7 +16,6 @@
#include <asm/tlb.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#define TLB_INDEX_MASK \
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
index 79d5d7753fe4..eeac40d4a854 100644
--- a/arch/openrisc/include/asm/cacheflush.h
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -62,31 +62,12 @@ static inline void flush_dcache_page(struct page *page)
clear_bit(PG_dc_clean, &page->flags);
}
-/*
- * Other interfaces are not required since we do not have virtually
- * indexed or tagged caches. So we can use the default here.
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- if (vma->vm_flags & VM_EXEC) \
- sync_icache_dcache(page); \
- } while (0)
+#define flush_icache_user_page(vma, page, addr, len) \
+do { \
+ if (vma->vm_flags & VM_EXEC) \
+ sync_icache_dcache(page); \
+} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
#endif /* __ASM_CACHEFLUSH_H */
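openrisc now leans on <asm-generic/cacheflush.h>, which provides a no-op default for every hook the architecture has not claimed; an arch opts out by defining a macro with the hook's own name before the include, as done above for flush_icache_user_page. The fallback pattern looks approximately like:

/* asm-generic fallback: compiled only if the arch did not define the macro. */
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
}
#endif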
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
index e18f038b2a6d..db02fb2077d9 100644
--- a/arch/openrisc/include/asm/io.h
+++ b/arch/openrisc/include/asm/io.h
@@ -26,7 +26,6 @@
#define PIO_MASK 0
#include <asm-generic/io.h>
-#include <asm/pgtable.h>
void __iomem *ioremap(phys_addr_t offset, unsigned long size);
extern void iounmap(void *addr);
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 219979e57790..9425bedab4fc 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -363,38 +363,15 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
}
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-#define __pgd_offset(address) pgd_index(address)
-
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
+}
#define __pmd_offset(address) \
(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define __pte_offset(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_offset_map_nested(dir, address) \
- pte_offset_map(dir, address)
-
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_pfn(x) ((unsigned long)(((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
@@ -438,8 +415,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
typedef pte_t *pte_addr_t;
#endif /* __ASSEMBLY__ */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
index e9a7f0b35a15..4a4639c65cbb 100644
--- a/arch/openrisc/include/asm/tlbflush.h
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/current.h>
#include <linux/sched.h>
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index e435ae01c600..18c703d1d761 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -32,7 +32,6 @@
#include <linux/thread_info.h>
#include <linux/kbuild.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
int main(void)
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index e4a78571f883..bc657e55c15f 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/unistd.h>
@@ -21,7 +22,6 @@
#include <asm/spr_defs.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/asm-offsets.h>
#define DISABLE_INTERRUPTS(t1,t2) \
@@ -1166,13 +1166,13 @@ ENTRY(__sys_clone)
l.movhi r29,hi(sys_clone)
l.ori r29,r29,lo(sys_clone)
l.j _fork_save_extra_regs_and_call
- l.addi r7,r1,0
+ l.nop
ENTRY(__sys_fork)
l.movhi r29,hi(sys_fork)
l.ori r29,r29,lo(sys_fork)
l.j _fork_save_extra_regs_and_call
- l.addi r3,r1,0
+ l.nop
ENTRY(sys_rt_sigreturn)
l.jal _sys_rt_sigreturn
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index b0dc974f9a74..af355e3f4619 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -16,10 +16,10 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/serial_reg.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/spr_defs.h>
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 7d6a62eee2ef..277ac7a55752 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/vmalloc.h>
#include <linux/semaphore.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -26,7 +27,6 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 6bcdca424e11..d7010e72450c 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -36,7 +36,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/spr_defs.h>
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
index 6a5a91c76338..c8f47a623754 100644
--- a/arch/openrisc/kernel/ptrace.c
+++ b/arch/openrisc/kernel/ptrace.c
@@ -27,7 +27,6 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
/*
* Copy the thread state to a regset that can be interpreted by userspace.
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index c0a774b51e45..8aa438e1f51f 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -35,7 +35,6 @@
#include <linux/device.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/io.h>
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index c11aa2e17ce0..206e5325e61b 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -31,7 +31,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
@@ -41,18 +40,20 @@ unsigned long __user *lwa_addr;
void print_trace(void *data, unsigned long addr, int reliable)
{
- pr_emerg("[<%p>] %s%pS\n", (void *) addr, reliable ? "" : "? ",
+ const char *loglvl = data;
+
+ printk("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ",
(void *) addr);
}
/* displays a short stack trace */
-void show_stack(struct task_struct *task, unsigned long *esp)
+void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl)
{
if (esp == NULL)
esp = (unsigned long *)&esp;
- pr_emerg("Call trace:\n");
- unwind_stack(NULL, esp, print_trace);
+ printk("%sCall trace:\n", loglvl);
+ unwind_stack((void *)loglvl, esp, print_trace);
}
void show_registers(struct pt_regs *regs)
@@ -96,7 +97,7 @@ void show_registers(struct pt_regs *regs)
if (in_kernel) {
printk("\nStack: ");
- show_stack(NULL, (unsigned long *)esp);
+ show_stack(NULL, (unsigned long *)esp, KERN_EMERG);
printk("\nCode: ");
if (regs->pc < PAGE_OFFSET)
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 6e0a11ac4c00..d2224ccca294 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -104,7 +104,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -183,7 +183,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -192,7 +192,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -201,7 +201,7 @@ good_area:
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
@@ -260,14 +260,14 @@ out_of_memory:
__asm__ __volatile__("l.nop 42");
__asm__ __volatile__("l.nop 1");
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index 3bcdc1c26b23..3d7c79c7745d 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -29,7 +29,6 @@
#include <linux/pagemap.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 8f8e97f7eac9..a978590d802d 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -13,11 +13,11 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>
-#include <asm/pgtable.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index dd4f2007f7c9..4b680aed8f5f 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -23,7 +23,6 @@
#include <linux/init.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/spr_defs.h>
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index fadbbd010337..182a5bca3e2c 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -162,7 +162,7 @@ vmlinuz: bzImage
$(OBJCOPY) $(boot)/bzImage $@
else
vmlinuz: vmlinux
- @gzip -cf -9 $< > $@
+ @$(_GZIP) -cf -9 $< > $@
endif
install:
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index cab8f64ca4a2..116effe26143 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -3,7 +3,7 @@
#define _ASM_IO_H
#include <linux/types.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 697a906ab1b0..07b89c74abeb 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -6,7 +6,6 @@
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm-generic/mm_hooks.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index cd7df48dc874..75cf84070fc9 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -427,40 +427,16 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd)))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) __va(pmd_address(pmd)));
+}
#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, address) \
-((mm)->pgd + ((address) >> PGDIR_SHIFT))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/* Find an entry in the second-level page table.. */
-#if CONFIG_PGTABLE_LEVELS == 3
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset(dir,address) \
-((pmd_t *) pud_page_vaddr(*(dir)) + pmd_index(address))
-#else
-#define pmd_offset(dir,addr) ((pmd_t *) dir)
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pte_offset_kernel(pmd, address) \
- ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
-
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
extern void paging_init (void);
/* Used for deferring calls to flush_dcache_page() */
@@ -571,6 +547,5 @@ extern void arch_report_meminfo(struct seq_file *m);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
#endif /* _PARISC_PGTABLE_H */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index aa79d35dedfa..305768a40773 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -20,8 +20,8 @@
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/kbuild.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 9a03e29c8733..4b484ec7c7da 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -19,7 +19,6 @@
#include <asm/psw.h>
#include <asm/cache.h> /* for L1_CACHE_SHIFT */
#include <asm/assembly.h> /* for LDREG/STREG defines */
-#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
@@ -28,6 +27,7 @@
#include <asm/alternative.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_64BIT
.level 2.0w
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 951a339369dd..aa93d775c34d 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -17,10 +17,10 @@
#include <asm/pdc.h>
#include <asm/assembly.h>
-#include <asm/pgtable.h>
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
.level PA_ASM_LEVEL
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index fac18c623d16..7df140545b22 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -50,7 +50,6 @@
#include <linux/mm.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
#include <asm/unwind.h>
#include <asm/sections.h>
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index fa092ed1e837..b2ba6d633065 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -21,12 +21,12 @@
#include <asm/psw.h>
#include <asm/assembly.h>
-#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ldcw.h>
#include <asm/alternative.h>
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
.section .text.hot
.align 16
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 0f1b460ee715..70cd24bdcfec 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -201,7 +201,7 @@ static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
pgd_clear(dir);
return;
}
- pmd = pmd_offset(dir, vaddr);
+ pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
vaddr &= ~PGDIR_MASK;
end = vaddr + size;
if (end > PGDIR_SIZE)
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index 749c4579db0d..6e8550fefad6 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -17,11 +17,11 @@
#include <linux/seq_file.h>
#include <linux/kthread.h>
#include <linux/initrd.h>
+#include <linux/pgtable.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
enum pdt_access_type {
PDT_NONE,
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index f8c07dcbfb49..b51418ad8655 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -26,7 +26,6 @@
#include <linux/audit.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index e202c37e56af..f8a842ddd82d 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -39,7 +39,6 @@
#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 82fc01189488..5400e23a77a1 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -49,7 +49,7 @@
#include "../math-emu/math-emu.h" /* for handle_fpe() */
static void parisc_show_stack(struct task_struct *task,
- struct pt_regs *regs);
+ struct pt_regs *regs, const char *loglvl);
static int printbinary(char *buf, unsigned long x, int nbits)
{
@@ -155,7 +155,7 @@ void show_regs(struct pt_regs *regs)
printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
- parisc_show_stack(current, regs);
+ parisc_show_stack(current, regs, KERN_DEFAULT);
}
}
@@ -170,37 +170,37 @@ static DEFINE_RATELIMIT_STATE(_hppa_rs,
}
-static void do_show_stack(struct unwind_frame_info *info)
+static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
int i = 1;
- printk(KERN_CRIT "Backtrace:\n");
+ printk("%sBacktrace:\n", loglvl);
while (i <= MAX_UNWIND_ENTRIES) {
if (unwind_once(info) < 0 || info->ip == 0)
break;
if (__kernel_text_address(info->ip)) {
- printk(KERN_CRIT " [<" RFMT ">] %pS\n",
- info->ip, (void *) info->ip);
+ printk("%s [<" RFMT ">] %pS\n",
+ loglvl, info->ip, (void *) info->ip);
i++;
}
}
- printk(KERN_CRIT "\n");
+ printk("%s\n", loglvl);
}
static void parisc_show_stack(struct task_struct *task,
- struct pt_regs *regs)
+ struct pt_regs *regs, const char *loglvl)
{
struct unwind_frame_info info;
unwind_frame_init_task(&info, task, regs);
- do_show_stack(&info);
+ do_show_stack(&info, loglvl);
}
-void show_stack(struct task_struct *t, unsigned long *sp)
+void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
- parisc_show_stack(t, NULL);
+ parisc_show_stack(t, NULL, loglvl);
}
int is_valid_bugaddr(unsigned long iaoq)
@@ -446,7 +446,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
struct unwind_frame_info info;
unwind_frame_init(&info, current, regs);
- do_show_stack(&info);
+ do_show_stack(&info, KERN_CRIT);
}
printk("\n");
@@ -717,7 +717,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (user_mode(regs)) {
struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm,regs->iaoq[0]);
if (vma && (regs->iaoq[0] >= vma->vm_start)
&& (vma->vm_flags & VM_EXEC)) {
@@ -725,10 +725,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
fault_address = regs->iaoq[0];
fault_space = regs->iasq[0];
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
break; /* call do_page_fault() */
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
/* Fall Through */
case 27:
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index beceaab34ecb..94a9fe2702c2 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -57,14 +57,10 @@ void * memcpy(void * dst,const void *src, size_t count)
EXPORT_SYMBOL(raw_copy_in_user);
EXPORT_SYMBOL(memcpy);
-long probe_kernel_read(void *dst, const void *src, size_t size)
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
- unsigned long addr = (unsigned long)src;
-
- if (addr < PAGE_SIZE)
- return -EFAULT;
-
+ if ((unsigned long)unsafe_src < PAGE_SIZE)
+ return false;
/* check for I/O space F_EXTEND(0xfff00000) access as well? */
-
- return __probe_kernel_read(dst, src, size);
+ return true;
}
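The arch hook is now a simple predicate; the copy itself moves to the generic probe_kernel_read() in mm/maccess.c, which consults the hook first. A simplified sketch (the real function also juggles the address limit around the copy):

long probe_kernel_read(void *dst, const void *src, size_t size)
{
	long ret;

	if (!probe_kernel_read_allowed(src, size))
		return -ERANGE;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst,
			(__force const void __user *)src, size);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}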
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 86e8c848f3d7..66ac0719bd49 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -282,7 +282,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
if (acc_type & VM_WRITE)
flags |= FAULT_FLAG_WRITE;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma || address < vma->vm_start)
goto check_expansion;
@@ -329,7 +329,7 @@ good_area:
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -337,7 +337,7 @@ good_area:
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
check_expansion:
@@ -349,7 +349,7 @@ check_expansion:
* Something tried to access memory that isn't in our memory map..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
int signo, si_code;
@@ -421,7 +421,7 @@ no_context:
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c
index e2d8b0a857ee..24426a7e1a5e 100644
--- a/arch/parisc/mm/fixmap.c
+++ b/arch/parisc/mm/fixmap.c
@@ -33,11 +33,7 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
void notrace clear_fixmap(enum fixed_addresses idx)
{
unsigned long vaddr = __fix_to_virt(idx);
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
- pte_t *pte = pte_offset_kernel(pmd, vaddr);
+ pte_t *pte = virt_to_kpte(vaddr);
if (WARN_ON(pte_none(*pte)))
return;
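virt_to_kpte() is the companion generic helper to pmd_off_k(): it resolves a kernel virtual address straight to its PTE, or NULL when the PMD is empty; approximately:

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}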
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index cdd760d39e7c..48d628a1a0af 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -26,7 +26,6 @@
#include <linux/compat.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index d7978a5a79c3..224912432821 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -112,6 +112,9 @@ static inline bool pte_user(pte_t pte)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLY__ */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -332,26 +335,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
-#define pmd_page_vaddr(pmd) \
- ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
-static inline void pte_unmap(pte_t *pte) { }
-
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index f17442c3a092..25c3cb8272c0 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1005,52 +1005,9 @@ extern struct page *p4d_page(p4d_t p4d);
/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr) __pa(ptr)
-#define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS)
-static inline unsigned long pgd_index(unsigned long address)
-{
- return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
-}
-
-static inline unsigned long pud_index(unsigned long address)
-{
- return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
-}
-
-static inline unsigned long pmd_index(unsigned long address)
-{
- return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-}
-
-static inline unsigned long pte_index(unsigned long address)
-{
- return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-}
-
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pud_offset(p4dp, addr) \
- (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr))
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index e92191b390f3..de600b915a3c 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -4,23 +4,9 @@
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H
-#ifdef __KERNEL__
-
#include <linux/mm.h>
#include <asm/cputable.h>
-/*
- * No cache flushing is required when address mappings are changed,
- * because the caches on PowerPCs are physically addressed.
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
#ifdef CONFIG_PPC_BOOK3S_64
/*
* Book3s has no ptesync after setting a pte, so without this ptesync it's
@@ -33,20 +19,20 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
asm volatile("ptesync" ::: "memory");
}
-#else
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-#endif
+#define flush_cache_vmap flush_cache_vmap
+#endif /* CONFIG_PPC_BOOK3S_64 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
void flush_icache_range(unsigned long start, unsigned long stop);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr,
- int len);
-extern void flush_dcache_icache_page(struct page *page);
+#define flush_icache_range flush_icache_range
+
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
+#define flush_icache_user_page flush_icache_user_page
+
+void flush_dcache_icache_page(struct page *page);
void __flush_dcache_icache(void *page);
/**
@@ -111,14 +97,6 @@ static inline void invalidate_dcache_range(unsigned long start,
mb(); /* sync */
}
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
- } while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-#endif /* __KERNEL__ */
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_POWERPC_CACHEFLUSH_H */
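With flush_icache_user_page() published via the same-named macro, the generic header also supplies the memcpy-plus-flush sequence powerpc used to open-code; the asm-generic default is roughly:

#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_page(vma, page, vaddr, len);	\
	} while (0)
#endif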
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index ccbe2e83c950..29188810ba30 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -16,8 +16,8 @@
#ifndef __ASSEMBLY__
#include <linux/sizes.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 13f90dd03450..58635960403c 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -34,7 +34,6 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <asm/mmiowb.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
-#include <asm/pgtable.h>
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index c745ee41ad66..1d0f7d838b2e 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -39,7 +39,7 @@
#else /* !__ASSEMBLY__ */
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
void setup_kup(void);
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 8dd24c7692a0..d32ec9ae73bd 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu, unsigned long addr,
- unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store);
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -300,12 +298,12 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 337047ba4a56..7e2d061d0445 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -795,7 +795,6 @@ struct kvm_vcpu_arch {
struct mmio_hpte_cache_entry *pgfault_cache;
struct task_struct *run_task;
- struct kvm_run *kvm_run;
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 94f5a32acaf1..ccf66b3a4c1d 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28 +58,28 @@ enum xlate_readwrite {
XLATE_WRITE /* check for write permissions */
};
-extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
-extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int rs, unsigned int bytes,
int is_default_endian);
@@ -90,10 +90,9 @@ extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
-extern int kvmppc_emulate_instruction(struct kvm_run *run,
- struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
-extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
@@ -267,7 +266,7 @@ struct kvmppc_ops {
void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
- int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
int (*vcpu_create)(struct kvm_vcpu *vcpu);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
int (*check_requests)(struct kvm_vcpu *vcpu);
@@ -291,7 +290,7 @@ struct kvmppc_ops {
int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm);
int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
- int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int (*emulate_op)(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
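Dropping the kvm_run parameter is safe because each vcpu already owns its run structure; callers fetch it through the vcpu instead. A hypothetical handler showing the pattern (example_mmio_exit is illustrative, not a function in this patch):

static int example_mmio_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;	/* valid for any live vcpu */

	run->exit_reason = KVM_EXIT_MMIO;	/* report the exit to userspace */
	return 0;
}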
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index af7f13cf90cf..b56f14160ae5 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -28,6 +28,8 @@ extern int icache_44x_need_flush;
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLY__ */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -203,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
*pmdp = __pmd(0);
}
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
@@ -330,8 +328,6 @@ static inline int pte_young(pte_t pte)
* of the pte page. -- paulus
*/
#ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd) \
- ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
@@ -341,15 +337,6 @@ static inline int pte_young(pte_t pte)
pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
- pte_index(addr))
-#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
-static inline void pte_unmap(pte_t *pte) { }
-
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index 81b1c54e3cf1..fe2f4c9acd9e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -78,10 +78,6 @@ extern struct page *p4d_page(p4d_t p4d);
#endif /* !__ASSEMBLY__ */
-#define pud_offset(p4dp, addr) \
- (((pud_t *) p4d_page_vaddr(*(p4dp))) + \
- (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
#define pud_ERROR(e) \
pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 3424381b81da..6cb8aa357191 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -182,28 +182,6 @@ static inline void p4d_set(p4d_t *p4dp, unsigned long val)
*p4dp = __p4d(val);
}
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
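
These open-coded walker macros are not lost: after the mm consolidation they come from <linux/pgtable.h>, built on pmd_page_vaddr() and friends. Roughly, paraphrasing the generic header rather than quoting this patch (demo_ names are illustrative):

    /* Generic shape of the helpers that replace the deleted macros. */
    static inline unsigned long demo_pte_index(unsigned long address)
    {
            return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
    }

    static inline pte_t *demo_pte_offset_kernel(pmd_t *pmd, unsigned long address)
    {
            return (pte_t *)pmd_page_vaddr(*pmd) + demo_pte_index(address);
    }
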
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 50a4b0bb8d16..4b7c3472eab1 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -56,7 +56,7 @@ static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
#ifdef CONFIG_NUMA_BALANCING
/*
* These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * comment in include/linux/pgtable.h . On powerpc, this will only
* work for user pages and always return true for kernel pages.
*/
static inline int pte_protnone(pte_t pte)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index ae58b524a924..f7613f43c9cf 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,25 +41,6 @@ struct mm_struct;
#ifndef __ASSEMBLY__
-#ifdef CONFIG_PPC32
-static inline pmd_t *pmd_ptr(struct mm_struct *mm, unsigned long va)
-{
- return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
-}
-
-static inline pmd_t *pmd_ptr_k(unsigned long va)
-{
- return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
-}
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- pmd_t *pmd = pmd_ptr_k(vaddr);
-
- return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
-}
-#endif
-
#include <asm/tlbflush.h>
/* Keep these as a macros to avoid include dependency mess */
@@ -76,6 +57,13 @@ static inline pgprot_t pte_pgprot(pte_t pte)
return __pgprot(pte_flags);
}
+#ifndef pmd_page_vaddr
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
+}
+#define pmd_page_vaddr pmd_page_vaddr
+#endif
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -96,8 +84,6 @@ extern unsigned long ioremap_bot;
*/
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
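
The generic pmd_page_vaddr() above works because each subarchitecture now exports PMD_MASKED_BITS, the low PMD bits that carry flags rather than the PTE-table address. A quick worked example, assuming a 4 kB PTE table so PMD_MASKED_BITS = PTE_TABLE_SIZE - 1 = 0xfff (the entry value is hypothetical):

    pmd_t pmd = __pmd(0x01234fe5UL);    /* table at phys 0x01234000, flags 0xfe5 */

    /* Strip the flag bits, then translate physical to kernel virtual. */
    unsigned long va = (unsigned long)__va(pmd_val(pmd) & ~0xfffUL);
    /* va now addresses the PTE table that starts at physical 0x01234000. */
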
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 20ebf153c871..2fe6cae14d10 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -101,7 +101,7 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
/*
* Returns a positive, 5-bit key on success, or -1 on failure.
- * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
+ * Relies on the mmap_lock to protect against concurrency in mm_pkey_alloc() and
* mm_pkey_free().
*/
static inline int mm_pkey_alloc(struct mm_struct *mm)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 7f3a8b902325..862985cf5180 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -10,7 +10,7 @@
#ifdef __KERNEL__
#ifndef __powerpc64__
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#endif
#include <asm/pgalloc.h>
#ifndef __powerpc64__
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9b9cde07e396..6657dc6b2336 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -30,7 +30,6 @@
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index f57712a55815..02300edc6989 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -9,13 +9,13 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 1dfccf58fbb1..cac22cb97a8c 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -12,7 +12,6 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e2459550a3bf..705c042309d8 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -17,10 +17,10 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index a22a8209971b..926bfa73586a 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -26,10 +26,10 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 51dd01a27314..8e36718f3167 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -25,10 +25,10 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index abb71fad7d6a..9f359d3fba74 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -16,12 +16,12 @@
#include <linux/init.h>
#include <linux/magic.h>
+#include <linux/pgtable.h>
#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 840af004041e..586a6ac501e9 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -28,10 +28,10 @@
#include <linux/init.h>
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 0276bc8c8969..51bbaae94ccc 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -10,10 +10,10 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h> /* for init_mm */
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <asm/io-workarounds.h>
#include <asm/pte-walk.h>
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 112d150354b2..05b1cc0e009e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -51,10 +51,10 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index c32af49a5138..c3b522bff9b4 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -12,10 +12,10 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/extable.h>
+#include <linux/pgtable.h>
#include <asm/mmu.h>
#include <asm/mce.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/pte-walk.h>
#include <asm/sstep.h>
#include <asm/exception-64s.h>
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 8d96169c597e..2168372b792d 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -8,11 +8,11 @@
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
+#include <linux/pgtable.h>
#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 048d64c4e115..7bb7faf84490 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -41,7 +41,6 @@
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
@@ -1456,7 +1455,7 @@ void show_regs(struct pt_regs * regs)
printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
- show_stack(current, (unsigned long *) regs->gpr[1]);
+ show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
if (!user_mode(regs))
show_instructions(regs);
}
@@ -2063,7 +2062,8 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack)
+void show_stack(struct task_struct *tsk, unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
@@ -2088,7 +2088,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
}
lr = 0;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
break;
@@ -2097,7 +2097,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
+ printk("%s["REG"] ["REG"] %pS",
+ loglvl, sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ret_addr = ftrace_graph_ret_addr(current,
&ftrace_idx, ip, stack);
@@ -2119,8 +2120,9 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
lr = regs->link;
- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
- regs->trap, (void *)regs->nip, (void *)lr);
+ printk("%s--- interrupt: %lx at %pS\n LR = %pS\n",
+ loglvl, regs->trap,
+ (void *)regs->nip, (void *)lr);
firstframe = 1;
}
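
This hunk is part of the tree-wide series that adds a loglvl argument to show_stack(), so a backtrace prints at the severity of the message it accompanies instead of a hard-coded default. Call-site sketch (hypothetical caller, kernel context):

    #include <linux/printk.h>
    #include <linux/sched.h>
    #include <linux/sched/debug.h>      /* show_stack() */

    static void demo_report_stall(struct task_struct *tsk, unsigned long *sp)
    {
            pr_warn("demo: task appears stalled\n");
            /* The trace inherits the warning's log level. */
            show_stack(tsk, sp, KERN_WARNING);
    }
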
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6a3bac357e24..9cc49f265c86 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -30,6 +30,7 @@
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -41,7 +42,6 @@
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
-#include <asm/pgtable.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5f15b10eb007..90c604d00b7d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
+#include <linux/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
@@ -34,7 +35,6 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index ae5e43eaca48..781c1869902e 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -13,9 +13,9 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index c376a0588039..9d3faac53295 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -31,13 +31,13 @@
#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
-#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index d642e42eabb1..1823706ae076 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -19,11 +19,11 @@
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/nvram.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/elf.h>
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index bb47555d48a2..0ba1ed77dc68 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -30,13 +30,13 @@
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index ae3da7440b2f..1415c16ab628 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -47,7 +47,6 @@
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
-#include <asm/pgtable.h>
#endif
#include "signal.h"
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 77061915897f..55e5f76554da 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -25,7 +25,6 @@
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c820c95162ff..73199470c265 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -33,6 +33,7 @@
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -41,7 +42,6 @@
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index c477b8585a29..b6440657ef92 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -260,7 +260,7 @@ static void raise_backtrace_ipi(cpumask_t *mask)
pr_cont(" current pointer corrupt? (%px)\n", p->__current);
pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
- show_stack(p->__current, (unsigned long *)p->saved_r1);
+ show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
}
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 067e501f2202..97413a385720 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -39,7 +39,6 @@
#include <linux/kmsg_dump.h>
#include <asm/emulated_ops.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f38f26e844b6..e0f4ba45b6cc 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -18,7 +18,6 @@
#include <linux/security.h>
#include <linux/memblock.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -171,7 +170,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* and end up putting it elsewhere.
* Add enough to the size so that the result can be aligned.
*/
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
vdso_base = get_unmapped_area(NULL, vdso_base,
(vdso_pages << PAGE_SHIFT) +
@@ -211,11 +210,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto fail_mmapsem;
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
fail_mmapsem:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return rc;
}
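
The mmap_sem manipulations become calls into the MMAP locking API, which hides the lock's type behind wrappers so later changes need not touch call sites. The correspondence, as a sketch:

    #include <linux/mmap_lock.h>

    static int demo_touch_mm(struct mm_struct *mm)
    {
            if (mmap_write_lock_killable(mm))   /* was down_write_killable(&mm->mmap_sem) */
                    return -EINTR;
            /* ... modify the address space ... */
            mmap_write_unlock(mm);              /* was up_write(&mm->mmap_sem) */
            return 0;
    }
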
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 37508a356f28..41fedec69ac3 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -755,9 +755,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
- return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+ return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index eae259ee49af..9b6323ec8e60 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -18,7 +18,7 @@ extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
int sprn, ulong spr_val);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 18aed9775a3c..7c5a1812a1c3 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -412,7 +412,7 @@ static int instruction_is_store(unsigned int instr)
return (instr & mask) != 0;
}
-int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store)
{
u32 last_inst;
@@ -472,10 +472,10 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.paddr_accessed = gpa;
vcpu->arch.vaddr_accessed = ea;
- return kvmppc_emulate_mmio(run, vcpu);
+ return kvmppc_emulate_mmio(vcpu);
}
-int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
@@ -498,7 +498,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte_t pte, *ptep;
if (kvm_is_radix(kvm))
- return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
+ return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);
/*
* Real-mode code has already searched the HPT and found the
@@ -518,7 +518,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_base = r & HPTE_R_RPN & ~(psize - 1);
gfn_base = gpa_base >> PAGE_SHIFT;
gpa = gpa_base | (ea & (psize - 1));
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
}
}
@@ -554,7 +554,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
/*
@@ -581,7 +581,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
* We always ask for write permission since the common case
* is that the page is writable.
*/
- if (__get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
write_ok = true;
} else {
/* Call KVM generic code to do the slow-path check */
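
__get_user_pages_fast(hva, 1, 1, &page) == 1 becomes get_user_page_fast_only(hva, FOLL_WRITE, &page): a single-page helper that takes ordinary FOLL_* flags instead of a bare write/force integer and returns bool, so the == 1 comparison disappears. Sketch:

    #include <linux/mm.h>

    static bool demo_pin_writable_page(unsigned long hva, struct page **page)
    {
            /* true iff the one page was pinned without sleeping. */
            return get_user_page_fast_only(hva, FOLL_WRITE, page);
    }
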
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 02219e28b1e4..3cb0c9843d01 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -11,12 +11,12 @@
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>
+#include <linux/pgtable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
@@ -353,7 +353,13 @@ static struct kmem_cache *kvm_pmd_cache;
static pte_t *kvmppc_pte_alloc(void)
{
- return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ pte_t *pte;
+
+ pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+	/* pmd_populate() will only reference __pa(pte). */
+ kmemleak_ignore(pte);
+
+ return pte;
}
static void kvmppc_pte_free(pte_t *ptep)
@@ -363,7 +369,13 @@ static void kvmppc_pte_free(pte_t *ptep)
static pmd_t *kvmppc_pmd_alloc(void)
{
- return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ pmd_t *pmd;
+
+ pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+	/* pud_populate() will only reference __pa(pmd). */
+ kmemleak_ignore(pmd);
+
+ return pmd;
}
static void kvmppc_pmd_free(pmd_t *pmdp)
@@ -417,9 +429,13 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
* Callers are responsible for flushing the PWC.
*
* When page tables are being unmapped/freed as part of page fault path
- * (full == false), ptes are not expected. There is code to unmap them
- * and emit a warning if encountered, but there may already be data
- * corruption due to the unexpected mappings.
+ * (full == false), valid ptes are generally not expected; however, there
+ * is one situation where they arise, which is when dirty page logging is
+ * turned off for a memslot while the VM is running. The new memslot
+ * becomes visible to page faults before the memslot commit function
+ * gets to flush the memslot, which can lead to a 2MB page mapping being
+ * installed for a guest physical address where there are already 64kB
+ * (or 4kB) mappings (of sub-pages of the same 2MB page).
*/
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
unsigned int lpid)
@@ -433,7 +449,6 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
if (pte_val(*p) == 0)
continue;
- WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, p,
pte_pfn(*p) << PAGE_SHIFT,
PAGE_SHIFT, NULL, lpid);
@@ -795,7 +810,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
* is that the page is writable.
*/
hva = gfn_to_hva_memslot(memslot, gfn);
- if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
upgrade_write = true;
} else {
unsigned long pfn;
@@ -891,7 +906,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
return ret;
}
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
@@ -937,7 +952,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
return RESUME_GUEST;
}
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
@@ -1142,6 +1157,11 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
kvm->arch.lpid);
gpa += PAGE_SIZE;
}
+ /*
+ * Increase the mmu notifier sequence number to prevent any page
+ * fault that read the memslot earlier from writing a PTE.
+ */
+ kvm->mmu_notifier_seq++;
spin_unlock(&kvm->mmu_lock);
}
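
Two notes on this file. The kmemleak_ignore() calls are needed because pud_populate()/pmd_populate() store only the physical address of these table pages, so kmemleak sees no pointer to them and would report false leaks. And the mmu_notifier_seq bump works because the radix fault path samples the counter before resolving a fault and re-checks it under kvm->mmu_lock before installing a PTE; the standard KVM pattern, sketched:

    #include <linux/kvm_host.h>

    static int demo_install_pte(struct kvm *kvm)
    {
            unsigned long mmu_seq = kvm->mmu_notifier_seq;

            smp_rmb();      /* pairs with the increment on the flush side */

            /* ... resolve the fault, possibly sleeping ... */

            spin_lock(&kvm->mmu_lock);
            if (mmu_notifier_retry(kvm, mmu_seq)) {
                    spin_unlock(&kvm->mmu_lock);
                    return -EAGAIN; /* a flush ran meanwhile; retry the fault */
            }
            /* ... safe to install the new PTE here ... */
            spin_unlock(&kvm->mmu_lock);
            return 0;
    }
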
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 50555ad1db93..1a529df0ab44 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -73,6 +73,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
struct iommu_table_group *table_group = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
table_group = iommu_group_get_iommudata(grp);
@@ -87,7 +88,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
}
}
+ cond_resched_rcu();
}
+ rcu_read_unlock();
}
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
@@ -105,12 +108,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!f.file)
return -EBADF;
+ rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt == f.file->private_data) {
found = true;
break;
}
}
+ rcu_read_unlock();
fdput(f);
@@ -143,6 +148,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!tbl)
return -EINVAL;
+ rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
if (tbl != stit->tbl)
continue;
@@ -150,14 +156,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!kref_get_unless_zero(&stit->kref)) {
/* stit is being destroyed */
iommu_tce_table_put(tbl);
+ rcu_read_unlock();
return -ENOTTY;
}
/*
* The table is already known to this KVM, we just increased
* its KVM reference counter and can return.
*/
+ rcu_read_unlock();
return 0;
}
+ rcu_read_unlock();
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
if (!stit) {
@@ -365,18 +374,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
return H_TOO_HARD;
+ rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
unsigned long hpa = 0;
struct mm_iommu_table_group_mem_t *mem;
long shift = stit->tbl->it_page_shift;
mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
- if (!mem)
- return H_TOO_HARD;
-
- if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
+ if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
+ rcu_read_unlock();
return H_TOO_HARD;
+ }
}
+ rcu_read_unlock();
return H_SUCCESS;
}
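
The new rcu_read_lock()/rcu_read_unlock() pairs fix list_for_each_entry_rcu() walks that ran outside any reader-side critical section, which CONFIG_PROVE_RCU flags with a lockdep splat. The minimal correct shape is below; where an iteration body may run long, cond_resched_rcu() (used above) drops and re-takes the read lock at a safe point:

    #include <linux/rculist.h>

    struct demo_item {
            struct list_head list;
            int val;
    };

    static int demo_sum(struct list_head *head)
    {
            struct demo_item *it;
            int sum = 0;

            rcu_read_lock();        /* required around _rcu list iteration */
            list_for_each_entry_rcu(it, head, list)
                    sum += it->val;
            rcu_read_unlock();

            return sum;
    }
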
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index dad71d276b91..0effd48c8f4d 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -235,7 +235,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
#endif
-int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
@@ -371,13 +371,13 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
break;
- run->papr_hcall.nr = cmd;
+ vcpu->run->papr_hcall.nr = cmd;
for (i = 0; i < 9; ++i) {
ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
- run->papr_hcall.args[i] = gpr;
+ vcpu->run->papr_hcall.args[i] = gpr;
}
- run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
vcpu->arch.hcall_needed = 1;
emulated = EMULATE_EXIT_USER;
break;
@@ -629,7 +629,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
if (emulated == EMULATE_FAIL)
- emulated = kvmppc_emulate_paired_single(run, vcpu);
+ emulated = kvmppc_emulate_paired_single(vcpu);
return emulated;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index a07e12ed9f5a..6bf66649ab92 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1094,9 +1094,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
ret = kvmppc_h_svm_init_done(vcpu->kvm);
break;
case H_SVM_INIT_ABORT:
- ret = H_UNSUPPORTED;
- if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+ /*
+	 * Even if that call is made by the Ultravisor, the SRR1 value
+ * is the guest context one, with the secure bit clear as it has
+ * not yet been secured. So we can't check it here.
+ * Instead the kvm->arch.secure_guest flag is checked inside
+ * kvmppc_h_svm_init_abort().
+ */
+ ret = kvmppc_h_svm_init_abort(vcpu->kvm);
break;
default:
@@ -1151,8 +1156,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
return kvmppc_hcall_impl_hv_realmode(cmd);
}
-static int kvmppc_emulate_debug_inst(struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
{
u32 last_inst;
@@ -1166,8 +1170,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_run *run,
}
if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.address = kvmppc_get_pc(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
return RESUME_HOST;
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -1268,9 +1272,10 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
+ struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
vcpu->stat.sum_exits++;
@@ -1405,7 +1410,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
- r = kvmppc_emulate_debug_inst(run, vcpu);
+ r = kvmppc_emulate_debug_inst(vcpu);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1457,7 +1462,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
-static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
@@ -1515,7 +1520,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(run, vcpu);
+ r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
@@ -1525,7 +1530,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(run, vcpu);
+ r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
@@ -2929,7 +2934,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
ret = RESUME_GUEST;
if (vcpu->arch.trap)
- ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+ ret = kvmppc_handle_exit_hv(vcpu,
vcpu->arch.run_task);
vcpu->arch.ret = ret;
@@ -3894,15 +3899,16 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
return r;
}
-static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int n_ceded, i, r;
struct kvmppc_vcore *vc;
struct kvm_vcpu *v;
trace_kvmppc_run_vcpu_enter(vcpu);
- kvm_run->exit_reason = 0;
+ run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
kvmppc_update_vpas(vcpu);
@@ -3914,7 +3920,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
spin_lock(&vc->lock);
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
- vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -3947,8 +3952,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
r = kvmhv_setup_mmu(vcpu);
spin_lock(&vc->lock);
if (r) {
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- kvm_run->fail_entry.
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.
hardware_entry_failure_reason = 0;
vcpu->arch.ret = r;
break;
@@ -3967,7 +3972,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (signal_pending(v->arch.run_task)) {
kvmppc_remove_runnable(vc, v);
v->stat.signal_exits++;
- v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+ v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
wake_up(&v->arch.cpu_run);
}
@@ -4008,7 +4013,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
kvmppc_remove_runnable(vc, vcpu);
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
}
@@ -4019,15 +4024,15 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
- trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+ trace_kvmppc_run_vcpu_exit(vcpu);
spin_unlock(&vc->lock);
return vcpu->arch.ret;
}
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
- struct kvm_vcpu *vcpu, u64 time_limit,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
+ struct kvm_run *run = vcpu->run;
int trap, r, pcpu;
int srcu_idx, lpid;
struct kvmppc_vcore *vc;
@@ -4036,14 +4041,13 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
trace_kvmppc_run_vcpu_enter(vcpu);
- kvm_run->exit_reason = 0;
+ run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
- vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -4161,9 +4165,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
r = RESUME_GUEST;
if (trap) {
if (!nested)
- r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
+ r = kvmppc_handle_exit_hv(vcpu, current);
else
- r = kvmppc_handle_nested_exit(kvm_run, vcpu);
+ r = kvmppc_handle_nested_exit(vcpu);
}
vcpu->arch.ret = r;
@@ -4173,7 +4177,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
if (signal_pending(current)) {
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
break;
}
@@ -4189,13 +4193,13 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
done:
kvmppc_remove_runnable(vc, vcpu);
- trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+ trace_kvmppc_run_vcpu_exit(vcpu);
return vcpu->arch.ret;
sigpend:
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
out:
local_irq_enable();
@@ -4203,8 +4207,9 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
goto done;
}
-static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int r;
int srcu_idx;
unsigned long ebb_regs[3] = {}; /* shut up GCC */
@@ -4288,10 +4293,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
*/
if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
!no_mixing_hpt_and_radix)
- r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
+ r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
- r = kvmppc_run_vcpu(run, vcpu);
+ r = kvmppc_run_vcpu(vcpu);
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
@@ -4301,7 +4306,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
srcu_idx = srcu_read_lock(&kvm->srcu);
- r = kvmppc_book3s_hv_page_fault(run, vcpu,
+ r = kvmppc_book3s_hv_page_fault(vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&kvm->srcu, srcu_idx);
} else if (r == RESUME_PASSTHROUGH) {
@@ -4621,14 +4626,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
vma = find_vma(kvm->mm, hva);
if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
goto up_out;
psize = vma_kernel_pagesize(vma);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
/* We can handle 4k, 64k or 16M pages in the VRMA */
if (psize >= 0x1000000)
@@ -4661,7 +4666,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
return err;
up_out:
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
goto out_srcu;
}
@@ -4975,7 +4980,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
}
/* We don't need to emulate any privileged instructions or dcbz */
-static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
return EMULATE_FAIL;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 75993f44519b..2c849a65db77 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -11,11 +11,11 @@
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
+#include <linux/pgtable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
@@ -290,8 +290,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
}
- r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
- lpcr);
+ r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
/* save L2 state for return */
@@ -1270,8 +1269,7 @@ static inline int kvmppc_radix_shift_to_level(int shift)
}
/* called with gp->tlb_lock held */
-static long int __kvmhv_nested_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp)
{
struct kvm *kvm = vcpu->kvm;
@@ -1354,7 +1352,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
}
/* passthrough of emulated MMIO case */
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
if (writing) {
@@ -1429,8 +1427,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
- if (n_rmap)
- kfree(n_rmap);
+ kfree(n_rmap);
if (ret == -EAGAIN)
ret = RESUME_GUEST; /* Let the guest try again */
@@ -1441,13 +1438,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
return RESUME_GUEST;
}
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu)
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
struct kvm_nested_guest *gp = vcpu->arch.nested;
long int ret;
mutex_lock(&gp->tlb_lock);
- ret = __kvmhv_nested_page_fault(run, vcpu, gp);
+ ret = __kvmhv_nested_page_fault(vcpu, gp);
mutex_unlock(&gp->tlb_lock);
return ret;
}
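
Dropping the if (n_rmap) guard is safe because kfree(NULL) is defined as a no-op, so the check was dead weight:

    #include <linux/slab.h>

    static void demo_free(void *ptr)
    {
            kfree(ptr);     /* no NULL check needed; kfree(NULL) does nothing */
    }
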
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 287d5911df0f..4d7e5610731a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -8,6 +8,7 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -15,7 +16,6 @@
#include <asm/xics.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
-#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
index 174d75e476fa..6f18632e30e9 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xive.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
@@ -3,6 +3,7 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -11,7 +12,6 @@
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
-#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 76d05c71fb1f..09d8119024db 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -47,7 +47,7 @@
* Locking order
*
* 1. kvm->srcu - Protects KVM memslots
- * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise
+ * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
* 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
* as sync-points for page-in/out
*/
@@ -402,13 +402,13 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
mig.dst = &dst_pfn;
/*
- * We come here with mmap_sem write lock held just for
- * ksm_madvise(), otherwise we only need read mmap_sem.
+ * We come here with mmap_lock write lock held just for
+ * ksm_madvise(), otherwise we only need read mmap_lock.
* Hence downgrade to read lock once ksm_madvise() is done.
*/
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags);
- downgrade_write(&kvm->mm->mmap_sem);
+ mmap_write_downgrade(kvm->mm);
*downgrade = true;
if (ret)
return ret;
@@ -525,7 +525,7 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_write(&kvm->mm->mmap_sem);
+ mmap_write_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
@@ -548,9 +548,9 @@ out_unlock:
mutex_unlock(&kvm->arch.uvmem_lock);
out:
if (downgrade)
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
else
- up_write(&kvm->mm->mmap_sem);
+ mmap_write_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
@@ -703,7 +703,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
goto out;
@@ -716,7 +716,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
ret = H_SUCCESS;
out:
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
@@ -749,6 +749,20 @@ static u64 kvmppc_get_secmem_size(void)
const __be32 *prop;
u64 size = 0;
+ /*
+ * First try the new ibm,secure-memory nodes which supersede the
+ * secure-memory-ranges property.
+	 * If we find any, there is no need to read the deprecated property.
+ */
+ for_each_compatible_node(np, NULL, "ibm,secure-memory") {
+ prop = of_get_property(np, "reg", &len);
+ if (!prop)
+ continue;
+ size += of_read_number(prop + 2, 2);
+ }
+ if (size)
+ return size;
+
np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
if (!np)
goto out;
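
The new loop accumulates sizes straight out of each node's reg property. of_read_number(prop + 2, 2) skips two address cells and reads the two size cells as one 64-bit value, which assumes #address-cells = #size-cells = 2 for these nodes. Decode sketch (hypothetical compatible string):

    #include <linux/of.h>

    static u64 demo_sum_reg_sizes(const char *compat)
    {
            struct device_node *np;
            const __be32 *prop;
            u64 size = 0;
            int len;

            for_each_compatible_node(np, NULL, compat) {
                    prop = of_get_property(np, "reg", &len);
                    if (!prop || len < 16)  /* need 4 cells */
                            continue;
                    /* reg = <addr_hi addr_lo size_hi size_lo> */
                    size += of_read_number(prop + 2, 2);
            }
            return size;
    }
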
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index bf0282775e37..a11436720a8c 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -169,7 +169,7 @@ static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
-static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
@@ -188,7 +188,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs,
len, 1);
goto done_load;
}
@@ -213,7 +213,7 @@ done_load:
return emulated;
}
-static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_fpr_store(struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
@@ -248,7 +248,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
+ emulated = kvmppc_handle_store(vcpu, val, len, 1);
} else {
emulated = EMULATE_DONE;
}
@@ -259,7 +259,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_psq_load(struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
@@ -279,12 +279,12 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if ((r == EMULATE_DO_MMIO) && w) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs,
4, 1);
vcpu->arch.qpr[rs] = tmp[1];
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FQPR | rs,
8, 1);
goto done_load;
}
@@ -302,7 +302,7 @@ done_load:
return emulated;
}
-static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_psq_store(struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
@@ -318,10 +318,10 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if ((r == EMULATE_DO_MMIO) && w) {
- emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
+ emulated = kvmppc_handle_store(vcpu, tmp[0], 4, 1);
} else if (r == EMULATE_DO_MMIO) {
u64 val = ((u64)tmp[0] << 32) | tmp[1];
- emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
+ emulated = kvmppc_handle_store(vcpu, val, 8, 1);
} else {
emulated = EMULATE_DONE;
}
@@ -618,7 +618,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
return EMULATE_DONE;
}
-int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu)
{
u32 inst;
enum emulation_result emulated = EMULATE_DONE;
@@ -680,7 +680,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_LU:
@@ -690,7 +690,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -703,7 +703,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_STU:
@@ -713,7 +713,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -733,7 +733,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
break;
}
case OP_4X_PS_CMPO0:
@@ -747,7 +747,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -824,7 +824,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
break;
}
case OP_4XW_PSQ_STUX:
@@ -834,7 +834,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -922,7 +922,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
@@ -930,7 +930,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -941,7 +941,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
@@ -949,7 +949,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -960,7 +960,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
@@ -968,7 +968,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -979,7 +979,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
@@ -987,7 +987,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1001,7 +1001,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
@@ -1010,7 +1010,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -1022,7 +1022,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
@@ -1031,7 +1031,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1043,7 +1043,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
@@ -1052,7 +1052,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -1064,7 +1064,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
@@ -1073,7 +1073,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1085,7 +1085,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr,
FPU_LS_SINGLE_LOW);
break;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index a0f6813f4560..ef54f917bdaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -700,7 +700,7 @@ static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}
-int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
ulong eaddr, int vec)
{
bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -795,7 +795,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* The guest's PTE is not mapped yet. Map on the host */
if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
/* Exit KVM if mapping failed */
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
if (data)
@@ -808,7 +808,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->stat.mmio_exits++;
vcpu->arch.paddr_accessed = pte.raddr;
vcpu->arch.vaddr_accessed = pte.eaddr;
- r = kvmppc_emulate_mmio(run, vcpu);
+ r = kvmppc_emulate_mmio(vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
}
@@ -992,7 +992,7 @@ static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
enum emulation_result er = EMULATE_FAIL;
if (!(kvmppc_get_msr(vcpu) & MSR_PR))
- er = kvmppc_emulate_instruction(vcpu->run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
/* Couldn't emulate, trigger interrupt in guest */
@@ -1089,8 +1089,7 @@ static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
}
}
-static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
enum emulation_result er;
ulong flags;
@@ -1124,7 +1123,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
vcpu->stat.emulated_inst_exits++;
- er = kvmppc_emulate_instruction(run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
switch (er) {
case EMULATE_DONE:
r = RESUME_GUEST_NV;
@@ -1139,7 +1138,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
r = RESUME_HOST_NV;
break;
case EMULATE_EXIT_USER:
@@ -1198,7 +1197,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* only care about PTEG not found errors, but leave NX alone */
if (shadow_srr1 & 0x40000000) {
int idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+ r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -1248,7 +1247,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
int idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+ r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
} else {
kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
@@ -1292,7 +1291,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOK3S_INTERRUPT_PROGRAM:
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
- r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+ r = kvmppc_exit_pr_progint(vcpu, exit_nr);
break;
case BOOK3S_INTERRUPT_SYSCALL:
{
@@ -1370,7 +1369,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
&last_inst);
if (emul == EMULATE_DONE)
- r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+ r = kvmppc_exit_pr_progint(vcpu, exit_nr);
else
r = RESUME_GUEST;
@@ -1825,8 +1824,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
vfree(vcpu_book3s);
}
-static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int ret;
#ifdef CONFIG_ALTIVEC
unsigned long uninitialized_var(vrsave);
@@ -1834,7 +1834,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
- kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1861,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_fix_ee_before_entry();
- ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ ret = __kvmppc_vcpu_run(run, vcpu);
kvmppc_clear_debug(vcpu);
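
Note the one boundary the conversion deliberately leaves alone: __kvmppc_vcpu_run() keeps its explicit run argument (its implementation is not part of this diff), so the C wrapper derives the pointer once at the top and hands it down. A minimal sketch of that shape, with constants written out (17 == KVM_EXIT_INTERNAL_ERROR, 22 == EINVAL) so it stands alone:

	struct kvm_run  { int exit_reason; };
	struct kvm_vcpu { struct kvm_run *run; int sane; };

	/* Stand-in for the low-level entry, which still takes both arguments. */
	static int low_level_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
	{
		(void)run; (void)vcpu;
		return 0;
	}

	static int vcpu_run(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run;	/* derived once, as above */

		if (!vcpu->sane) {
			run->exit_reason = 17;
			return -22;
		}
		return low_level_vcpu_run(run, vcpu);	/* boundary keeps both */
	}

	int main(void)
	{
		struct kvm_run run = { 0 };
		struct kvm_vcpu vcpu = { .run = &run, .sane = 1 };

		return vcpu_run(&vcpu);
	}
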
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 888afe8d35cc..c0d62a917e20 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -729,13 +729,14 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
return r;
}
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int ret, s;
struct debug_reg debug;
if (!vcpu->arch.sane) {
- kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
@@ -777,7 +778,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
kvmppc_fix_ee_before_entry();
- ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ ret = __kvmppc_vcpu_run(run, vcpu);
/* No need for guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */
@@ -799,11 +800,11 @@ out:
return ret;
}
-static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int emulation_exit(struct kvm_vcpu *vcpu)
{
enum emulation_result er;
- er = kvmppc_emulate_instruction(run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
switch (er) {
case EMULATE_DONE:
/* don't overwrite subtypes, just account kvm_stats */
@@ -820,8 +821,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
__func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
/* For debugging, encode the failing instruction and
* report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
+ vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST;
@@ -833,8 +834,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
-static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
u32 dbsr = vcpu->arch.dbsr;
@@ -953,7 +955,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
}
-static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
enum emulation_result emulated, u32 last_inst)
{
switch (emulated) {
@@ -965,8 +967,8 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
__func__, vcpu->arch.regs.nip);
/* For debugging, encode the failing instruction and
* report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= last_inst;
+ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
+ vcpu->run->hw.hardware_exit_reason |= last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST;
@@ -1023,7 +1025,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
if (emulated != EMULATE_DONE) {
- r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
+ r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
goto out;
}
@@ -1083,7 +1085,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_HV_PRIV:
- r = emulation_exit(run, vcpu);
+ r = emulation_exit(vcpu);
break;
case BOOKE_INTERRUPT_PROGRAM:
@@ -1093,7 +1095,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* We are here because of an SW breakpoint instr,
* so lets return to host to handle.
*/
- r = kvmppc_handle_debug(run, vcpu);
+ r = kvmppc_handle_debug(vcpu);
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
break;
@@ -1114,7 +1116,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
- r = emulation_exit(run, vcpu);
+ r = emulation_exit(vcpu);
break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -1281,7 +1283,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* actually RAM. */
vcpu->arch.paddr_accessed = gpaddr;
vcpu->arch.vaddr_accessed = eaddr;
- r = kvmppc_emulate_mmio(run, vcpu);
+ r = kvmppc_emulate_mmio(vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
@@ -1332,7 +1334,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOKE_INTERRUPT_DEBUG: {
- r = kvmppc_handle_debug(run, vcpu);
+ r = kvmppc_handle_debug(vcpu);
if (r == RESUME_HOST)
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 65b4d337d337..be9da96d9f06 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -70,7 +70,7 @@ void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
-int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
@@ -94,16 +94,12 @@ enum int_class {
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+extern int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 689ff5f90e9e..d8d38aca71bd 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -39,7 +39,7 @@ static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}
-int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 3d0d3ec5be96..64eb833e9f02 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -83,16 +83,16 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
}
#endif
-static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
switch (get_oc(inst)) {
case EHPRIV_OC_DEBUG:
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.address = vcpu->arch.regs.nip;
- run->debug.arch.status = 0;
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
+ vcpu->run->debug.arch.status = 0;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
emulated = EMULATE_EXIT_USER;
*advance = 0;
@@ -125,7 +125,7 @@ static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
@@ -182,8 +182,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_EHPRIV:
- emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
- advance);
+ emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
break;
default:
@@ -197,7 +196,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
if (emulated == EMULATE_FAIL)
- emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
+ emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);
return emulated;
}
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index df9989cf7ba3..d6c1069e9954 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -355,7 +355,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (tlbsel == 1) {
struct vm_area_struct *vma;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
vma = find_vma(kvm->mm, hva);
if (vma && hva >= vma->vm_start &&
@@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
}
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
}
if (likely(!pfnmap)) {
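
Interleaved with the kvm_run cleanup, this file picks up the tree-wide mmap locking API: open-coded down_read()/up_read() on the mm semaphore become mmap_read_lock()/mmap_read_unlock(), and likewise for the write and trylock variants used further down. The wrappers are thin; roughly (an approximation of include/linux/mmap_lock.h, not copied from it):

	#include <linux/mm_types.h>
	#include <linux/rwsem.h>

	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_lock);
	}

	static inline bool mmap_read_trylock(struct mm_struct *mm)
	{
		return down_read_trylock(&mm->mmap_lock) != 0;
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_lock);
	}

	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);
	}

Behaviour is unchanged at this point; the indirection is what lets the lock's implementation (and name) change in one place later.
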
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 6fca38ca791f..ee1147c98cd8 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -191,7 +191,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
/* XXX Should probably auto-generate instruction decoding for a particular core
* from opcode tables in the future. */
-int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{
u32 inst;
int rs, rt, sprn;
@@ -270,9 +270,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
* these are illegal instructions.
*/
if (inst == KVMPPC_INST_SW_BREAKPOINT) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.status = 0;
- run->debug.arch.address = kvmppc_get_pc(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.status = 0;
+ vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
emulated = EMULATE_EXIT_USER;
advance = 0;
} else
@@ -285,7 +285,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
if (emulated == EMULATE_FAIL) {
- emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+ emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
&advance);
if (emulated == EMULATE_AGAIN) {
advance = 0;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 135d0e686622..48272a9b9c30 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -71,7 +71,6 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
*/
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 inst;
enum emulation_result emulated = EMULATE_FAIL;
int advance = 1;
@@ -104,10 +103,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
int instr_byte_swap = op.type & BYTEREV;
if (op.type & SIGNEXT)
- emulated = kvmppc_handle_loads(run, vcpu,
+ emulated = kvmppc_handle_loads(vcpu,
op.reg, size, !instr_byte_swap);
else
- emulated = kvmppc_handle_load(run, vcpu,
+ emulated = kvmppc_handle_load(vcpu,
op.reg, size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -124,10 +123,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
if (op.type & SIGNEXT)
- emulated = kvmppc_handle_loads(run, vcpu,
+ emulated = kvmppc_handle_loads(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1);
else
- emulated = kvmppc_handle_load(run, vcpu,
+ emulated = kvmppc_handle_load(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -164,12 +163,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2;
- emulated = kvmppc_handle_vmx_load(run,
- vcpu, KVM_MMIO_REG_VMX|op.reg,
+ emulated = kvmppc_handle_vmx_load(vcpu,
+ KVM_MMIO_REG_VMX|op.reg,
8, 1);
} else {
vcpu->arch.mmio_vmx_copy_nums = 1;
- emulated = kvmppc_handle_vmx_load(run, vcpu,
+ emulated = kvmppc_handle_vmx_load(vcpu,
KVM_MMIO_REG_VMX|op.reg,
size, 1);
}
@@ -217,7 +216,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size;
}
- emulated = kvmppc_handle_vsx_load(run, vcpu,
+ emulated = kvmppc_handle_vsx_load(vcpu,
KVM_MMIO_REG_VSX|op.reg, io_size_each,
1, op.type & SIGNEXT);
break;
@@ -227,8 +226,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
/* if need byte reverse, op.val has been reversed by
* analyse_instr().
*/
- emulated = kvmppc_handle_store(run, vcpu, op.val,
- size, 1);
+ emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
@@ -250,7 +248,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (op.type & FPCONV)
vcpu->arch.mmio_sp64_extend = 1;
- emulated = kvmppc_handle_store(run, vcpu,
+ emulated = kvmppc_handle_store(vcpu,
VCPU_FPR(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -290,12 +288,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2;
- emulated = kvmppc_handle_vmx_store(run,
- vcpu, op.reg, 8, 1);
+ emulated = kvmppc_handle_vmx_store(vcpu,
+ op.reg, 8, 1);
} else {
vcpu->arch.mmio_vmx_copy_nums = 1;
- emulated = kvmppc_handle_vmx_store(run,
- vcpu, op.reg, size, 1);
+ emulated = kvmppc_handle_vmx_store(vcpu,
+ op.reg, size, 1);
}
break;
@@ -338,7 +336,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size;
}
- emulated = kvmppc_handle_vsx_store(run, vcpu,
+ emulated = kvmppc_handle_vsx_store(vcpu,
op.reg, io_size_each, 1);
break;
}
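
Every dispatch in kvmppc_emulate_loadstore() above now lands on the same reworked MMIO helpers. Collected from the powerpc.c hunks below, their post-patch signatures are:

	int kvmppc_handle_load(struct kvm_vcpu *vcpu, unsigned int rt,
			       unsigned int bytes, int is_default_endian);
	int kvmppc_handle_loads(struct kvm_vcpu *vcpu, unsigned int rt,
				unsigned int bytes, int is_default_endian);
	int kvmppc_handle_store(struct kvm_vcpu *vcpu, u64 val,
				unsigned int bytes, int is_default_endian);
	int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu, unsigned int rt,
				   unsigned int bytes, int is_default_endian,
				   int mmio_sign_extend);
	int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu, int rs,
				    unsigned int bytes, int is_default_endian);
	int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, unsigned int rt,
				   unsigned int bytes, int is_default_endian);
	int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, unsigned int rs,
				    unsigned int bytes, int is_default_endian);
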
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index 3dfae0cb6228..315c94946bad 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -5,10 +5,10 @@
* Copyright (C) 2010 Alexander Graf (agraf@suse.de)
*/
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
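
fpu.S above is one instance of the include cleanup that runs through the rest of this series: <asm/pgtable.h> is replaced by <linux/pgtable.h> (the generic header, which itself pulls in the arch one), with the new include kept among the other <linux/...> lines ahead of the <asm/...> block. Files that turn out not to need the header at all simply lose the line. Per file, the whole transformation is:

	-#include <asm/pgtable.h>		/* old: arch header included directly */
	+#include <linux/pgtable.h>	/* new: generic wrapper over asm/pgtable.h */
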
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 27ccff612903..dd7d141e33e8 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -279,7 +279,7 @@ out:
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
-int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
enum emulation_result er;
int r;
@@ -295,7 +295,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = RESUME_GUEST;
break;
case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
/* We must reload nonvolatiles because "update" load/store
* instructions modify register state. */
/* Future optimization: only reload non-volatiles if they were
@@ -1107,9 +1107,9 @@ static inline u32 dp_to_sp(u64 fprd)
#define dp_to_sp(x) (x)
#endif /* CONFIG_PPC_FPU */
-static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
u64 uninitialized_var(gpr);
if (run->mmio.len > sizeof(gpr)) {
@@ -1219,10 +1219,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
-static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int sign_extend)
{
+ struct kvm_run *run = vcpu->run;
int idx, ret;
bool host_swabbed;
@@ -1256,7 +1257,7 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (!ret) {
- kvmppc_complete_mmio_load(vcpu, run);
+ kvmppc_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
@@ -1264,24 +1265,24 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian)
{
- return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+ return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
-int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian)
{
- return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
+ return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}
#ifdef CONFIG_VSX
-int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend)
{
@@ -1292,13 +1293,13 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
while (vcpu->arch.mmio_vsx_copy_nums) {
- emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, mmio_sign_extend);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
@@ -1307,9 +1308,10 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
#endif /* CONFIG_VSX */
-int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_store(struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian)
{
+ struct kvm_run *run = vcpu->run;
void *data = run->mmio.data;
int idx, ret;
bool host_swabbed;
@@ -1423,7 +1425,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
return result;
}
-int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int rs, unsigned int bytes, int is_default_endian)
{
u64 val;
@@ -1439,13 +1441,13 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
return EMULATE_FAIL;
- emulated = kvmppc_handle_store(run, vcpu,
+ emulated = kvmppc_handle_store(vcpu,
val, bytes, is_default_endian);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
@@ -1454,19 +1456,19 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL;
int r;
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
- emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+ emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
} else {
- emulated = kvmppc_handle_vsx_store(run, vcpu,
+ emulated = kvmppc_handle_vsx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
}
@@ -1490,7 +1492,7 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
-int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian)
{
enum emulation_result emulated = EMULATE_DONE;
@@ -1499,13 +1501,13 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
while (vcpu->arch.mmio_vmx_copy_nums) {
- emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, 0);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++;
}
@@ -1585,7 +1587,7 @@ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian)
{
u64 val = 0;
@@ -1620,12 +1622,12 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
}
- emulated = kvmppc_handle_store(run, vcpu, val, bytes,
+ emulated = kvmppc_handle_store(vcpu, val, bytes,
is_default_endian);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++;
}
@@ -1633,19 +1635,19 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL;
int r;
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
- emulated = kvmppc_handle_vmx_load(run, vcpu,
+ emulated = kvmppc_handle_vmx_load(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
} else {
- emulated = kvmppc_handle_vmx_store(run, vcpu,
+ emulated = kvmppc_handle_vmx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
}
@@ -1775,7 +1777,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write)
- kvmppc_complete_mmio_load(vcpu, run);
+ kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
vcpu->arch.mmio_vsx_copy_nums--;
@@ -1783,7 +1785,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
- r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+ r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
goto out;
@@ -1797,7 +1799,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
if (vcpu->arch.mmio_vmx_copy_nums > 0) {
- r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+ r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
goto out;
@@ -1830,7 +1832,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
if (run->immediate_exit)
r = -EINTR;
else
- r = kvmppc_vcpu_run(run, vcpu);
+ r = kvmppc_vcpu_run(vcpu);
kvm_sigset_deactivate(vcpu);
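
The kvm_arch_vcpu_ioctl_run() hunks above show where deferred MMIO work resumes after userspace has serviced an exit. A compilable toy of that round trip (structures pared down to the fields touched; the final value assumes a little-endian host):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct kvm_run {
		int exit_reason;
		struct {
			uint64_t phys_addr;
			uint8_t  data[8];
			uint32_t len;
			int      is_write;
		} mmio;
	};
	struct kvm_vcpu {
		struct kvm_run *run;
		uint64_t gpr[32];
		int io_gpr;
		int mmio_needed;
	};

	/* Guest load hits MMIO: record the request, exit to userspace. */
	static void start_mmio_load(struct kvm_vcpu *vcpu, int rt,
				    uint64_t pa, uint32_t len)
	{
		struct kvm_run *run = vcpu->run;

		run->exit_reason = 6;	/* KVM_EXIT_MMIO */
		run->mmio.phys_addr = pa;
		run->mmio.len = len;
		run->mmio.is_write = 0;
		vcpu->io_gpr = rt;
		vcpu->mmio_needed = 1;
	}

	/* Re-entry path: like kvmppc_complete_mmio_load() after this series,
	 * it needs only the vcpu. */
	static void complete_mmio_load(struct kvm_vcpu *vcpu)
	{
		struct kvm_run *run = vcpu->run;
		uint64_t gpr = 0;

		memcpy(&gpr, run->mmio.data, run->mmio.len);
		vcpu->gpr[vcpu->io_gpr] = gpr;
		vcpu->mmio_needed = 0;
	}

	int main(void)
	{
		struct kvm_run run = { 0 };
		struct kvm_vcpu vcpu = { .run = &run };

		start_mmio_load(&vcpu, 3, 0xc0000000, 4);
		memcpy(run.mmio.data, "\x2a\x00\x00\x00", 4); /* userspace reply */
		complete_mmio_load(&vcpu);
		printf("r3 = %llu\n", (unsigned long long)vcpu.gpr[3]); /* 42 */
		return 0;
	}
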
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 8a1e3b0047f1..4a61a971c34e 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -472,9 +472,9 @@ TRACE_EVENT(kvmppc_run_vcpu_enter,
);
TRACE_EVENT(kvmppc_run_vcpu_exit,
- TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
+ TP_PROTO(struct kvm_vcpu *vcpu),
- TP_ARGS(vcpu, run),
+ TP_ARGS(vcpu),
TP_STRUCT__entry(
__field(int, vcpu_id)
@@ -484,7 +484,7 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
- __entry->exit = run->exit_reason;
+ __entry->exit = vcpu->run->exit_reason;
__entry->ret = vcpu->arch.ret;
),
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index e64546b8875c..0a051dfeb177 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 2702e8762c0d..923ad8f374eb 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -14,9 +14,9 @@
* hash table, so this file is not used on them.)
*/
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index a6dcc708eee3..03b6ba54460e 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -320,7 +320,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
if (!Hash)
return;
- pmd = pmd_ptr(mm, ea);
+ pmd = pmd_off(mm, ea);
if (!pmd_none(*pmd))
add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index dc9039a170aa..b6c7427daa6f 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -90,7 +90,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
if (start >= end)
return;
end = (end - 1) | ~PAGE_MASK;
- pmd = pmd_ptr(mm, start);
+ pmd = pmd_off(mm, start);
for (;;) {
pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
if (pmd_end > end)
@@ -129,7 +129,7 @@ void flush_tlb_mm(struct mm_struct *mm)
/*
* It is safe to go down the mm's list of vmas when called
- * from dup_mmap, holding mmap_sem. It would also be safe from
+ * from dup_mmap, holding mmap_lock. It would also be safe from
* unmap_region or exit_mmap, but not from vmtruncate on SMP -
* but it seems dup_mmap is the only SMP case which gets here.
*/
@@ -148,7 +148,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
return;
}
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
- pmd = pmd_ptr(mm, vmaddr);
+ pmd = pmd_off(mm, vmaddr);
if (!pmd_none(*pmd))
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
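
The pmd_ptr()/pmd_ptr_k() calls here, and in the kasan and nohash files below, become the generic pmd_off()/pmd_off_k() helpers. Their definitions amount to a walk down the five-level page-table tree — approximately the following (a sketch of the generic header, shown for orientation; folded levels compile away on two-level configurations like book3s32):

	static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
	{
		return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va),
					     va), va);
	}

	static inline pmd_t *pmd_off_k(unsigned long va)
	{
		return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va),
					     va), va);
	}
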
diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
index eefa89c6117b..25acb9c5ee1b 100644
--- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index d2d8237ea9d5..cf20e5229ce1 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -14,11 +14,11 @@
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 8b4b0a602158..2a99167afbaf 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>
@@ -238,7 +237,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* to hugepage, we first clear the pmd, then invalidate all
* the PTE entries. The assumption here is that any low level
* page fault will see a none pmd and take the slow path that
- * will wait on mmap_sem. But we could very well be in a
+ * will wait on mmap_lock. But we could very well be in a
* hash_page with local ptep pointer value. Such a hash page
* can result in adding new HPTE entries for normal subpages.
* That means we could be modifying the page content as we
@@ -252,7 +251,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* Now invalidate the hpte entries in the range
* covered by pmd. This make sure we take a
* fault and will find the pmd as none, which will
- * result in a major fault which takes mmap_sem and
+ * result in a major fault which takes mmap_lock and
* hence wait for collapse to complete. Without this
* the __collapse_huge_page_copy can result in copying
* the old content.
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0124003e60d0..468169e33c86 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -35,10 +35,10 @@
#include <linux/pkeys.h>
#include <linux/hugetlb.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index fa05bbd1f682..563faa10bb66 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -96,7 +96,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
goto unlock_exit;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
sizeof(struct vm_area_struct *);
chunk = min(chunk, entries);
@@ -114,7 +114,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
pinned += ret;
break;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (pinned != entries) {
if (!ret)
ret = -EFAULT;
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index cab06331c0c0..c812b401b66c 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -2,7 +2,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 8acb96de0e48..bb00e0cba119 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -17,7 +17,6 @@
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 8141e8b40ee5..156c38f89511 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -10,7 +10,6 @@
*/
#include <asm/asm-prototypes.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
@@ -21,6 +20,7 @@
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 25a0c044bd93..60c6ea16a972 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -11,7 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
/*
@@ -94,7 +94,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
size_t nw;
unsigned long next, limit;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt)
@@ -129,7 +129,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
}
err_out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -219,13 +219,13 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
return -EFAULT;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt) {
/*
* Allocate subpage prot table if not already done.
- * Do this with mmap_sem held
+ * Do this with mmap_lock held
*/
spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
if (!spt) {
@@ -269,11 +269,11 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (addr + (nw << PAGE_SHIFT) > next)
nw = (next - addr) >> PAGE_SHIFT;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (__copy_from_user(spp, map, nw * sizeof(u32)))
return -EFAULT;
map += nw;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/* now flush any existing HPTEs for the range */
hpte_flush_range(mm, addr, nw);
@@ -282,6 +282,6 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
spt->maxaddr = limit;
err = 0;
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return err;
}
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index beb060b96632..b83abbead4a2 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -33,7 +33,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (mm->pgd == NULL)
return -EFAULT;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = -EFAULT;
vma = find_vma(mm, ea);
if (!vma)
@@ -82,7 +82,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
current->min_flt++;
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2393ed9d84bb..641fc5f3d7dd 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -35,7 +35,6 @@
#include <asm/firmware.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
@@ -109,7 +108,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return __bad_area_nosemaphore(regs, address, si_code);
}
@@ -139,13 +138,13 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
* 2. T1 : set AMR to deny access to pkey=4, touches, page
* 3. T1 : faults...
* 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
- * 5. T1 : enters fault handler, takes mmap_sem, etc...
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
pkey = vma_pkey(vma);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* If we are in kernel mode, bail out with a SEGV, this will
@@ -526,9 +525,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
- * We want to do this outside mmap_sem, because reading code around nip
+ * We want to do this outside mmap_lock, because reading code around nip
* can result in fault, which will cause a deadlock when called with
- * mmap_sem held
+ * mmap_lock held
*/
if (is_user)
flags |= FAULT_FLAG_USER;
@@ -540,7 +539,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
@@ -552,12 +551,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!is_user && !search_exception_tables(regs->nip))
return bad_area_nosemaphore(regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -581,7 +580,7 @@ retry:
if (!must_retry)
return bad_area(regs, address);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (fault_in_pages_readable((const char __user *)regs->nip,
sizeof(unsigned int)))
return bad_area_nosemaphore(regs, address);
@@ -616,7 +615,7 @@ good_area:
return user_mode(regs) ? 0 : SIGBUS;
/*
- * Handle the retry right now, the mmap_sem has been released in that
+ * Handle the retry right now, the mmap_lock has been released in that
* case.
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
@@ -626,7 +625,7 @@ good_area:
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (unlikely(fault & VM_FAULT_ERROR))
return mm_fault_error(regs, address, fault);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 5b3d01404266..e9bfbccd975d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -19,7 +19,6 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 42ef7a6e6098..8e0d792ac296 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -17,8 +17,8 @@
#undef DEBUG
#include <linux/string.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/kup.h>
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 36c39bd37256..5a5469eb3174 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -32,7 +32,6 @@
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c7ce4ec5060e..bc73abf0bc25 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -47,7 +47,6 @@
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
index db4ef44af22f..569d98a41881 100644
--- a/arch/powerpc/mm/kasan/8xx.c
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -10,7 +10,7 @@
static int __init
kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
{
- pmd_t *pmd = pmd_ptr_k(k_start);
+ pmd_t *pmd = pmd_off_k(k_start);
unsigned long k_cur, k_next;
for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
@@ -59,7 +59,7 @@ int __init kasan_init_region(void *start, size_t size)
return ret;
for (; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
index 4bc491a4a1fd..a32b4640b9de 100644
--- a/arch/powerpc/mm/kasan/book3s_32.c
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -46,7 +46,7 @@ int __init kasan_init_region(void *start, size_t size)
kasan_update_early_region(k_start, k_cur, __pte(0));
for (; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index c42085801c04..0760e1e754e4 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -33,7 +33,7 @@ int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
pmd_t *pmd;
unsigned long k_cur, k_next;
- pmd = pmd_ptr_k(k_start);
+ pmd = pmd_off_k(k_start);
for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
pte_t *new;
@@ -69,7 +69,7 @@ int __init __weak kasan_init_region(void *start, size_t size)
return -ENOMEM;
for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
@@ -86,7 +86,7 @@ kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
phys_addr_t pa = __pa(kasan_early_shadow_page);
for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_ptr_k(k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
pte_t *ptep = pte_offset_kernel(pmd, k_cur);
if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
@@ -184,7 +184,7 @@ void __init kasan_early_init(void)
unsigned long addr = KASAN_SHADOW_START;
unsigned long end = KASAN_SHADOW_END;
unsigned long next;
- pmd_t *pmd = pmd_ptr_k(addr);
+ pmd_t *pmd = pmd_off_k(addr);
BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5f7fe13211e9..c2c11eb8dcfc 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -38,7 +38,6 @@
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -577,7 +576,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
flush_dcache_page(pg);
}
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len)
{
unsigned long maddr;
@@ -586,7 +585,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
flush_icache_range(maddr, maddr + len);
kunmap(page);
}
-EXPORT_SYMBOL(flush_icache_user_range);
/*
* System memory should not be in /proc/iomem but various tools expect it
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index 4eaf462cda30..13e74bc39ba5 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -36,7 +36,6 @@
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
@@ -104,7 +103,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;
- pmdp = pmd_ptr_k(v);
+ pmdp = pmd_off_k(v);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
@@ -119,7 +118,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;
- pmdp = pmd_ptr_k(v);
+ pmdp = pmd_off_k(v);
*pmdp = __pmd(val);
v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 286441bbbe49..92e8929cbe3e 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -74,7 +74,7 @@ static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
pgprot_t prot, int psize, bool new)
{
- pmd_t *pmdp = pmd_ptr_k(va);
+ pmd_t *pmdp = pmd_off_k(va);
pte_t *ptep;
if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/fsl_booke.c
index b4eb06ceb189..c06dfbb771f4 100644
--- a/arch/powerpc/mm/nohash/fsl_booke.c
+++ b/arch/powerpc/mm/nohash/fsl_booke.c
@@ -41,7 +41,6 @@
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 1f110c3c48fb..d5e2704d0096 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -6,6 +6,7 @@
* Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
*/
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
@@ -13,7 +14,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
-#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index cea5b4e25a24..1136257c3a99 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -264,7 +264,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#if defined(CONFIG_PPC_8xx)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
- pmd_t *pmd = pmd_ptr(mm, addr);
+ pmd_t *pmd = pmd_off(mm, addr);
pte_basic_t val;
pte_basic_t *entry = &ptep->pte;
int num = is_hugepd(*((hugepd_t *)pmd)) ? 1 : SZ_512K / SZ_4K;
@@ -306,7 +306,7 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
pmd = pmd_offset(pud, addr);
/*
* khugepaged to collapse normal pages to hugepage, first set
- * pmd to none to force page fault/gup to take mmap_sem. After
+ * pmd to none to force page fault/gup to take mmap_lock. After
* pmd is set to none, we do a pte_clear which does this assertion
* so if we find pmd none, return.
*/
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 05902bbff8d6..6eb4eab79385 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -24,7 +24,6 @@
#include <linux/memblock.h>
#include <linux/slab.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
@@ -41,7 +40,7 @@ notrace void __init early_ioremap_init(void)
{
unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
pte_t *ptep = (pte_t *)early_fixmap_pagetable;
- pmd_t *pmdp = pmd_ptr_k(addr);
+ pmd_t *pmdp = pmd_off_k(addr);
for (; (s32)(FIXADDR_TOP - addr) > 0;
addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
@@ -79,7 +78,7 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
int err = -ENOMEM;
/* Use upper 10 bits of VA to index the first level map */
- pd = pmd_ptr_k(va);
+ pd = pmd_off_k(va);
/* Use middle 10 bits of VA to index the second-level map */
if (likely(slab_is_available()))
pg = pte_alloc_kernel(pd, va);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 1f86a88fd4bb..bb43a8c04bee 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -35,7 +35,6 @@
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c
index 4bc350736c1d..8a797dcbf475 100644
--- a/arch/powerpc/mm/ptdump/8xx.c
+++ b/arch/powerpc/mm/ptdump/8xx.c
@@ -5,7 +5,7 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c
index cebb58c7e289..e29b338d499f 100644
--- a/arch/powerpc/mm/ptdump/bats.c
+++ b/arch/powerpc/mm/ptdump/bats.c
@@ -6,8 +6,8 @@
* This dumps the content of BATS
*/
+#include <linux/pgtable.h>
#include <asm/debugfs.h>
-#include <asm/pgtable.h>
#include <asm/cpu_has_feature.h>
#include "ptdump.h"
diff --git a/arch/powerpc/mm/ptdump/book3s64.c b/arch/powerpc/mm/ptdump/book3s64.c
index 0dfca72cb9bd..14f73868db66 100644
--- a/arch/powerpc/mm/ptdump/book3s64.c
+++ b/arch/powerpc/mm/ptdump/book3s64.c
@@ -5,7 +5,7 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index 6aaeb1eb3b9c..a2c33efc7ce8 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -15,7 +15,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 3209f78297ad..de6e05ef871c 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -19,7 +19,6 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index 784f8df17f73..c005fe041c18 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -5,7 +5,7 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 0caec3d8d436..df59d0bb121f 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -332,7 +332,7 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
fput(exe_file);
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
continue;
@@ -349,13 +349,13 @@ get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
*spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
pr_debug("got dcookie for %pD\n", vma->vm_file);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out:
return app_cookie;
fail_no_image_cookie:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Cannot find dcookie for SPU binary\n",
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index dd5051015008..6c028ee513c0 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index 8aa951003141..f7d888d39cd3 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index b63086b663ef..814d1c2c2b9c 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
@@ -30,11 +29,9 @@ int read_user_stack_slow(void __user *ptr, void *buf, int nb)
unsigned long addr = (unsigned long) ptr;
unsigned long offset;
struct page *page;
- int nrpages;
void *kaddr;
- nrpages = __get_user_pages_fast(addr, 1, 1, &page);
- if (nrpages == 1) {
+ if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
kaddr = page_address(page);
/* align address to page boundary */
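
read_user_stack_slow() switches from counting pages out of __get_user_pages_fast() to the boolean single-page helper, with FOLL_WRITE standing in for the old write=1 argument. Its contract is roughly this (a sketch, not the real declaration in include/linux/mm.h):

	/* Pins exactly one user page without taking mmap_lock; returns true
	 * with the page stored in *pagep on success, false on any failure. */
	bool get_user_page_fast_only(unsigned long addr, unsigned int gup_flags,
				     struct page **pagep);

	/* Typical use, mirroring the hunk above: */
	struct page *page;

	if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
		void *kaddr = page_address(page);
		/* ... copy out of kaddr ... */
		put_page(page);		/* drop the reference the helper took */
	}
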
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 27ac38f7e1a9..6aa8defb5857 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -12,11 +12,11 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 915ab6710b93..172d2b7cfeb7 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -22,8 +22,8 @@
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/atomic.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index a43b8c30157c..a4127b0b161f 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -13,8 +13,8 @@
#include <linux/kernel.h>
#include <linux/of_fdt.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index dd97ef277276..e4acf5ce6b07 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -24,8 +24,8 @@
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/atomic.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 48f7d96ae37d..fda108bae95f 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -16,9 +16,9 @@
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index dba3aa73c062..87f524e4b09c 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -10,10 +10,10 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/pgtable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index 4db4ca2e1222..c58b6f1c40e3 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -34,7 +34,6 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/8xx/micropatch.c b/arch/powerpc/platforms/8xx/micropatch.c
index c80bd7afd6c5..aed4bc75f352 100644
--- a/arch/powerpc/platforms/8xx/micropatch.c
+++ b/arch/powerpc/platforms/8xx/micropatch.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm.h>
#include <asm/cpm1.h>
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 0be212a27254..c2a0678d85db 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -12,9 +12,9 @@
#include <linux/export.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 5927ead4aed2..c0ab62ba6f16 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -23,9 +23,9 @@
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 6af3a6e600a7..9068edef71f7 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -15,11 +15,11 @@
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
#include <asm/cpu_has_feature.h>
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 855eedb8d7d7..edefa785d2ef 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -31,7 +31,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 85d795d96a27..c855a0aeb49c 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -21,12 +21,12 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 026b72c0a452..210785f59271 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -10,8 +10,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index e44427c24585..62d90a5e23d1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -325,7 +325,7 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
return VM_FAULT_SIGBUS;
/*
- * Because we release the mmap_sem, the context may be destroyed while
+ * Because we release the mmap_lock, the context may be destroyed while
* we're in spu_wait. Grab an extra reference so it isn't destroyed
* in the meantime.
*/
@@ -334,8 +334,8 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
/*
* We have to wait for context to be loaded before we have
* pages to hand out to the user, but we don't want to wait
- * with the mmap_sem held.
- * It is possible to drop the mmap_sem here, but then we need
+ * with the mmap_lock held.
+ * It is possible to drop the mmap_lock here, but then we need
* to return VM_FAULT_NOPAGE because the mappings may have
* changed.
*/
@@ -343,11 +343,11 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
goto refault;
if (ctx->state == SPU_STATE_SAVED) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
} else {
area = ctx->spu->problem_phys + ps_offs;
ret = vmf_insert_pfn(vmf->vma, vmf->address,
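The lock juggling above follows the usual fault-retry idiom: never sleep in spufs_wait() with the mmap_lock held, retake it afterwards, and return VM_FAULT_NOPAGE so the core handler retries against possibly changed mappings. A condensed, hypothetical sketch of that shape:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/wait.h>

static vm_fault_t wait_then_retry(wait_queue_head_t *wq, bool *ready)
{
	/* Never block with the task's mmap_lock held. */
	mmap_read_unlock(current->mm);
	wait_event(*wq, READ_ONCE(*ready));
	mmap_read_lock(current->mm);

	/*
	 * The mappings may have changed while the lock was dropped,
	 * so let the core fault handler start over.
	 */
	return VM_FAULT_NOPAGE;
}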
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index b020c757d2bf..b2c2bf35b76c 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -8,9 +8,9 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 65a7e01a8f7d..c45435aa5e36 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -34,7 +34,6 @@
#include <linux/timer.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/dma.h>
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index f7bb6cb8d1e3..e30cd2915e54 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -16,12 +16,12 @@
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 15b2c6eb506d..f7e66a2005b4 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -42,7 +42,6 @@
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c
index 701c4e098fe9..78209bb7629c 100644
--- a/arch/powerpc/platforms/maple/time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -23,7 +23,6 @@
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 95fb4feb6ccc..f002b0fa69b8 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -51,7 +51,6 @@
#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 9969c07035b6..eb23264910e1 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -30,13 +30,13 @@
#include <linux/hardirq.h>
#include <linux/cpu.h>
#include <linux/compiler.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index b36ddee17c87..31d6213a6c8f 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -28,7 +28,6 @@
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/nvram.h>
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 25db70be4c9c..266a6ca5e15e 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -127,7 +127,7 @@ static void update_csb(struct vas_window *window,
return;
}
- use_mm(window->mm);
+ kthread_use_mm(window->mm);
rc = copy_to_user(csb_addr, &csb, sizeof(csb));
/*
* User space polls on csb.flags (first byte). So add barrier
@@ -139,7 +139,7 @@ static void update_csb(struct vas_window *window,
smp_mb();
rc = copy_to_user(csb_addr, &csb, sizeof(u8));
}
- unuse_mm(window->mm);
+ kthread_unuse_mm(window->mm);
put_task_struct(tsk);
/* Success */
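use_mm()/unuse_mm() become kthread_use_mm()/kthread_unuse_mm(), renamed to make explicit that only a kernel thread may temporarily adopt a user mm. A hedged sketch of the idiom, with a hypothetical helper name:

#include <linux/kthread.h>
#include <linux/uaccess.h>

/* Runs in a kthread: borrow a user mm long enough to copy a result out. */
static int push_result_to_user(struct mm_struct *mm, void __user *dst,
			       const void *src, size_t len)
{
	int rc;

	kthread_use_mm(mm);
	rc = copy_to_user(dst, src, len) ? -EFAULT : 0;
	kthread_unuse_mm(mm);
	return rc;
}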
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index e4ed5317f117..fd26f3d21d7b 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -21,10 +21,10 @@
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 64d18f4bf093..2db8469e475f 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -43,7 +43,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index ad61e90032da..6891710833be 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -20,12 +20,12 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 07718b9a2c99..68538b8329f7 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -39,7 +39,6 @@
#include <asm/irq.h>
#include <asm/mpc8260.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cpm2.h>
#include <asm/rheap.h>
#include <asm/fs_pd.h>
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index f6c665dac725..a3aeaa5f0f1b 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -15,7 +15,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of_platform.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/fsl_85xx_cache_sram.h>
#include "fsl_85xx_cache_ctlr.h"
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index a3a72b780e67..b0426f28946a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -29,11 +29,11 @@
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/ratelimit.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a001711863e5..7efe4bc3ccf6 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -35,7 +35,6 @@
#include <asm/machdep.h>
#include <asm/xmon.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/plpar_wrappers.h>
@@ -3919,7 +3918,7 @@ static void sysrq_handle_xmon(int key)
xmon_init(0);
}
-static struct sysrq_key_op sysrq_xmon_op = {
+static const struct sysrq_key_op sysrq_xmon_op = {
.handler = sysrq_handle_xmon,
.help_msg = "xmon(x)",
.action_msg = "Entering xmon",
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c733007b90ab..128192e14ff2 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -12,64 +12,70 @@ config 32BIT
config RISCV
def_bool y
- select OF
- select OF_EARLY_FLATTREE
- select OF_IRQ
+ select ARCH_CLOCKSOURCE_INIT
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_MMIOWB
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_DIRECT_MAP
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
select COMMON_CLK
+ select EDAC_SUPPORT
+ select GENERIC_ARCH_TOPOLOGY if SMP
+ select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
+ select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+ select GENERIC_IOREMAP
+ select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
+ select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
+ select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
- select GENERIC_SMP_IDLE_THREAD
- select GENERIC_ATOMIC64 if !64BIT
- select GENERIC_IOREMAP
- select GENERIC_PTDUMP if MMU
+ select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB_QXFER_PKT
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
+ select HAVE_COPY_THREAD_TLS
select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_EBPF_JIT if MMU
select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GENERIC_VDSO if MMU && 64BIT
+ select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
- select SPARSE_IRQ
- select SYSCTL_EXCEPTION_TRACE
- select HAVE_ARCH_TRACEHOOK
- select HAVE_PCI
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
- select THREAD_INFO_IN_TASK
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_IRQ
select PCI_DOMAINS_GENERIC if PCI
select PCI_MSI if PCI
+ select RISCV_INTC
select RISCV_TIMER
- select GENERIC_IRQ_MULTI_HANDLER
- select GENERIC_ARCH_TOPOLOGY if SMP
- select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_MMIOWB
- select ARCH_HAS_DEBUG_VIRTUAL if MMU
- select HAVE_EBPF_JIT if MMU
- select EDAC_SUPPORT
- select ARCH_HAS_GIGANTIC_PAGE
- select ARCH_HAS_SET_DIRECT_MAP
- select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_STRICT_KERNEL_RWX if MMU
- select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT
- select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
- select HAVE_ARCH_MMAP_RND_BITS if MMU
- select ARCH_HAS_GCOV_PROFILE_ALL
- select HAVE_COPY_THREAD_TLS
- select HAVE_ARCH_KASAN if MMU && 64BIT
- select HAVE_ARCH_KGDB
- select HAVE_ARCH_KGDB_QXFER_PKT
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -196,11 +202,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_FUNCTION_TRACER
- select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if MMU
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select SWIOTLB if MMU
endchoice
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index c8677c75f82c..23ff70350992 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -8,65 +8,6 @@
#include <linux/mm.h>
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-
-/*
- * The cache doesn't need to be flushed when TLB entries change when
- * the cache is mapped to physical memory, not virtual memory
- */
-static inline void flush_cache_all(void)
-{
-}
-
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_dup_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_cache_range(struct vm_area_struct *vma,
- unsigned long start,
- unsigned long end)
-{
-}
-
-static inline void flush_cache_page(struct vm_area_struct *vma,
- unsigned long vmaddr,
- unsigned long pfn)
-{
-}
-
-static inline void flush_dcache_mmap_lock(struct address_space *mapping)
-{
-}
-
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
-{
-}
-
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-
-static inline void flush_cache_vmap(unsigned long start, unsigned long end)
-{
-}
-
-static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
-{
-}
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
- } while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
static inline void local_flush_icache_all(void)
{
asm volatile ("fence.i" ::: "memory");
@@ -79,13 +20,15 @@ static inline void flush_dcache_page(struct page *page)
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
* RISC-V doesn't have an instruction to flush parts of the instruction cache,
* so instead we just flush the whole thing.
*/
#define flush_icache_range(start, end) flush_icache_all()
-#define flush_icache_user_range(vma, pg, addr, len) flush_icache_mm(vma->vm_mm, 0)
+#define flush_icache_user_page(vma, pg, addr, len) \
+ flush_icache_mm(vma->vm_mm, 0)
#ifndef CONFIG_SMP
@@ -105,4 +48,6 @@ void flush_icache_mm(struct mm_struct *mm, bool local);
#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+#include <asm-generic/cacheflush.h>
+
#endif /* _ASM_RISCV_CACHEFLUSH_H */
diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h
new file mode 100644
index 000000000000..482185566b0c
--- /dev/null
+++ b/arch/riscv/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 2368d49eb4ef..1ff075a8dfc7 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/sizes.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_MMU
/*
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 0f477206a4ed..3835c3295dc5 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -12,8 +12,8 @@
#define _ASM_RISCV_IO_H
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <asm/mmiowb.h>
-#include <asm/pgtable.h>
/*
* MMIO access functions are separated out to break dependency cycles
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 6e1b0e0325eb..9807ad164015 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -10,11 +10,6 @@
#include <linux/interrupt.h>
#include <linux/linkage.h>
-#define NR_IRQS 0
-
-void riscv_timer_interrupt(void);
-void riscv_software_interrupt(void);
-
#include <asm-generic/irq.h>
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
index b47045cb85ce..b04028c6218c 100644
--- a/arch/riscv/include/asm/kasan.h
+++ b/arch/riscv/include/asm/kasan.h
@@ -8,8 +8,6 @@
#ifdef CONFIG_KASAN
-#include <asm/pgtable.h>
-
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index b15f70a1fdfa..f3b0da64c6c8 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -70,13 +70,6 @@ static inline struct page *pud_page(pud_t pud)
return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
}
-#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index d50706ea1c94..eaea1f717010 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -173,16 +173,6 @@ static inline unsigned long _pgd_pfn(pgd_t pgd)
return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}
-#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-/* Locate an entry in the page global directory */
-static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
-{
- return mm->pgd + pgd_index(addr);
-}
-/* Locate an entry in the kernel page global directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, (addr))
-
static inline struct page *pmd_page(pmd_t pmd)
{
return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
@@ -209,16 +199,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
-{
- return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
-}
-
-#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) ((void)(pte))
-
static inline int pte_present(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -496,8 +476,6 @@ void paging_init(void);
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#include <asm-generic/pgtable.h>
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_RISCV_PGTABLE_H */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3ddb798264f1..bdddcd5c1b71 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -8,6 +8,8 @@
#include <linux/const.h>
+#include <vdso/processor.h>
+
#include <asm/ptrace.h>
/*
@@ -58,16 +60,6 @@ static inline void release_thread(struct task_struct *dead_task)
extern unsigned long get_wchan(struct task_struct *p);
-static inline void cpu_relax(void)
-{
-#ifdef __riscv_muldiv
- int dummy;
- /* In lieu of a halt instruction, induce a long-latency stall. */
- __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
-#endif
- barrier();
-}
-
static inline void wait_for_interrupt(void)
{
__asm__ __volatile__ ("wfi");
@@ -75,6 +67,7 @@ static inline void wait_for_interrupt(void)
struct device_node;
int riscv_of_processor_hartid(struct device_node *node);
+int riscv_of_parent_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index f4c7cfda6b7f..40bb1c15a731 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -28,6 +28,9 @@ void show_ipi_stats(struct seq_file *p, int prec);
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
+/* Called from C code, this handles an IPI. */
+void handle_IPI(struct pt_regs *regs);
+
/* Hook for the generic smp_call_function_many() routine. */
void arch_send_call_function_ipi_mask(struct cpumask *mask);
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
index 7a7fce63c474..8454f746bbfd 100644
--- a/arch/riscv/include/asm/vdso.h
+++ b/arch/riscv/include/asm/vdso.h
@@ -10,8 +10,10 @@
#include <linux/types.h>
+#ifndef GENERIC_TIME_VSYSCALL
struct vdso_data {
};
+#endif
/*
* The VDSO symbols are mapped into Linux so we can just use regular symbol
diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..df6ea65c1dec
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..c8e818688ec1
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_gettimeofday;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_gettime;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_getres;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+{
+ /*
+ * The purpose of csr_read(CSR_TIME) is to trap the system into
+ * M-mode to obtain the value of CSR_TIME. Hence, unlike other
+ * architectures, no fence instructions surround the csr_read().
+ */
+ return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..82a5693b1861
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+ barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..82fd5d83bd60
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 40a3c442ac5f..6d59e6906fdd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -44,6 +44,22 @@ int riscv_of_processor_hartid(struct device_node *node)
return hart;
}
+/*
+ * Find the hart ID of the CPU DT node under which the given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the hart ID from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv"))
+ return riscv_of_processor_hartid(node);
+ }
+
+ return -1;
+}
+
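riscv_of_parent_hartid() is meant for drivers whose DT node hangs below a CPU node, typically the per-hart interrupt controller enabled elsewhere in this series. A hypothetical init routine might use it like this:

#include <linux/errno.h>
#include <linux/of.h>
#include <asm/processor.h>

static int __init my_intc_probe(struct device_node *node)
{
	int hartid;

	/* The per-hart INTC node sits directly under its CPU node. */
	hartid = riscv_of_parent_hartid(node);
	if (hartid < 0)
		return -ENODEV;
	/* ... map hartid to a logical CPU and register an irq domain ... */
	return 0;
}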
#ifdef CONFIG_PROC_FS
static void print_isa(struct seq_file *f, const char *isa)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 56d071b2c0a1..cae7e6d4c7ef 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -106,7 +106,9 @@ _save_context:
/* Handle interrupts */
move a0, sp /* pt_regs */
- tail do_IRQ
+ la a1, handle_arch_irq
+ REG_L a1, (a1)
+ jr a1
1:
/*
* Exceptions run with interrupts enabled or disabled depending on the
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 345c4f2eba13..7207fa08d78f 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -7,7 +7,6 @@
#include <linux/interrupt.h>
#include <linux/irqchip.h>
-#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <asm/smp.h>
@@ -17,37 +16,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
return 0;
}
-asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- irq_enter();
- switch (regs->cause & ~CAUSE_IRQ_FLAG) {
- case RV_IRQ_TIMER:
- riscv_timer_interrupt();
- break;
-#ifdef CONFIG_SMP
- case RV_IRQ_SOFT:
- /*
- * We only use software interrupts to pass IPIs, so if a non-SMP
- * system gets one, then we don't know what to do.
- */
- riscv_software_interrupt();
- break;
-#endif
- case RV_IRQ_EXT:
- handle_arch_irq(regs);
- break;
- default:
- pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
- BUG();
- }
- irq_exit();
-
- set_irq_regs(old_regs);
-}
-
void __init init_IRQ(void)
{
irqchip_init();
+ if (!handle_arch_irq)
+ panic("No interrupt controller found.");
}
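With do_IRQ() gone, entry.S jumps straight through the generic handle_arch_irq pointer (GENERIC_IRQ_MULTI_HANDLER), which an irqchip driver must populate via set_handle_irq() before init_IRQ() checks it. A hedged sketch of the registration side, with hypothetical names:

#include <linux/irq.h>

/* Root handler: decode the cause and dispatch, e.g. via handle_domain_irq(). */
static void my_root_irq_handler(struct pt_regs *regs)
{
	/* ... read the interrupt cause and route it into an irq domain ... */
}

static int __init my_intc_init(void)
{
	/* Install the first-level handler that entry.S will jump to. */
	return set_handle_irq(my_root_irq_handler);
}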
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 8bbe5dbe1341..7191342c54da 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -10,7 +10,7 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/sections.h>
static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 5805791cd5b5..d4a64dfed342 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -11,6 +11,7 @@
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
+#include <asm/patch.h>
struct patch_insn {
void *addr;
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 3e528312f615..f04373be54a6 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -22,7 +22,6 @@
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index a65a8fa0c22d..b1d4f452f843 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -123,11 +123,14 @@ static inline void clear_ipi(void)
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
}
-void riscv_software_interrupt(void)
+void handle_IPI(struct pt_regs *regs)
{
+ struct pt_regs *old_regs = set_irq_regs(regs);
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;
+ irq_enter();
+
clear_ipi();
while (true) {
@@ -138,7 +141,7 @@ void riscv_software_interrupt(void)
ops = xchg(pending_ipis, 0);
if (ops == 0)
- return;
+ goto done;
if (ops & (1 << IPI_RESCHEDULE)) {
stats[IPI_RESCHEDULE]++;
@@ -160,6 +163,10 @@ void riscv_software_interrupt(void)
/* Order data access and bit testing. */
mb();
}
+
+done:
+ irq_exit();
+ set_irq_regs(old_regs);
}
static const char * const ipi_names[] = {
diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c
index 1fc87621c728..c7b0a73e382e 100644
--- a/arch/riscv/kernel/soc.c
+++ b/arch/riscv/kernel/soc.c
@@ -4,7 +4,7 @@
*/
#include <linux/init.h>
#include <linux/libfdt.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/soc.h>
/*
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 837b9b38f825..595342910c3f 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -99,17 +99,18 @@ void notrace walk_stackframe(struct task_struct *task,
static bool print_trace_address(unsigned long pc, void *arg)
{
- print_ip_sym(pc);
+ const char *loglvl = arg;
+
+ print_ip_sym(loglvl, pc);
return false;
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
pr_cont("Call Trace:\n");
- walk_stackframe(task, NULL, print_trace_address, NULL);
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}
-
static bool save_wchan(unsigned long pc, void *arg)
{
if (!in_sched_functions(pc)) {
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 6a53c02e9c73..4d3a1048ad8b 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -26,3 +26,12 @@ void __init time_init(void)
lpj_fine = riscv_timebase / HZ;
timer_probe();
}
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 5080fdf8c296..ecec1778e3a4 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -183,6 +183,4 @@ void trap_init(void)
csr_write(CSR_SCRATCH, 0);
/* Set the exception vector address */
csr_write(CSR_TVEC, &handle_exception);
- /* Enable interrupts */
- csr_write(CSR_IE, IE_SIE);
}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 484d95a70907..678204231700 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -11,8 +11,12 @@
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
-
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
#include <asm/vdso.h>
+#endif
extern char vdso_start[], vdso_end[];
@@ -26,7 +30,7 @@ static union {
struct vdso_data data;
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
static int __init vdso_init(void)
{
@@ -61,7 +65,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
@@ -75,15 +79,24 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
*/
mm->context.vdso = (void *)vdso_base;
- ret = install_special_mapping(mm, vdso_base, vdso_len,
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
vdso_pagelist);
- if (unlikely(ret))
+ if (unlikely(ret)) {
mm->context.vdso = NULL;
+ goto end;
+ }
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
end:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
@@ -91,5 +104,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
return "[vdso]";
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
return NULL;
}
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 4c8b2a4a6a70..38ba55b0eb9d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copied from arch/tile/kernel/vdso/Makefile
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of the generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
# Symbols present in the vdso
vdso-syms = rt_sigreturn
ifdef CONFIG_64BIT
-vdso-syms += gettimeofday
-vdso-syms += clock_gettime
-vdso-syms += clock_getres
+vdso-syms += vgettimeofday
endif
vdso-syms += getcpu
vdso-syms += flush_icache
@@ -14,6 +16,10 @@ vdso-syms += flush_icache
# Files to link into the vdso
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
diff --git a/arch/riscv/kernel/vdso/clock_getres.S b/arch/riscv/kernel/vdso/clock_getres.S
deleted file mode 100644
index 91378a52eb22..000000000000
--- a/arch/riscv/kernel/vdso/clock_getres.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
-ENTRY(__vdso_clock_getres)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_getres
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_getres)
diff --git a/arch/riscv/kernel/vdso/clock_gettime.S b/arch/riscv/kernel/vdso/clock_gettime.S
deleted file mode 100644
index 5371fd9bc01f..000000000000
--- a/arch/riscv/kernel/vdso/clock_gettime.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
-ENTRY(__vdso_clock_gettime)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_clock_gettime
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_clock_gettime)
diff --git a/arch/riscv/kernel/vdso/gettimeofday.S b/arch/riscv/kernel/vdso/gettimeofday.S
deleted file mode 100644
index e6fb8af88632..000000000000
--- a/arch/riscv/kernel/vdso/gettimeofday.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 SiFive
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd.h>
-
- .text
-/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
-ENTRY(__vdso_gettimeofday)
- .cfi_startproc
- /* For now, just do the syscall. */
- li a7, __NR_gettimeofday
- ecall
- ret
- .cfi_endproc
-ENDPROC(__vdso_gettimeofday)
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index f66a091cb890..e6f558bca71b 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -2,11 +2,13 @@
/*
* Copyright (C) 2012 Regents of the University of California
*/
+#include <asm/page.h>
OUTPUT_ARCH(riscv)
SECTIONS
{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..d264943e2e47
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copied from arch/arm64/kernel/vdso/vgettimeofday.c
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 8930ab7278e6..094118663285 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017 SiFive
*/
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index be84e32adc4c..ae7b7fe24658 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -69,7 +69,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (unlikely(!vma))
goto bad_area;
@@ -114,7 +114,7 @@ good_area:
/*
* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if (fault_signal_pending(fault, regs))
@@ -147,7 +147,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -155,7 +155,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -163,7 +163,7 @@ good_area:
* Fix it, but check if it's kernel or user first.
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
do_trap(regs, SIGSEGV, code, addr);
@@ -191,14 +191,14 @@ no_context:
* (which will retry the fault, or kill us if we got oom-killed).
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 34327407b0c5..f4adb3684f3d 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -18,7 +18,6 @@
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/ptdump.h>
@@ -236,12 +235,12 @@ static void __init create_pte_mapping(pte_t *ptep,
uintptr_t va, phys_addr_t pa,
phys_addr_t sz, pgprot_t prot)
{
- uintptr_t pte_index = pte_index(va);
+ uintptr_t pte_idx = pte_index(va);
BUG_ON(sz != PAGE_SIZE);
- if (pte_none(ptep[pte_index]))
- ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
+ if (pte_none(ptep[pte_idx]))
+ ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}
#ifndef __PAGETABLE_PMD_FOLDED
@@ -284,21 +283,21 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
{
pte_t *ptep;
phys_addr_t pte_phys;
- uintptr_t pmd_index = pmd_index(va);
+ uintptr_t pmd_idx = pmd_index(va);
if (sz == PMD_SIZE) {
- if (pmd_none(pmdp[pmd_index]))
- pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
+ if (pmd_none(pmdp[pmd_idx]))
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
return;
}
- if (pmd_none(pmdp[pmd_index])) {
+ if (pmd_none(pmdp[pmd_idx])) {
pte_phys = alloc_pte(va);
- pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
ptep = get_pte_virt(pte_phys);
memset(ptep, 0, PAGE_SIZE);
} else {
- pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
+ pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
ptep = get_pte_virt(pte_phys);
}
@@ -326,21 +325,21 @@ static void __init create_pgd_mapping(pgd_t *pgdp,
{
pgd_next_t *nextp;
phys_addr_t next_phys;
- uintptr_t pgd_index = pgd_index(va);
+ uintptr_t pgd_idx = pgd_index(va);
if (sz == PGDIR_SIZE) {
- if (pgd_val(pgdp[pgd_index]) == 0)
- pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
+ if (pgd_val(pgdp[pgd_idx]) == 0)
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
return;
}
- if (pgd_val(pgdp[pgd_index]) == 0) {
+ if (pgd_val(pgdp[pgd_idx]) == 0) {
next_phys = alloc_pgd_next(va);
- pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
nextp = get_pgd_next_virt(next_phys);
memset(nextp, 0, PAGE_SIZE);
} else {
- next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
+ next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
nextp = get_pgd_next_virt(next_phys);
}
@@ -481,17 +480,6 @@ static void __init setup_vm_final(void)
csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
-
-void free_initmem(void)
-{
- unsigned long init_begin = (unsigned long)__init_begin;
- unsigned long init_end = (unsigned long)__init_end;
-
- /* Make the region as non-execuatble. */
- set_memory_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
- free_initmem_default(POISON_FREE_INITMEM);
-}
-
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index ec0ca90dd900..4a8b61806633 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -6,8 +6,8 @@
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
#include <asm/fixmap.h>
extern pgd_t early_pg_dir[PTRS_PER_PGD];
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 728759eb530a..ec2c70f84994 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -4,7 +4,7 @@
*/
#include <linux/pagewalk.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
@@ -117,10 +117,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (!numpages)
return 0;
- down_read(&init_mm.mmap_sem);
+ mmap_read_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
- up_read(&init_mm.mmap_sem);
+ mmap_read_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 070505d79b06..0831c2e61a8f 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -9,7 +9,7 @@
#include <linux/ptdump.h>
#include <asm/ptdump.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/kasan.h>
#define pt_dump_seq_printf(m, fmt, args...) \
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index e68136c3c23a..21c3147bd92a 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -29,10 +29,6 @@
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be
* updated.
- *
- * The record layout is documented in the Linux for zSeries Device Drivers
- * book:
- * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
*/
struct appldata_mem_data {
u64 timestamp;
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 8bc14b0d1def..59c282ca002f 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -25,10 +25,6 @@
* This is accessed as binary data by z/VM. If changes to it can't be avoided,
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be updated.
- *
- * The record layout is documented in the Linux for zSeries Device Drivers
- * book:
- * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
*/
struct appldata_net_sum_data {
u64 timestamp;
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 8bf46d705957..5503217366ec 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -32,10 +32,6 @@
* the structure version (product ID, see appldata_base.c) needs to be changed
* as well and all documentation and z/VM applications using it must be
* updated.
- *
- * The record layout is documented in the Linux for zSeries Device Drivers
- * book:
- * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
*/
struct appldata_os_per_cpu {
u32 per_cpu_user; /* timer ticks spent in user mode */
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 357adad991d2..8e222a666025 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -2,12 +2,12 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ctype.h>
+#include <linux/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
-#include <asm/pgtable.h>
#include <asm/uv.h>
#include "boot.h"
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 5591243d673e..d4442163ffa9 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -2,8 +2,8 @@
/*
* Copyright IBM Corp. 2019
*/
+#include <linux/pgtable.h>
#include <asm/mem_detect.h>
-#include <asm/pgtable.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 865ce1cb86d5..3cfe1eb89838 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <asm/chsc.h>
#include <asm/fcx.h>
#include <asm/irq.h>
#include <asm/schid.h>
@@ -236,4 +237,8 @@ extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int);
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx);
+int ccw_device_pnso(struct ccw_device *cdev,
+ struct chsc_pnso_area *pnso_area,
+ struct chsc_pnso_resume_token resume_token,
+ int cnc);
#endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
new file mode 100644
index 000000000000..36ce2d25a5fc
--- /dev/null
+++ b/arch/s390/include/asm/chsc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s): Alexandra Winter <wintera@linux.ibm.com>
+ *
+ * Interface for Channel Subsystem Call
+ */
+#ifndef _ASM_S390_CHSC_H
+#define _ASM_S390_CHSC_H
+
+#include <uapi/asm/chsc.h>
+
+/**
+ * struct chsc_pnso_naid_l2 - network address information descriptor
+ * @nit: Network interface token
+ * @addr_lnid: network address and logical network id (VLAN ID)
+ */
+struct chsc_pnso_naid_l2 {
+ u64 nit;
+ struct { u8 mac[6]; u16 lnid; } addr_lnid;
+} __packed;
+
+struct chsc_pnso_resume_token {
+ u64 t1;
+ u64 t2;
+} __packed;
+
+struct chsc_pnso_naihdr {
+ struct chsc_pnso_resume_token resume_token;
+ u32:32;
+ u32 instance;
+ u32:24;
+ u8 naids;
+ u32 reserved[3];
+} __packed;
+
+struct chsc_pnso_area {
+ struct chsc_header request;
+ u8:2;
+ u8 m:1;
+ u8:5;
+ u8:2;
+ u8 ssid:2;
+ u8 fmt:4;
+ u16 sch;
+ u8:8;
+ u8 cssid;
+ u16:16;
+ u8 oc;
+ u32:24;
+ struct chsc_pnso_resume_token resume_token;
+ u32 n:1;
+ u32:31;
+ u32 reserved[3];
+ struct chsc_header response;
+ u32:32;
+ struct chsc_pnso_naihdr naihdr;
+ struct chsc_pnso_naid_l2 entries[0];
+} __packed __aligned(PAGE_SIZE);
+
+#endif /* _ASM_S390_CHSC_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 9ddf4a43a590..60f9241e5e4a 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -9,8 +9,8 @@
#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#define hugetlb_free_pgd_range free_pgd_range
#define hugepages_supported() (MACHINE_HAS_EDAT1)
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 5a16f500515a..da014e4f8113 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -26,7 +26,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define IO_SPACE_LIMIT 0
-void __iomem *ioremap(unsigned long offset, unsigned long size);
+void __iomem *ioremap(phys_addr_t addr, size_t size);
void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index b63bd66404b8..7d5cfdda5277 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -21,6 +21,7 @@ struct ipl_parameter_block {
struct ipl_pb0_common common;
struct ipl_pb0_fcp fcp;
struct ipl_pb0_ccw ccw;
+ struct ipl_pb0_nvme nvme;
char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
};
} __packed __aligned(PAGE_SIZE);
@@ -30,6 +31,11 @@ struct ipl_parameter_block {
#define IPL_BP_FCP_LEN (sizeof(struct ipl_pl_hdr) + \
sizeof(struct ipl_pb0_fcp))
#define IPL_BP0_FCP_LEN (sizeof(struct ipl_pb0_fcp))
+
+#define IPL_BP_NVME_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_nvme))
+#define IPL_BP0_NVME_LEN (sizeof(struct ipl_pb0_nvme))
+
#define IPL_BP_CCW_LEN (sizeof(struct ipl_pl_hdr) + \
sizeof(struct ipl_pb0_ccw))
#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw))
@@ -59,6 +65,7 @@ enum ipl_type {
IPL_TYPE_FCP = 4,
IPL_TYPE_FCP_DUMP = 8,
IPL_TYPE_NSS = 16,
+ IPL_TYPE_NVME = 32,
};
struct ipl_info
@@ -74,6 +81,10 @@ struct ipl_info
u64 lun;
} fcp;
struct {
+ u32 fid;
+ u32 nsid;
+ } nvme;
+ struct {
char name[NSS_NAME_SIZE + 1];
} nss;
} data;
diff --git a/arch/s390/include/asm/kasan.h b/arch/s390/include/asm/kasan.h
index 70930fe5c496..89d6886040c8 100644
--- a/arch/s390/include/asm/kasan.h
+++ b/arch/s390/include/asm/kasan.h
@@ -2,8 +2,6 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#include <asm/pgtable.h>
-
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3d554887794e..cee3cb6455a2 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -978,7 +978,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index b160da8fa14b..5afee80cff58 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -99,7 +99,7 @@ int nmi_alloc_per_cpu(struct lowcore *lc);
void nmi_free_per_cpu(struct lowcore *lc);
void s390_handle_mcck(void);
-void s390_do_machine_check(struct pt_regs *regs);
+int s390_do_machine_check(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 7485ee561fec..99b92c3e46b0 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -22,12 +22,17 @@ int pci_domain_nr(struct pci_bus *);
int pci_proc_domain(struct pci_bus *);
#define ZPCI_BUS_NR 0 /* default bus number */
-#define ZPCI_DEVFN 0 /* default device number */
#define ZPCI_NR_DMA_SPACES 1
#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
#define ZPCI_DOMAIN_BITMAP_SIZE (1 << 16)
+#ifdef PCI
+#if (ZPCI_NR_DEVICES > ZPCI_DOMAIN_BITMAP_SIZE)
+# error ZPCI_NR_DEVICES cannot be bigger than ZPCI_DOMAIN_BITMAP_SIZE
+#endif
+#endif /* PCI */
+
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
#define ZPCI_FC_ERROR 0x40
@@ -94,10 +99,26 @@ struct zpci_bar_struct {
struct s390_domain;
+#define ZPCI_FUNCTIONS_PER_BUS 256
+struct zpci_bus {
+ struct kref kref;
+ struct pci_bus *bus;
+ struct zpci_dev *function[ZPCI_FUNCTIONS_PER_BUS];
+ struct list_head resources;
+ struct list_head bus_next;
+ struct resource bus_resource;
+ int pchid;
+ int domain_nr;
+ bool multifunction;
+ enum pci_bus_speed max_bus_speed;
+};
+
/* Private data per function */
struct zpci_dev {
- struct pci_bus *bus;
+ struct zpci_bus *zbus;
struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
+ struct list_head bus_next;
+ struct kref kref;
struct hotplug_slot hotplug_slot;
enum zpci_state state;
@@ -107,7 +128,12 @@ struct zpci_dev {
u16 pchid; /* physical channel ID */
u8 pfgid; /* function group ID */
u8 pft; /* pci function type */
- u16 domain;
+ u8 port;
+ u8 rid_available : 1;
+ u8 has_hp_slot : 1;
+ u8 is_physfn : 1;
+ u8 reserved : 5;
+ unsigned int devfn; /* DEVFN part of the RID */
struct mutex lock;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
@@ -167,6 +193,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
extern const struct attribute_group *zpci_attr_groups[];
extern unsigned int s390_pci_force_floating __initdata;
+extern unsigned int s390_pci_no_rid;
/* -----------------------------------------------------------------------------
Prototypes
@@ -227,7 +254,14 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
/* Helpers */
static inline struct zpci_dev *to_zpci(struct pci_dev *pdev)
{
- return pdev->sysdata;
+ struct zpci_bus *zbus = pdev->sysdata;
+
+ return zbus->function[pdev->devfn];
+}
+
+static inline struct zpci_dev *to_zpci_dev(struct device *dev)
+{
+ return to_zpci(to_pci_dev(dev));
}
struct zpci_dev *get_zdev_by_fid(u32);
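With this change, pdev->sysdata points at the shared zpci_bus and the per-function zpci_dev is looked up by devfn, as to_zpci() above shows. A sketch of walking all functions registered on one bus (editorial; assumes zpci_dev carries a fid field, as get_zdev_by_fid() suggests):

	/* Sketch: iterate the function slots of one zpci_bus. */
	static void zbus_show_functions(struct zpci_bus *zbus)
	{
		int devfn;

		for (devfn = 0; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
			struct zpci_dev *zdev = zbus->function[devfn];

			if (zdev)
				pr_info("fid %u at devfn %d\n",
					zdev->fid, devfn);
		}
	}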
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index bd2cb4ea7d93..eb51272dd2cc 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -93,7 +93,10 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u16 vfn; /* virtual fn number */
- u16 : 6;
+ u16 : 3;
+ u16 rid_avail : 1;
+ u16 is_physfn : 1;
+ u16 reserved1 : 1;
u16 mio_addr_avail : 1;
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
@@ -102,12 +105,16 @@ struct clp_rsp_query_pci {
u16 pchid;
__le32 bar[PCI_STD_NUM_BARS];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
- u32 : 16;
+ u16 : 12;
+ u16 port : 4;
u8 fmb_len;
u8 pft; /* pci function type */
u64 sdma; /* start dma as */
u64 edma; /* end dma as */
- u32 reserved[11];
+#define ZPCI_RID_MASK_DEVFN 0x00ff
+ u16 rid; /* BUS/DEVFN PCI address */
+ u16 reserved0;
+ u32 reserved[10];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
u32 reserved2[16];
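The new rid field carries the PCI routing ID (bus number in the high byte, devfn in the low byte, per the ZPCI_RID_MASK_DEVFN define). A sketch of splitting it, assuming a struct clp_rsp_query_pci *response in scope:

	/* Sketch: derive bus number and devfn from the CLP routing ID. */
	u16 rid = response->rid;
	u8 bus = rid >> 8;
	u8 devfn = rid & ZPCI_RID_MASK_DEVFN;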
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e2528e057980..19d603bd1f36 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1229,7 +1229,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1260,7 +1259,6 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
}
#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
@@ -1275,6 +1273,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
return (pud_t *) p4d_deref(*p4d) + pud_index(address);
return (pud_t *) p4d;
}
+#define pud_offset pud_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
@@ -1282,17 +1281,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return (pmd_t *) pud_deref(*pud) + pmd_index(address);
return (pmd_t *) pud;
}
+#define pmd_offset pmd_offset
-static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
- return (pte_t *) pmd_deref(*pmd) + pte_index(address);
+ return (unsigned long) pmd_deref(pmd);
}
-#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-
-static inline void pte_unmap(pte_t *pte) { }
-
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
return end <= current->mm->context.asce_limit;
@@ -1683,6 +1678,4 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#include <asm-generic/pgtable.h>
-
#endif /* _S390_PAGE_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 555d148ccf32..962da04234af 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,17 +14,15 @@
#include <linux/bits.h>
-#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
-#define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
-#define CIF_ASCE_SECONDARY 2 /* secondary asce needs fixup / uaccess */
-#define CIF_NOHZ_DELAY 3 /* delay HZ disable for a tick */
-#define CIF_FPU 4 /* restore FPU registers */
-#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
-#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
-#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
-#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
-
-#define _CIF_MCCK_PENDING BIT(CIF_MCCK_PENDING)
+#define CIF_ASCE_PRIMARY 0 /* primary asce needs fixup / uaccess */
+#define CIF_ASCE_SECONDARY 1 /* secondary asce needs fixup / uaccess */
+#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
+#define CIF_FPU 3 /* restore FPU registers */
+#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
+#define CIF_ENABLED_WAIT 5 /* in enabled wait state */
+#define CIF_MCCK_GUEST 6 /* machine check happening in guest */
+#define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */
+
#define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY)
#define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY)
#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 86a3796e9be8..e69dbf438f99 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -365,34 +365,6 @@ struct qdio_initialize {
struct qdio_outbuf_state *output_sbal_state_array;
};
-/**
- * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc()
- * @l3_ipv6_addr: entry contains IPv6 address
- * @l3_ipv4_addr: entry contains IPv4 address
- * @l2_addr_lnid: entry contains MAC address and VLAN ID
- */
-enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid};
-
-/**
- * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc()
- * @nit: Network interface token
- * @addr: Address of one of the three types
- *
- * The struct is passed to the callback function by qdio_brinfo_desc()
- */
-struct qdio_brinfo_entry_l3_ipv6 {
- u64 nit;
- struct { unsigned char _s6_addr[16]; } addr;
-} __packed;
-struct qdio_brinfo_entry_l3_ipv4 {
- u64 nit;
- struct { uint32_t _s_addr; } addr;
-} __packed;
-struct qdio_brinfo_entry_l2 {
- u64 nit;
- struct { u8 mac[6]; u16 lnid; } addr_lnid;
-} __packed;
-
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
@@ -423,10 +395,5 @@ extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
extern int qdio_shutdown(struct ccw_device *, int);
extern int qdio_free(struct ccw_device *);
extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
-extern int qdio_pnso_brinfo(struct subchannel_id schid,
- int cnc, u16 *response,
- void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
- void *entry),
- void *priv);
#endif /* __QDIO_H__ */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 231a51e870fe..7326f110d48c 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -58,5 +58,6 @@ extern int smp_rescan_cpus(void);
extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_disable(void);
+extern void schedule_mcck_handler(void);
#endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 82703e03f35d..2204704840ea 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -6,7 +6,6 @@
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
/*
* Flush all TLB entries on the local CPU.
diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h
index 451ba7d08905..d1ecd5d722a0 100644
--- a/arch/s390/include/uapi/asm/ipl.h
+++ b/arch/s390/include/uapi/asm/ipl.h
@@ -27,6 +27,7 @@ enum ipl_pbt {
IPL_PBT_FCP = 0,
IPL_PBT_SCP_DATA = 1,
IPL_PBT_CCW = 2,
+ IPL_PBT_NVME = 4,
};
/* IPL Parameter Block 0 with common fields */
@@ -67,6 +68,30 @@ struct ipl_pb0_fcp {
#define IPL_PB0_FCP_OPT_IPL 0x10
#define IPL_PB0_FCP_OPT_DUMP 0x20
+/* IPL Parameter Block 0 for NVMe */
+struct ipl_pb0_nvme {
+ __u32 len;
+ __u8 pbt;
+ __u8 reserved1[3];
+ __u8 loadparm[8];
+ __u8 reserved2[304];
+ __u8 opt;
+ __u8 reserved3[3];
+ __u32 fid;
+ __u8 reserved4[12];
+ __u32 nsid;
+ __u8 reserved5[4];
+ __u32 bootprog;
+ __u8 reserved6[12];
+ __u64 br_lba;
+ __u32 scp_data_len;
+ __u8 reserved7[260];
+ __u8 scp_data[];
+} __packed;
+
+#define IPL_PB0_NVME_OPT_IPL 0x10
+#define IPL_PB0_NVME_OPT_DUMP 0x20
+
/* IPL Parameter Block 0 for CCW */
struct ipl_pb0_ccw {
__u32 len;
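Because the block is consumed by firmware, the fixed layout of struct ipl_pb0_nvme must not drift. By my hand count the fields from len through reserved7 sum to 636 bytes; a build-time guard along these lines (editorial sketch, the 636 is my own arithmetic, not from the patch) would catch accidental changes:

	/* Sketch: guard the fixed part of the NVMe IPL block. */
	static_assert(offsetof(struct ipl_pb0_nvme, scp_data) == 636,
		      "ipl_pb0_nvme fixed part changed size");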
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 75f26d775027..a8f136943deb 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -33,11 +33,6 @@ CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
-#
-# Pass UTS_MACHINE for user_regset definition
-#
-CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e80f0e6f5972..165031bd3370 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -11,9 +11,9 @@
#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/purgatory.h>
+#include <linux/pgtable.h>
#include <asm/idle.h>
#include <asm/vdso.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 2c122d8bab93..0dc4b258b98d 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -126,15 +126,16 @@ unknown:
return -EINVAL;
}
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
struct unwind_state state;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
- printk(state.reliable ? " [<%016lx>] %pSR \n" :
- "([<%016lx>] %pSR)\n",
- state.ip, (void *) state.ip);
+ printk(state.reliable ? "%s [<%016lx>] %pSR \n" :
+ "%s([<%016lx>] %pSR)\n",
+ loglvl, state.ip, (void *) state.ip);
debug_show_held_locks(task ? : current);
}
@@ -175,7 +176,7 @@ void show_regs(struct pt_regs *regs)
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!user_mode(regs))
- show_stack(NULL, (unsigned long *) regs->gprs[15]);
+ show_stack(NULL, (unsigned long *) regs->gprs[15], KERN_DEFAULT);
show_last_breaking_event(regs);
}
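show_stack() now takes the log level explicitly instead of hardcoding it, so a caller controls the severity of the whole trace. A one-line usage sketch (editorial):

	/* Sketch: dump the current task's stack at error severity. */
	show_stack(NULL, NULL, KERN_ERR);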
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3ae64914bd14..50ff6dd0f995 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -55,14 +55,11 @@ _TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_UPROBE | _TIF_GUARDED_STORAGE | _TIF_PATCH_PENDING)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE_PRIMARY | \
- _CIF_ASCE_SECONDARY | _CIF_FPU)
+_CIF_WORK = (_CIF_ASCE_PRIMARY | _CIF_ASCE_SECONDARY | _CIF_FPU)
_PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
_LPP_OFFSET = __LC_LPP
-#define BASED(name) name-cleanup_critical(%r13)
-
.macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
@@ -116,17 +113,39 @@ _LPP_OFFSET = __LC_LPP
.macro SWITCH_ASYNC savearea,timer
tmhh %r8,0x0001 # interrupting from user ?
jnz 2f
+#if IS_ENABLED(CONFIG_KVM)
lgr %r14,%r9
- cghi %r14,__LC_RETURN_LPSWE
- je 0f
- slg %r14,BASED(.Lcritical_start)
- clg %r14,BASED(.Lcritical_length)
- jhe 1f
-0:
+ larl %r13,.Lsie_gmap
+ slgr %r14,%r13
+ lghi %r13,.Lsie_done - .Lsie_gmap
+ clgr %r14,%r13
+ jhe 0f
lghi %r11,\savearea # inside critical section, do cleanup
- brasl %r14,cleanup_critical
- tmhh %r8,0x0001 # retest problem state after cleanup
- jnz 2f
+ brasl %r14,.Lcleanup_sie
+#endif
+0: larl %r13,.Lpsw_idle_exit
+ cgr %r13,%r9
+ jne 1f
+
+ mvc __CLOCK_IDLE_EXIT(8,%r2), __LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2), __LC_ASYNC_ENTER_TIMER
+ # account system time going idle
+ ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
+
+ lg %r13,__LC_STEAL_TIMER
+ alg %r13,__CLOCK_IDLE_ENTER(%r2)
+ slg %r13,__LC_LAST_UPDATE_CLOCK
+ stg %r13,__LC_STEAL_TIMER
+
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
+
+ lg %r13,__LC_SYSTEM_TIMER
+ alg %r13,__LC_LAST_UPDATE_TIMER
+ slg %r13,__TIMER_IDLE_ENTER(%r2)
+ stg %r13,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
+
+ nihh %r8,0xfcfd # clear wait state and irq bits
1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
@@ -152,12 +171,30 @@ _LPP_OFFSET = __LC_LPP
mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
- .macro REENABLE_IRQS
+ .macro RESTORE_SM_CLEAR_PER
stg %r8,__LC_RETURN_PSW
ni __LC_RETURN_PSW,0xbf
ssm __LC_RETURN_PSW
.endm
+ .macro ENABLE_INTS
+ stosm __SF_EMPTY(%r15),3
+ .endm
+
+ .macro ENABLE_INTS_TRACE
+ TRACE_IRQS_ON
+ ENABLE_INTS
+ .endm
+
+ .macro DISABLE_INTS
+ stnsm __SF_EMPTY(%r15),0xfc
+ .endm
+
+ .macro DISABLE_INTS_TRACE
+ DISABLE_INTS
+ TRACE_IRQS_OFF
+ .endm
+
.macro STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
.insn s,0xb27c0000,\savearea # store clock fast
@@ -254,8 +291,6 @@ ENTRY(__switch_to)
BR_EX %r14
ENDPROC(__switch_to)
-.L__critical_start:
-
#if IS_ENABLED(CONFIG_KVM)
/*
* sie64a calling convention:
@@ -288,7 +323,6 @@ ENTRY(sie64a)
BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
sie 0(%r14)
-.Lsie_exit:
BPOFF
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
@@ -341,7 +375,6 @@ EXPORT_SYMBOL(sie_exit)
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
BPOFF
lg %r12,__LC_CURRENT
@@ -350,7 +383,6 @@ ENTRY(system_call)
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-.Lsysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
@@ -358,6 +390,7 @@ ENTRY(system_call)
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
+ ENABLE_INTS
.Lsysc_do_svc:
# clear user controlled register to prevent speculative use
xgr %r0,%r0
@@ -393,26 +426,26 @@ ENTRY(system_call)
jnz .Lsysc_work
TSTMSK __TI_flags(%r12),_TIF_WORK
jnz .Lsysc_work # check for work
- TSTMSK __LC_CPU_FLAGS,_CIF_WORK
+ TSTMSK __LC_CPU_FLAGS,(_CIF_WORK-_CIF_FPU)
jnz .Lsysc_work
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
+ DISABLE_INTS
+ TSTMSK __LC_CPU_FLAGS, _CIF_FPU
+ jz .Lsysc_skip_fpu
+ brasl %r14,load_fpu_regs
+.Lsysc_skip_fpu:
lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
-.Lsysc_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- b __LC_RETURN_LPSWE(%r0)
-.Lsysc_done:
+ lmg %r0,%r15,__PT_R0(%r11)
+ b __LC_RETURN_LPSWE
#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
- TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
- jo .Lsysc_mcck_pending
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jo .Lsysc_reschedule
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL_RESTART
@@ -436,11 +469,9 @@ ENTRY(system_call)
jo .Lsysc_sigpending
TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
jo .Lsysc_notify_resume
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- jo .Lsysc_vxrs
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
jnz .Lsysc_asce
- j .Lsysc_return # beware of critical section cleanup
+ j .Lsysc_return
#
# _TIF_NEED_RESCHED is set, call schedule
@@ -450,13 +481,6 @@ ENTRY(system_call)
jg schedule
#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lsysc_mcck_pending:
- larl %r14,.Lsysc_return
- jg s390_handle_mcck # TIF bit will be cleared by handler
-
-#
# _CIF_ASCE_PRIMARY and/or _CIF_ASCE_SECONDARY set, load user space asce
#
.Lsysc_asce:
@@ -475,12 +499,6 @@ ENTRY(system_call)
larl %r14,.Lsysc_return
jg set_fs_fixup
-#
-# CIF_FPU is set, restore floating-point controls and floating-point registers.
-#
-.Lsysc_vxrs:
- larl %r14,.Lsysc_return
- jg load_fpu_regs
#
# _TIF_SIGPENDING is set, call do_signal
@@ -564,7 +582,6 @@ ENTRY(system_call)
jnh .Lsysc_tracenogo
sllg %r8,%r2,3
lg %r9,0(%r8,%r10)
-.Lsysc_tracego:
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
@@ -585,8 +602,6 @@ ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
lg %r12,__LC_CURRENT
brasl %r14,schedule_tail
- TRACE_IRQS_ON
- ssm __LC_SVC_NEW_PSW # reenable interrupts
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
jne .Lsysc_tracenogo
# it's a kernel thread
@@ -620,15 +635,16 @@ ENTRY(pgm_check_handler)
lghi %r10,1
0: lg %r12,__LC_CURRENT
lghi %r11,0
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
jnz 3f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
- slg %r14,BASED(.Lsie_critical_start)
- clg %r14,BASED(.Lsie_critical_length)
+ larl %r13,.Lsie_gmap
+ slgr %r14,%r13
+ lghi %r13,.Lsie_done - .Lsie_gmap
+ clgr %r14,%r13
jhe 1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
@@ -680,7 +696,7 @@ ENTRY(pgm_check_handler)
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-6: REENABLE_IRQS
+6: RESTORE_SM_CLEAR_PER
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
@@ -702,7 +718,7 @@ ENTRY(pgm_check_handler)
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
- REENABLE_IRQS
+ RESTORE_SM_CLEAR_PER
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_per_trap
@@ -713,11 +729,10 @@ ENTRY(pgm_check_handler)
#
.Lpgm_svcper:
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
- lghi %r13,__TASK_thread
larl %r14,.Lsysc_per
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+ lpswe __LC_RETURN_PSW # branch to .Lsysc_per
ENDPROC(pgm_check_handler)
/*
@@ -729,7 +744,6 @@ ENTRY(io_int_handler)
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
@@ -749,7 +763,12 @@ ENTRY(io_int_handler)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+ tmhh %r8,0x300
+ jz 1f
TRACE_IRQS_OFF
+1:
+#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
lgr %r2,%r11 # pass pointer to pt_regs
@@ -767,25 +786,27 @@ ENTRY(io_int_handler)
j .Lio_loop
.Lio_return:
LOCKDEP_SYS_EXIT
- TRACE_IRQS_ON
-.Lio_tif:
TSTMSK __TI_flags(%r12),_TIF_WORK
jnz .Lio_work # there is work to do (signals etc.)
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lio_work
.Lio_restore:
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+ tm __PT_PSW(%r11),3
+ jno 0f
+ TRACE_IRQS_ON
+0:
+#endif
lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lio_exit_kernel
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
-.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
- lmg %r11,%r15,__PT_R11(%r11)
- b __LC_RETURN_LPSWE(%r0)
+ lmg %r0,%r15,__PT_R0(%r11)
+ b __LC_RETURN_LPSWE
.Lio_done:
#
@@ -813,9 +834,6 @@ ENTRY(io_int_handler)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- # TRACE_IRQS_ON already done at .Lio_return, call
- # TRACE_IRQS_OFF to keep things symmetrical
- TRACE_IRQS_OFF
brasl %r14,preempt_schedule_irq
j .Lio_return
#else
@@ -835,9 +853,6 @@ ENTRY(io_int_handler)
#
# One of the work bits is on. Find out which one.
#
-.Lio_work_tif:
- TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
- jo .Lio_mcck_pending
TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jo .Lio_reschedule
#ifdef CONFIG_LIVEPATCH
@@ -854,15 +869,6 @@ ENTRY(io_int_handler)
jo .Lio_vxrs
TSTMSK __LC_CPU_FLAGS,(_CIF_ASCE_PRIMARY|_CIF_ASCE_SECONDARY)
jnz .Lio_asce
- j .Lio_return # beware of critical section cleanup
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lio_mcck_pending:
- # TRACE_IRQS_ON already done at .Lio_return
- brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
- TRACE_IRQS_OFF
j .Lio_return
#
@@ -895,23 +901,19 @@ ENTRY(io_int_handler)
# _TIF_GUARDED_STORAGE is set, call guarded_storage_load
#
.Lio_guarded_storage:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,gs_load_bc_cb
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
brasl %r14,schedule # call scheduler
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
#
@@ -928,24 +930,20 @@ ENTRY(io_int_handler)
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
+ ENABLE_INTS_TRACE
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_notify_resume
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
+ DISABLE_INTS_TRACE
j .Lio_return
ENDPROC(io_int_handler)
@@ -958,7 +956,6 @@ ENTRY(ext_int_handler)
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
@@ -981,7 +978,12 @@ ENTRY(ext_int_handler)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
jo .Lio_restore
+#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
+ tmhh %r8,0x300
+ jz 1f
TRACE_IRQS_OFF
+1:
+#endif
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
lghi %r3,EXT_INTERRUPT
@@ -990,11 +992,11 @@ ENTRY(ext_int_handler)
ENDPROC(ext_int_handler)
/*
- * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
+ * Load idle PSW.
*/
ENTRY(psw_idle)
stg %r3,__SF_EMPTY(%r15)
- larl %r1,.Lpsw_idle_lpsw+4
+ larl %r1,.Lpsw_idle_exit
stg %r1,__SF_EMPTY+8(%r15)
larl %r1,smp_cpu_mtid
llgf %r1,0(%r1)
@@ -1006,10 +1008,9 @@ ENTRY(psw_idle)
BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
-.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
+.Lpsw_idle_exit:
BR_EX %r14
-.Lpsw_idle_end:
ENDPROC(psw_idle)
/*
@@ -1020,6 +1021,7 @@ ENDPROC(psw_idle)
* of the register contents at return from io or a system call.
*/
ENTRY(save_fpu_regs)
+ stnsm __SF_EMPTY(%r15),0xfc
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
@@ -1051,6 +1053,7 @@ ENTRY(save_fpu_regs)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
+ ssm __SF_EMPTY(%r15)
BR_EX %r14
.Lsave_fpu_regs_end:
ENDPROC(save_fpu_regs)
@@ -1102,8 +1105,6 @@ load_fpu_regs:
.Lload_fpu_regs_end:
ENDPROC(load_fpu_regs)
-.L__critical_end:
-
/*
* Machine check handler routines
*/
@@ -1116,7 +1117,6 @@ ENTRY(mcck_int_handler)
lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
@@ -1202,15 +1202,13 @@ ENTRY(mcck_int_handler)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,s390_do_machine_check
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lmcck_return
+ cghi %r2,0
+ je .Lmcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
- TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
- jno .Lmcck_return
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
@@ -1280,265 +1278,23 @@ ENTRY(stack_overflow)
ENDPROC(stack_overflow)
#endif
-ENTRY(cleanup_critical)
- cghi %r9,__LC_RETURN_LPSWE
- je .Lcleanup_lpswe
-#if IS_ENABLED(CONFIG_KVM)
- clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
- jl 0f
- clg %r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
- jl .Lcleanup_sie
-#endif
- clg %r9,BASED(.Lcleanup_table) # system_call
- jl 0f
- clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
- jl .Lcleanup_system_call
- clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
- jl .Lcleanup_sysc_tif
- clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
- jl .Lcleanup_sysc_restore
- clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
- jl .Lcleanup_io_tif
- clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
- jl .Lcleanup_io_restore
- clg %r9,BASED(.Lcleanup_table+64) # psw_idle
- jl 0f
- clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
- jl .Lcleanup_idle
- clg %r9,BASED(.Lcleanup_table+80) # save_fpu_regs
- jl 0f
- clg %r9,BASED(.Lcleanup_table+88) # .Lsave_fpu_regs_end
- jl .Lcleanup_save_fpu_regs
- clg %r9,BASED(.Lcleanup_table+96) # load_fpu_regs
- jl 0f
- clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
- jl .Lcleanup_load_fpu_regs
-0: BR_EX %r14,%r11
-ENDPROC(cleanup_critical)
-
- .align 8
-.Lcleanup_table:
- .quad system_call
- .quad .Lsysc_do_svc
- .quad .Lsysc_tif
- .quad .Lsysc_restore
- .quad .Lsysc_done
- .quad .Lio_tif
- .quad .Lio_restore
- .quad .Lio_done
- .quad psw_idle
- .quad .Lpsw_idle_end
- .quad save_fpu_regs
- .quad .Lsave_fpu_regs_end
- .quad load_fpu_regs
- .quad .Lload_fpu_regs_end
-
#if IS_ENABLED(CONFIG_KVM)
-.Lcleanup_table_sie:
- .quad .Lsie_gmap
- .quad .Lsie_done
-
.Lcleanup_sie:
- cghi %r11,__LC_SAVE_AREA_ASYNC #Is this in normal interrupt?
- je 1f
- slg %r9,BASED(.Lsie_crit_mcck_start)
- clg %r9,BASED(.Lsie_crit_mcck_length)
- jh 1f
- oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ cghi %r11,__LC_SAVE_AREA_ASYNC # Is this in normal interrupt?
+ je 1f
+ larl %r13,.Lsie_entry
+ slgr %r9,%r13
+ larl %r13,.Lsie_skip
+ clgr %r9,%r13
+ jh 1f
+ oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+1: BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
BR_EX %r14,%r11
-#endif
-.Lcleanup_system_call:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn)
- jh 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stmg has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn+8)
- jh 0f
- mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
-0: # check if base register setup + TIF bit load has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+16)
- jhe 0f
- # set up saved register r12 task struct pointer
- stg %r12,32(%r11)
- # set up saved register r13 __TASK_thread offset
- mvc 40(8,%r11),BASED(.Lcleanup_system_call_const)
-0: # check if the user time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+24)
- jh 0f
- lg %r15,__LC_EXIT_TIMER
- slg %r15,__LC_SYNC_ENTER_TIMER
- alg %r15,__LC_USER_TIMER
- stg %r15,__LC_USER_TIMER
-0: # check if the system time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+32)
- jh 0f
- lg %r15,__LC_LAST_UPDATE_TIMER
- slg %r15,__LC_EXIT_TIMER
- alg %r15,__LC_SYSTEM_TIMER
- stg %r15,__LC_SYSTEM_TIMER
-0: # update accounting time stamp
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
- # set up saved register r11
- lg %r15,__LC_KERNEL_STACK
- la %r9,STACK_FRAME_OVERHEAD(%r15)
- stg %r9,24(%r11) # r11 pt_regs pointer
- # fill pt_regs
- mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
- # setup saved register r15
- stg %r15,56(%r11) # r15 stack pointer
- # set new psw address and exit
- larl %r9,.Lsysc_do_svc
- BR_EX %r14,%r11
-.Lcleanup_system_call_insn:
- .quad system_call
- .quad .Lsysc_stmg
- .quad .Lsysc_per
- .quad .Lsysc_vtime+36
- .quad .Lsysc_vtime+42
-.Lcleanup_system_call_const:
- .quad __TASK_thread
-
-.Lcleanup_sysc_tif:
- larl %r9,.Lsysc_tif
- BR_EX %r14,%r11
-
-.Lcleanup_sysc_restore:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_sysc_restore_insn)
- jh 0f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
- je 1f
- lg %r9,24(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-.Lcleanup_lpswe:
-1: lmg %r8,%r9,__LC_RETURN_PSW
- BR_EX %r14,%r11
-.Lcleanup_sysc_restore_insn:
- .quad .Lsysc_exit_timer
- .quad .Lsysc_done - 4
-
-.Lcleanup_io_tif:
- larl %r9,.Lio_tif
- BR_EX %r14,%r11
-
-.Lcleanup_io_restore:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_io_restore_insn)
- jh 0f
- mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
- je 1f
- lg %r9,24(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-1: lmg %r8,%r9,__LC_RETURN_PSW
- BR_EX %r14,%r11
-.Lcleanup_io_restore_insn:
- .quad .Lio_exit_timer
- .quad .Lio_done - 4
-
-.Lcleanup_idle:
- ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
- # copy interrupt clock & cpu timer
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck & stpt have been executed
- clg %r9,BASED(.Lcleanup_idle_insn)
- jhe 1f
- mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1: # calculate idle cycles
- clg %r9,BASED(.Lcleanup_idle_insn)
- jl 3f
- larl %r1,smp_cpu_mtid
- llgf %r1,0(%r1)
- ltgr %r1,%r1
- jz 3f
- .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
- larl %r3,mt_cycles
- ag %r3,__LC_PERCPU_OFFSET
- la %r4,__SF_EMPTY+16(%r15)
-2: lg %r0,0(%r3)
- slg %r0,0(%r4)
- alg %r0,64(%r4)
- stg %r0,0(%r3)
- la %r3,8(%r3)
- la %r4,8(%r4)
- brct %r1,2b
-3: # account system time going idle
- lg %r9,__LC_STEAL_TIMER
- alg %r9,__CLOCK_IDLE_ENTER(%r2)
- slg %r9,__LC_LAST_UPDATE_CLOCK
- stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lg %r9,__LC_SYSTEM_TIMER
- alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__TIMER_IDLE_ENTER(%r2)
- stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
- # prepare return psw
- nihh %r8,0xfcfd # clear irq & wait state bits
- lg %r9,48(%r11) # return from psw_idle
- BR_EX %r14,%r11
-.Lcleanup_idle_insn:
- .quad .Lpsw_idle_lpsw
-
-.Lcleanup_save_fpu_regs:
- larl %r9,save_fpu_regs
- BR_EX %r14,%r11
-
-.Lcleanup_load_fpu_regs:
- larl %r9,load_fpu_regs
- BR_EX %r14,%r11
-
-/*
- * Integer constants
- */
- .align 8
-.Lcritical_start:
- .quad .L__critical_start
-.Lcritical_length:
- .quad .L__critical_end - .L__critical_start
-#if IS_ENABLED(CONFIG_KVM)
-.Lsie_critical_start:
- .quad .Lsie_gmap
-.Lsie_critical_length:
- .quad .Lsie_done - .Lsie_gmap
-.Lsie_crit_mcck_start:
- .quad .Lsie_entry
-.Lsie_crit_mcck_length:
- .quad .Lsie_skip - .Lsie_entry
#endif
.section .rodata, "a"
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
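The net effect of the entry.S rework: instead of cleanup_critical fixing up partially executed critical sections after the fact, the remaining critical code such as save_fpu_regs simply runs with interrupts disabled (the new stnsm/ssm bracketing). A C analogue of that bracketing, as a sketch only (save_fpu_state() is a hypothetical stand-in for the register-save body):

	/* C analogue of the stnsm/ssm pair added to save_fpu_regs. */
	unsigned long flags;

	local_irq_save(flags);		/* stnsm __SF_EMPTY(%r15),0xfc */
	save_fpu_state();		/* hypothetical body */
	local_irq_restore(flags);	/* ssm __SF_EMPTY(%r15) */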
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 4cd9b1ada834..44e01dd1e624 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -72,22 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
#endif
}
-static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
- insn->opc = BREAKPOINT_INSTRUCTION;
- insn->disp = KPROBE_ON_FTRACE_NOP;
-#endif
-}
-
-static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
- insn->opc = BREAKPOINT_INSTRUCTION;
- insn->disp = KPROBE_ON_FTRACE_CALL;
-#endif
-}
-
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 8f8456816d83..0d7fbdfe995a 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -24,19 +24,19 @@ void enabled_wait(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
unsigned long long idle_time;
- unsigned long psw_mask;
+ unsigned long psw_mask, flags;
- trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
clear_cpu_flag(CIF_NOHZ_DELAY);
+ local_irq_save(flags);
/* Call the assembler magic in entry.S */
psw_idle(idle, psw_mask);
+ local_irq_restore(flags);
- trace_hardirqs_off();
/* Account time spent with enabled wait psw loaded as idle time. */
write_seqcount_begin(&idle->seqcount);
@@ -118,22 +118,16 @@ u64 arch_cpu_idle_time(int cpu)
void arch_cpu_idle_enter(void)
{
- local_mcck_disable();
}
void arch_cpu_idle(void)
{
- if (!test_cpu_flag(CIF_MCCK_PENDING))
- /* Halt the cpu and keep track of cpu time accounting. */
- enabled_wait();
+ enabled_wait();
local_irq_enable();
}
void arch_cpu_idle_exit(void)
{
- local_mcck_enable();
- if (test_cpu_flag(CIF_MCCK_PENDING))
- s390_handle_mcck();
}
void arch_cpu_idle_dead(void)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 4a71061974fd..ccea9a245867 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -39,6 +39,7 @@
#define IPL_CCW_STR "ccw"
#define IPL_FCP_STR "fcp"
#define IPL_FCP_DUMP_STR "fcp_dump"
+#define IPL_NVME_STR "nvme"
#define IPL_NSS_STR "nss"
#define DUMP_CCW_STR "ccw"
@@ -93,6 +94,8 @@ static char *ipl_type_str(enum ipl_type type)
return IPL_FCP_DUMP_STR;
case IPL_TYPE_NSS:
return IPL_NSS_STR;
+ case IPL_TYPE_NVME:
+ return IPL_NVME_STR;
case IPL_TYPE_UNKNOWN:
default:
return IPL_UNKNOWN_STR;
@@ -133,6 +136,7 @@ static int reipl_capabilities = IPL_TYPE_UNKNOWN;
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
static struct ipl_parameter_block *reipl_block_fcp;
+static struct ipl_parameter_block *reipl_block_nvme;
static struct ipl_parameter_block *reipl_block_ccw;
static struct ipl_parameter_block *reipl_block_nss;
static struct ipl_parameter_block *reipl_block_actual;
@@ -261,6 +265,8 @@ static __init enum ipl_type get_ipl_type(void)
return IPL_TYPE_FCP_DUMP;
else
return IPL_TYPE_FCP;
+ case IPL_PBT_NVME:
+ return IPL_TYPE_NVME;
}
return IPL_TYPE_UNKNOWN;
}
@@ -317,6 +323,8 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
+ case IPL_TYPE_NVME:
+ return sprintf(page, "%08x\n", ipl_block.nvme.fid);
default:
return 0;
}
@@ -345,15 +353,35 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
+
+static ssize_t ipl_nvme_scp_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ unsigned int size = ipl_block.nvme.scp_data_len;
+ void *scp_data = &ipl_block.nvme.scp_data;
+
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
+}
+
static struct bin_attribute ipl_scp_data_attr =
__BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE);
+static struct bin_attribute ipl_nvme_scp_data_attr =
+ __BIN_ATTR(scp_data, S_IRUGO, ipl_nvme_scp_data_read, NULL, PAGE_SIZE);
+
static struct bin_attribute *ipl_fcp_bin_attrs[] = {
&ipl_parameter_attr,
&ipl_scp_data_attr,
NULL,
};
+static struct bin_attribute *ipl_nvme_bin_attrs[] = {
+ &ipl_parameter_attr,
+ &ipl_nvme_scp_data_attr,
+ NULL,
+};
+
/* FCP ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n",
@@ -365,6 +393,16 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n",
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n",
(unsigned long long)ipl_block.fcp.br_lba);
+/* NVMe ipl device attributes */
+DEFINE_IPL_ATTR_RO(ipl_nvme, fid, "0x%08llx\n",
+ (unsigned long long)ipl_block.nvme.fid);
+DEFINE_IPL_ATTR_RO(ipl_nvme, nsid, "0x%08llx\n",
+ (unsigned long long)ipl_block.nvme.nsid);
+DEFINE_IPL_ATTR_RO(ipl_nvme, bootprog, "%lld\n",
+ (unsigned long long)ipl_block.nvme.bootprog);
+DEFINE_IPL_ATTR_RO(ipl_nvme, br_lba, "%lld\n",
+ (unsigned long long)ipl_block.nvme.br_lba);
+
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
@@ -399,6 +437,24 @@ static struct attribute_group ipl_fcp_attr_group = {
.bin_attrs = ipl_fcp_bin_attrs,
};
+static struct attribute *ipl_nvme_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_nvme_fid_attr.attr,
+ &sys_ipl_nvme_nsid_attr.attr,
+ &sys_ipl_nvme_bootprog_attr.attr,
+ &sys_ipl_nvme_br_lba_attr.attr,
+ &sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_nvme_attr_group = {
+ .attrs = ipl_nvme_attrs,
+ .bin_attrs = ipl_nvme_bin_attrs,
+};
+
+
/* CCW ipl device attributes */
static struct attribute *ipl_ccw_attrs_vm[] = {
@@ -474,6 +530,9 @@ static int __init ipl_init(void)
case IPL_TYPE_FCP_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
+ case IPL_TYPE_NVME:
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nvme_attr_group);
+ break;
default:
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_unknown_attr_group);
@@ -727,6 +786,93 @@ static struct attribute_group reipl_fcp_attr_group = {
static struct kobj_attribute sys_reipl_fcp_clear_attr =
__ATTR(clear, 0644, reipl_fcp_clear_show, reipl_fcp_clear_store);
+/* NVME reipl device attributes */
+
+static ssize_t reipl_nvme_scpdata_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t size = reipl_block_nvme->nvme.scp_data_len;
+ void *scp_data = reipl_block_nvme->nvme.scp_data;
+
+ return memory_read_from_buffer(buf, count, &off, scp_data, size);
+}
+
+static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ size_t scpdata_len = count;
+ size_t padding;
+
+ if (off)
+ return -EINVAL;
+
+ memcpy(reipl_block_nvme->nvme.scp_data, buf, count);
+ if (scpdata_len % 8) {
+ padding = 8 - (scpdata_len % 8);
+ memset(reipl_block_nvme->nvme.scp_data + scpdata_len,
+ 0, padding);
+ scpdata_len += padding;
+ }
+
+ reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len;
+ reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len;
+ reipl_block_nvme->nvme.scp_data_len = scpdata_len;
+
+ return count;
+}
+
+static struct bin_attribute sys_reipl_nvme_scp_data_attr =
+ __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_nvme_scpdata_read,
+ reipl_nvme_scpdata_write, DIAG308_SCPDATA_SIZE);
+
+static struct bin_attribute *reipl_nvme_bin_attrs[] = {
+ &sys_reipl_nvme_scp_data_attr,
+ NULL,
+};
+
+DEFINE_IPL_ATTR_RW(reipl_nvme, fid, "0x%08llx\n", "%llx\n",
+ reipl_block_nvme->nvme.fid);
+DEFINE_IPL_ATTR_RW(reipl_nvme, nsid, "0x%08llx\n", "%llx\n",
+ reipl_block_nvme->nvme.nsid);
+DEFINE_IPL_ATTR_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
+ reipl_block_nvme->nvme.bootprog);
+DEFINE_IPL_ATTR_RW(reipl_nvme, br_lba, "%lld\n", "%lld\n",
+ reipl_block_nvme->nvme.br_lba);
+
+/* nvme wrapper */
+static ssize_t reipl_nvme_loadparm_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return reipl_generic_loadparm_show(reipl_block_nvme, page);
+}
+
+static ssize_t reipl_nvme_loadparm_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ return reipl_generic_loadparm_store(reipl_block_nvme, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_nvme_loadparm_attr =
+ __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nvme_loadparm_show,
+ reipl_nvme_loadparm_store);
+
+static struct attribute *reipl_nvme_attrs[] = {
+ &sys_reipl_nvme_fid_attr.attr,
+ &sys_reipl_nvme_nsid_attr.attr,
+ &sys_reipl_nvme_bootprog_attr.attr,
+ &sys_reipl_nvme_br_lba_attr.attr,
+ &sys_reipl_nvme_loadparm_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_nvme_attr_group = {
+ .attrs = reipl_nvme_attrs,
+ .bin_attrs = reipl_nvme_bin_attrs
+};
+
/* CCW reipl device attributes */
DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
@@ -891,6 +1037,9 @@ static int reipl_set_type(enum ipl_type type)
case IPL_TYPE_FCP:
reipl_block_actual = reipl_block_fcp;
break;
+ case IPL_TYPE_NVME:
+ reipl_block_actual = reipl_block_nvme;
+ break;
case IPL_TYPE_NSS:
reipl_block_actual = reipl_block_nss;
break;
@@ -917,6 +1066,8 @@ static ssize_t reipl_type_store(struct kobject *kobj,
rc = reipl_set_type(IPL_TYPE_CCW);
else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_FCP);
+ else if (strncmp(buf, IPL_NVME_STR, strlen(IPL_NVME_STR)) == 0)
+ rc = reipl_set_type(IPL_TYPE_NVME);
else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_NSS);
return (rc != 0) ? rc : len;
@@ -927,6 +1078,7 @@ static struct kobj_attribute reipl_type_attr =
static struct kset *reipl_kset;
static struct kset *reipl_fcp_kset;
+static struct kset *reipl_nvme_kset;
static void __reipl_run(void *unused)
{
@@ -945,6 +1097,10 @@ static void __reipl_run(void *unused)
else
diag308(DIAG308_LOAD_NORMAL, NULL);
break;
+ case IPL_TYPE_NVME:
+ diag308(DIAG308_SET, reipl_block_nvme);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ break;
case IPL_TYPE_NSS:
diag308(DIAG308_SET, reipl_block_nss);
diag308(DIAG308_LOAD_CLEAR, NULL);
@@ -1093,6 +1249,49 @@ out1:
return rc;
}
+static int __init reipl_nvme_init(void)
+{
+ int rc;
+
+ reipl_block_nvme = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!reipl_block_nvme)
+ return -ENOMEM;
+
+ /* sysfs: create kset for mixing attr group and bin attrs */
+ reipl_nvme_kset = kset_create_and_add(IPL_NVME_STR, NULL,
+ &reipl_kset->kobj);
+ if (!reipl_nvme_kset) {
+ free_page((unsigned long) reipl_block_nvme);
+ return -ENOMEM;
+ }
+
+ rc = sysfs_create_group(&reipl_nvme_kset->kobj, &reipl_nvme_attr_group);
+ if (rc) {
+ kset_unregister(reipl_nvme_kset);
+ free_page((unsigned long) reipl_block_nvme);
+ return rc;
+ }
+
+ if (ipl_info.type == IPL_TYPE_NVME) {
+ memcpy(reipl_block_nvme, &ipl_block, sizeof(ipl_block));
+ /*
+ * Fix loadparm: There are systems where the LOADPARM is
+ * invalid in the IPL parameter block, so always take it
+ * from sclp_ipl_info.
+ */
+ memcpy(reipl_block_nvme->nvme.loadparm, sclp_ipl_info.loadparm,
+ LOADPARM_LEN);
+ } else {
+ reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN;
+ reipl_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
+ reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
+ reipl_block_nvme->nvme.pbt = IPL_PBT_NVME;
+ reipl_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_IPL;
+ }
+ reipl_capabilities |= IPL_TYPE_NVME;
+ return 0;
+}
+
static int __init reipl_type_init(void)
{
enum ipl_type reipl_type = ipl_info.type;
@@ -1108,6 +1307,9 @@ static int __init reipl_type_init(void)
if (reipl_block->pb0_hdr.pbt == IPL_PBT_FCP) {
memcpy(reipl_block_fcp, reipl_block, size);
reipl_type = IPL_TYPE_FCP;
+ } else if (reipl_block->pb0_hdr.pbt == IPL_PBT_NVME) {
+ memcpy(reipl_block_nvme, reipl_block, size);
+ reipl_type = IPL_TYPE_NVME;
} else if (reipl_block->pb0_hdr.pbt == IPL_PBT_CCW) {
memcpy(reipl_block_ccw, reipl_block, size);
reipl_type = IPL_TYPE_CCW;
@@ -1134,6 +1336,9 @@ static int __init reipl_init(void)
rc = reipl_fcp_init();
if (rc)
return rc;
+ rc = reipl_nvme_init();
+ if (rc)
+ return rc;
rc = reipl_nss_init();
if (rc)
return rc;
@@ -1750,6 +1955,10 @@ void __init setup_ipl(void)
ipl_info.data.fcp.wwpn = ipl_block.fcp.wwpn;
ipl_info.data.fcp.lun = ipl_block.fcp.lun;
break;
+ case IPL_TYPE_NVME:
+ ipl_info.data.nvme.fid = ipl_block.nvme.fid;
+ ipl_info.data.nvme.nsid = ipl_block.nvme.nsid;
+ break;
case IPL_TYPE_NSS:
case IPL_TYPE_UNKNOWN:
/* We have no info to copy */
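A userspace sketch of driving the new re-IPL plumbing, derived from the kset and attribute names above (the /sys/firmware/reipl paths and the example function id are assumptions, not confirmed by the patch):

	/* Sketch: select NVMe re-IPL via the sysfs files created above. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/firmware/reipl/nvme/fid", "w");

		if (!f)
			return 1;
		fprintf(f, "0x13\n");	/* hypothetical function id */
		fclose(f);

		f = fopen("/sys/firmware/reipl/reipl_type", "w");
		if (!f)
			return 1;
		fprintf(f, "nvme\n");
		fclose(f);
		return 0;
	}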
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3a854cb5a4c6..93c6b8932fbd 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -16,7 +16,6 @@
#include <linux/debug_locks.h>
#include <asm/cio.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/ipl.h>
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 0a487fae763e..86c8d5370e7f 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -148,7 +148,6 @@ void s390_handle_mcck(void)
local_mcck_disable();
mcck = *this_cpu_ptr(&cpu_mcck);
memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
- clear_cpu_flag(CIF_MCCK_PENDING);
local_mcck_enable();
local_irq_restore(flags);
@@ -333,7 +332,7 @@ NOKPROBE_SYMBOL(s390_backup_mcck_info);
/*
* machine check handler.
*/
-void notrace s390_do_machine_check(struct pt_regs *regs)
+int notrace s390_do_machine_check(struct pt_regs *regs)
{
static int ipd_count;
static DEFINE_SPINLOCK(ipd_lock);
@@ -342,6 +341,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
unsigned long long tmp;
union mci mci;
unsigned long mcck_dam_code;
+ int mcck_pending = 0;
nmi_enter();
inc_irq_stat(NMI_NMI);
@@ -400,7 +400,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
*/
mcck->kill_task = 1;
mcck->mcck_code = mci.val;
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
/*
@@ -420,8 +420,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
mcck->stp_queue |= stp_sync_check();
if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
mcck->stp_queue |= stp_island_check();
- if (mcck->stp_queue)
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
/*
@@ -442,12 +441,12 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
if (mci.cp) {
/* Channel report word pending */
mcck->channel_report = 1;
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
if (mci.w) {
/* Warning pending */
mcck->warning = 1;
- set_cpu_flag(CIF_MCCK_PENDING);
+ mcck_pending = 1;
}
/*
@@ -462,7 +461,17 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
}
clear_cpu_flag(CIF_MCCK_GUEST);
+
+ if (user_mode(regs) && mcck_pending) {
+ nmi_exit();
+ return 1;
+ }
+
+ if (mcck_pending)
+ schedule_mcck_handler();
+
nmi_exit();
+ return 0;
}
NOKPROBE_SYMBOL(s390_do_machine_check);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 58faa12542a1..ce60a459a143 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -1416,7 +1415,7 @@ static const struct user_regset s390_regsets[] = {
};
static const struct user_regset_view user_s390_view = {
- .name = UTS_MACHINE,
+ .name = "s390x",
.e_machine = EM_S390,
.regsets = s390_regsets,
.n = ARRAY_SIZE(s390_regsets)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0f0b140b5558..5853c9872dfe 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -381,8 +381,7 @@ static void __init setup_lowcore_dat_off(void)
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->external_new_psw.addr = (unsigned long) ext_int_handler;
- lc->svc_new_psw.mask = PSW_KERNEL_BITS |
- PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
+ lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->svc_new_psw.addr = (unsigned long) system_call;
lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
@@ -594,9 +593,10 @@ static void __init setup_memory_end(void)
#ifdef CONFIG_CRASH_DUMP
/*
- * When kdump is enabled, we have to ensure that no memory from
- * the area [0 - crashkernel memory size] and
- * [crashk_res.start - crashk_res.end] is set offline.
+ * When kdump is enabled, we have to ensure that no memory from the area
+ * [0 - crashkernel memory size] is set offline - it will be exchanged with
+ * the crashkernel memory region when kdump is triggered. The crashkernel
+ * memory region can never get offlined (pages are unmovable).
*/
static int kdump_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
@@ -607,11 +607,7 @@ static int kdump_mem_notifier(struct notifier_block *nb,
return NOTIFY_OK;
if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
return NOTIFY_BAD;
- if (arg->start_pfn > PFN_DOWN(crashk_res.end))
- return NOTIFY_OK;
- if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
- return NOTIFY_OK;
- return NOTIFY_BAD;
+ return NOTIFY_OK;
}
static struct notifier_block kdump_mem_nb = {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 10dbb12eb14d..e6be63ff162a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -61,6 +61,7 @@ enum {
ec_schedule = 0,
ec_call_function_single,
ec_stop_cpu,
+ ec_mcck_pending,
};
enum {
@@ -403,6 +404,11 @@ int smp_find_processor_id(u16 address)
return -1;
}
+void schedule_mcck_handler(void)
+{
+ pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
+}
+
bool notrace arch_vcpu_is_preempted(int cpu)
{
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
@@ -497,6 +503,8 @@ static void smp_handle_ext_call(void)
scheduler_ipi();
if (test_bit(ec_call_function_single, &bits))
generic_smp_call_function_single_interrupt();
+ if (test_bit(ec_mcck_pending, &bits))
+ s390_handle_mcck();
}
static void do_ext_call_interrupt(struct ext_code ext_code,
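Taken together with the nmi.c hunks, the machine-check path now defers the heavy handling: s390_do_machine_check() only records state, and for kernel-mode machine checks raises the new ec_mcck_pending external call, which smp_handle_ext_call() turns into s390_handle_mcck(). A condensed sketch of that flow (collect_mcck_state() is a hypothetical stand-in for the bookkeeping in the real handler):

	/* Sketch of the deferred flow; names taken from the hunks above. */
	int s390_do_machine_check(struct pt_regs *regs)
	{
		int mcck_pending = collect_mcck_state();	/* hypothetical */

		if (user_mode(regs) && mcck_pending)
			return 1;	/* entry.S calls s390_handle_mcck() */
		if (mcck_pending)
			schedule_mcck_handler();	/* ec_mcck_pending IPI */
		return 0;
	}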
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 4c0677fc8904..66e89b2866d7 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -205,7 +205,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
again:
rc = -EFAULT;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
uaddr = __gmap_translate(gmap, gaddr);
if (IS_ERR_VALUE(uaddr))
@@ -234,7 +234,7 @@ again:
pte_unmap_unlock(ptep, ptelock);
unlock_page(page);
out:
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
if (rc == -EAGAIN) {
wait_on_page_writeback(page);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index bcc9bdb39ba2..c4baefaa6e34 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -21,7 +21,6 @@
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -208,7 +207,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* it at vdso_base which is the "natural" base for it, but we might
* fail and end up putting it elsewhere.
*/
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
@@ -239,7 +238,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
rc = 0;
out_up:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return rc;
}
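The remaining hunks in this file and the KVM files below are a mechanical conversion from direct mmap_sem use to the mmap lock wrappers. A sketch of the mapping (editorial; the wrappers live in linux/mmap_lock.h):

	/* Sketch: the old/new pairs used throughout these hunks. */
	static int example(struct mm_struct *mm)
	{
		mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
		/* ... read-only walk of the address space ... */
		mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */

		if (mmap_write_lock_killable(mm))	/* was: down_write_killable() */
			return -EINTR;
		/* ... modify the address space ... */
		mmap_write_unlock(mm);	/* was: up_write(&mm->mmap_sem) */
		return 0;
	}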
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 47a67a958107..6d6b57059493 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -9,8 +9,8 @@
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/err.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -1173,7 +1173,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
int dat_protection, fake;
int rc;
- down_read(&sg->mm->mmap_sem);
+ mmap_read_lock(sg->mm);
/*
* We don't want any guest-2 tables to change - so the parent
* tables/pointers we read stay valid - unshadowing is however
@@ -1202,6 +1202,6 @@ shadow_page:
if (!rc)
rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
ipte_unlock(vcpu);
- up_read(&sg->mm->mmap_sem);
+ mmap_read_unlock(sg->mm);
return rc;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index a4d4ca2769bd..1608fd99bbee 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2767,10 +2767,10 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr)
{
struct page *page = NULL;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
&page, NULL, NULL);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
return page;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 06bde4bad205..d47c19718615 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -31,11 +31,11 @@
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
@@ -763,9 +763,9 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
r = -EINVAL;
else {
r = 0;
- down_write(&kvm->mm->mmap_sem);
+ mmap_write_lock(kvm->mm);
kvm->mm->context.allow_gmap_hpage_1m = 1;
- up_write(&kvm->mm->mmap_sem);
+ mmap_write_unlock(kvm->mm);
/*
* We might have to create fake 4k page
* tables. To avoid that the hardware works on
@@ -1815,7 +1815,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
if (!keys)
return -ENOMEM;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -1829,7 +1829,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
break;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (!r) {
r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
@@ -1873,7 +1873,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
goto out;
i = 0;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
while (i < args->count) {
unlocked = false;
@@ -1900,7 +1900,7 @@ static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
i++;
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
out:
kvfree(keys);
return r;
@@ -2089,14 +2089,14 @@ static int kvm_s390_get_cmma_bits(struct kvm *kvm,
if (!values)
return -ENOMEM;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
if (peek)
ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
else
ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
if (kvm->arch.migration_mode)
args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
@@ -2146,7 +2146,7 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
goto out;
}
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
srcu_idx = srcu_read_lock(&kvm->srcu);
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -2161,12 +2161,12 @@ static int kvm_s390_set_cmma_bits(struct kvm *kvm,
set_pgste_bits(kvm->mm, hva, mask, pgstev);
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
if (!kvm->mm->context.uses_cmm) {
- down_write(&kvm->mm->mmap_sem);
+ mmap_write_lock(kvm->mm);
kvm->mm->context.uses_cmm = 1;
- up_write(&kvm->mm->mmap_sem);
+ mmap_write_unlock(kvm->mm);
}
out:
vfree(bits);
@@ -2239,9 +2239,9 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
if (r)
break;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
r = gmap_mark_unmergeable();
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
if (r)
break;
@@ -3923,11 +3923,13 @@ static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
}
}
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+
+ return true;
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
@@ -3998,9 +4000,6 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
if (need_resched())
schedule();
- if (test_cpu_flag(CIF_MCCK_PENDING))
- s390_handle_mcck();
-
if (!kvm_is_ucontrol(vcpu->kvm)) {
rc = kvm_s390_deliver_pending_interrupts(vcpu);
if (rc)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 893893642415..96ae368aa0a2 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
@@ -20,7 +21,6 @@
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
-#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
@@ -270,18 +270,18 @@ static int handle_iske(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = get_guest_storage_key(current->mm, vmaddr, &key);
if (rc) {
rc = fixup_user_fault(current, current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto retry;
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -317,17 +317,17 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
unlocked = false;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = reset_guest_reference_bit(current->mm, vmaddr);
if (rc < 0) {
rc = fixup_user_fault(current, current->mm, vmaddr,
FAULT_FLAG_WRITE, &unlocked);
if (!rc) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto retry;
}
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -385,7 +385,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
if (kvm_is_error_hva(vmaddr))
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
m3 & SSKE_NQ, m3 & SSKE_MR,
m3 & SSKE_MC);
@@ -395,7 +395,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc < 0)
@@ -1091,7 +1091,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
if (rc)
return rc;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
rc = cond_set_guest_storage_key(current->mm, vmaddr,
key, NULL, nq, mr, mc);
if (rc < 0) {
@@ -1099,7 +1099,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (rc == -EAGAIN)
@@ -1122,7 +1122,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
}
/*
- * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
+ * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
*/
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
@@ -1220,9 +1220,9 @@ static int handle_essa(struct kvm_vcpu *vcpu)
* already correct, we do nothing and avoid the lock.
*/
if (vcpu->kvm->mm->context.uses_cmm == 0) {
- down_write(&vcpu->kvm->mm->mmap_sem);
+ mmap_write_lock(vcpu->kvm->mm);
vcpu->kvm->mm->context.uses_cmm = 1;
- up_write(&vcpu->kvm->mm->mmap_sem);
+ mmap_write_unlock(vcpu->kvm->mm);
}
/*
* If we are here, we are supposed to have CMMA enabled in
@@ -1239,11 +1239,11 @@ static int handle_essa(struct kvm_vcpu *vcpu)
} else {
int srcu_idx;
- down_read(&vcpu->kvm->mm->mmap_sem);
+ mmap_read_lock(vcpu->kvm->mm);
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
i = __do_essa(vcpu, orc);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
- up_read(&vcpu->kvm->mm->mmap_sem);
+ mmap_read_unlock(vcpu->kvm->mm);
if (i < 0)
return i;
/* Account for the possible extra cbrl entry */
@@ -1251,10 +1251,10 @@ static int handle_essa(struct kvm_vcpu *vcpu)
}
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
for (i = 0; i < entries; ++i)
__gmap_zap(gmap, cbrlo[i]);
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return 0;
}
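[handle_iske(), handle_rrbe() and handle_pfmf() all follow one retry protocol: try the storage-key operation under the read lock; on a fault, let fixup_user_fault() resolve it. Because fixup_user_fault() may drop and retake mmap_lock (reported through 'unlocked'), the operation is retried from the top. Condensed into a single hedged helper (the op callback is hypothetical):

#include <linux/mm.h>
#include <linux/mmap_lock.h>
#include <linux/sched.h>

static int key_op_retry_sketch(struct mm_struct *mm, unsigned long vmaddr,
                               int (*op)(struct mm_struct *, unsigned long))
{
        bool unlocked;
        int rc;

retry:
        unlocked = false;
        mmap_read_lock(mm);
        rc = op(mm, vmaddr);
        if (rc) {
                rc = fixup_user_fault(current, mm, vmaddr,
                                      FAULT_FLAG_WRITE, &unlocked);
                if (!rc) {
                        mmap_read_unlock(mm);
                        goto retry;     /* fault fixed up, try again */
                }
        }
        mmap_read_unlock(mm);
        return rc;
}]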
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index ef05b4e167fb..9e9056cebfcf 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -1000,9 +1000,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
handle_last_fault(vcpu, vsie_page);
- if (test_cpu_flag(CIF_MCCK_PENDING))
- s390_handle_mcck();
-
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
/* save current guest state of bp isolation override */
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index d4aa10795605..daca7bad66de 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__delay);
static void __udelay_disabled(unsigned long long usecs)
{
- unsigned long cr0, cr0_new, psw_mask;
+ unsigned long cr0, cr0_new, psw_mask, flags;
struct s390_idle_data idle;
u64 end;
@@ -45,7 +45,9 @@ static void __udelay_disabled(unsigned long long usecs)
psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
set_clock_comparator(end);
set_cpu_flag(CIF_IGNORE_IRQ);
+ local_irq_save(flags);
psw_idle(&idle, psw_mask);
+ local_irq_restore(flags);
clear_cpu_flag(CIF_IGNORE_IRQ);
set_clock_comparator(S390_lowcore.clock_comparator);
__ctl_load(cr0, 0, 0);
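[The __udelay_disabled() hunk brackets psw_idle() with local_irq_save()/local_irq_restore(). A hedged reading of the intent, since the diff carries no changelog here: the idle PSW enables interrupts while waiting, so saving and restoring the flags guarantees that the caller's interrupt-disabled state, and the irq-tracing bookkeeping attached to it, is exact afterwards. The bracket in isolation:

#include <linux/irqflags.h>

static void bracketed_wait_sketch(void (*enter_wait)(void))
{
        unsigned long flags;

        local_irq_save(flags);          /* record state, disable IRQs */
        enter_wait();                   /* may enable interrupts internally */
        local_irq_restore(flags);       /* caller's exact state comes back */
}]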
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 5d67b81c704a..c2ac9b8ae612 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -6,7 +6,6 @@
#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
static unsigned long max_addr;
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index fd0dae9d10f4..9e0aa7aa03ba 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -20,9 +20,9 @@
#include <linux/ctype.h>
#include <linux/ioport.h>
#include <linux/refcount.h>
+#include <linux/pgtable.h>
#include <asm/diag.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/ebcdic.h>
#include <asm/errno.h>
#include <asm/extmem.h>
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index dedc28be27ab..6a24751557f0 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -33,7 +33,6 @@
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
-#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -434,7 +433,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
flags |= FAULT_FLAG_USER;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
gmap = NULL;
if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
@@ -508,14 +507,14 @@ retry:
if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
(flags & FAULT_FLAG_RETRY_NOWAIT)) {
/* FAULT_FLAG_RETRY_NOWAIT has been set,
- * mmap_sem has not been released */
+ * mmap_lock has not been released */
current->thread.gmap_pfault = 1;
fault = VM_FAULT_PFAULT;
goto out_up;
}
flags &= ~FAULT_FLAG_RETRY_NOWAIT;
flags |= FAULT_FLAG_TRIED;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
goto retry;
}
}
@@ -533,7 +532,7 @@ retry:
}
fault = 0;
out_up:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out:
return fault;
}
@@ -825,22 +824,22 @@ void do_secure_storage_access(struct pt_regs *regs)
switch (get_fault_type(regs)) {
case USER_FAULT:
mm = current->mm;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, addr);
if (!vma) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
break;
}
page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
if (IS_ERR_OR_NULL(page)) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
break;
}
if (arch_make_page_accessible(page))
send_sig(SIGSEGV, current, 0);
put_page(page);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
break;
case KERNEL_FAULT:
page = phys_to_page(addr);
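[do_exception() also shows the generic fault-retry protocol that survives the rename: when handle_mm_fault() returns VM_FAULT_RETRY, the fault handler has already dropped mmap_lock, so the arch code retakes it, sets FAULT_FLAG_TRIED and loops. A stripped-down sketch against the v5.8-era three-argument handle_mm_fault(); the real function additionally handles gmap faults, FAULT_FLAG_RETRY_NOWAIT and signal exits:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static vm_fault_t fault_retry_sketch(struct mm_struct *mm, unsigned long addr)
{
        unsigned int flags = FAULT_FLAG_DEFAULT;
        struct vm_area_struct *vma;
        vm_fault_t fault;

        mmap_read_lock(mm);
retry:
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr) {
                mmap_read_unlock(mm);
                return VM_FAULT_SIGSEGV;
        }
        fault = handle_mm_fault(vma, addr, flags);
        if (fault & VM_FAULT_RETRY) {
                /* the fault handler released mmap_lock before returning */
                flags |= FAULT_FLAG_TRIED;
                mmap_read_lock(mm);
                goto retry;
        }
        mmap_read_unlock(mm);
        return fault;
}]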
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 4b6903fbba4a..190357ff86b3 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -17,8 +17,8 @@
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
@@ -300,7 +300,7 @@ struct gmap *gmap_get_enabled(void)
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
- * gmap_alloc_table is assumed to be called with mmap_sem held
+ * gmap_alloc_table is assumed to be called with mmap_lock held
*/
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
unsigned long init, unsigned long gaddr)
@@ -405,10 +405,10 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
return -EINVAL;
flush = 0;
- down_write(&gmap->mm->mmap_sem);
+ mmap_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE)
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
- up_write(&gmap->mm->mmap_sem);
+ mmap_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
return 0;
@@ -438,7 +438,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
return -EINVAL;
flush = 0;
- down_write(&gmap->mm->mmap_sem);
+ mmap_write_lock(gmap->mm);
for (off = 0; off < len; off += PMD_SIZE) {
/* Remove old translation */
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
@@ -448,7 +448,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
(void *) from + off))
break;
}
- up_write(&gmap->mm->mmap_sem);
+ mmap_write_unlock(gmap->mm);
if (flush)
gmap_flush_tlb(gmap);
if (off >= len)
@@ -466,7 +466,7 @@ EXPORT_SYMBOL_GPL(gmap_map_segment);
* Returns user space address which corresponds to the guest address or
* -EFAULT if no such mapping exists.
* This function does not establish potentially missing page table entries.
- * The mmap_sem of the mm that belongs to the address space must be held
+ * The mmap_lock of the mm that belongs to the address space must be held
* when this function gets called.
*
* Note: Can also be called for shadow gmaps.
@@ -495,9 +495,9 @@ unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
unsigned long rc;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
rc = __gmap_translate(gmap, gaddr);
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
@@ -534,7 +534,7 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
*
* Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
* if the vm address is already mapped to a different guest segment.
- * The mmap_sem of the mm that belongs to the address space must be held
+ * The mmap_lock of the mm that belongs to the address space must be held
* when this function gets called.
*/
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
@@ -640,7 +640,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
int rc;
bool unlocked;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
retry:
unlocked = false;
@@ -655,7 +655,7 @@ retry:
goto out_up;
}
/*
- * In the case that fixup_user_fault unlocked the mmap_sem during
+ * In the case that fixup_user_fault unlocked the mmap_lock during
* faultin, redo __gmap_translate to not race with a map/unmap_segment.
*/
if (unlocked)
@@ -663,13 +663,13 @@ retry:
rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
/*
- * this function is assumed to be called with mmap_sem held
+ * this function is assumed to be called with mmap_lock held
*/
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
@@ -696,7 +696,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
unsigned long gaddr, vmaddr, size;
struct vm_area_struct *vma;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
for (gaddr = from; gaddr < to;
gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
/* Find the vm address for the guest address */
@@ -719,7 +719,7 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
}
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
}
EXPORT_SYMBOL_GPL(gmap_discard);
@@ -882,7 +882,7 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
return -EFAULT;
if (unlocked)
- /* lost mmap_sem, caller has to retry __gmap_translate */
+ /* lost mmap_lock, caller has to retry __gmap_translate */
return 0;
/* Connect the page tables */
return __gmap_link(gmap, gaddr, vmaddr);
@@ -953,7 +953,7 @@ static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
* -EAGAIN if a fixup is needed
* -EINVAL if unsupported notifier bits have been specified
*
- * Expected to be called with sg->mm->mmap_sem in read and
+ * Expected to be called with sg->mm->mmap_lock in read and
* guest_table_lock held.
*/
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
@@ -999,7 +999,7 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
* Returns 0 if successfully protected, -ENOMEM if out of memory and
* -EAGAIN if a fixup is needed.
*
- * Expected to be called with sg->mm->mmap_sem in read
+ * Expected to be called with sg->mm->mmap_lock in read
*/
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
pmd_t *pmdp, int prot, unsigned long bits)
@@ -1035,7 +1035,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
* Returns 0 if successfully protected, -ENOMEM if out of memory and
* -EFAULT if gaddr is invalid (or mapping for shadows is missing).
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
@@ -1106,9 +1106,9 @@ int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
return -EINVAL;
if (!MACHINE_HAS_ESOP && prot == PROT_READ)
return -EINVAL;
- down_read(&gmap->mm->mmap_sem);
+ mmap_read_lock(gmap->mm);
rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
- up_read(&gmap->mm->mmap_sem);
+ mmap_read_unlock(gmap->mm);
return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
@@ -1124,7 +1124,7 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
* if reading using the virtual address failed. -EINVAL if called on a gmap
* shadow.
*
- * Called with gmap->mm->mmap_sem in read.
+ * Called with gmap->mm->mmap_lock in read.
*/
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
@@ -1696,11 +1696,11 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
}
spin_unlock(&parent->shadow_lock);
/* protect after insertion, so it will get properly invalidated */
- down_read(&parent->mm->mmap_sem);
+ mmap_read_lock(parent->mm);
rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
PROT_READ, GMAP_NOTIFY_SHADOW);
- up_read(&parent->mm->mmap_sem);
+ mmap_read_unlock(parent->mm);
spin_lock(&parent->shadow_lock);
new->initialized = true;
if (rc) {
@@ -1729,7 +1729,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
int fake)
@@ -1813,7 +1813,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
int fake)
@@ -1897,7 +1897,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
int fake)
@@ -1981,7 +1981,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
* Returns 0 if the shadow page table was found and -EAGAIN if the page
* table was not found.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
unsigned long *pgt, int *dat_protection,
@@ -2021,7 +2021,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
* shadow table structure is incomplete, -ENOMEM if out of memory,
* -EFAULT if an address in the parent gmap could not be resolved and
*
- * Called with gmap->mm->mmap_sem in read
+ * Called with gmap->mm->mmap_lock in read
*/
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
int fake)
@@ -2100,7 +2100,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
* shadow table structure is incomplete, -ENOMEM if out of memory and
* -EFAULT if an address in the parent gmap could not be resolved.
*
- * Called with sg->mm->mmap_sem in read.
+ * Called with sg->mm->mmap_lock in read.
*/
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
@@ -2543,12 +2543,12 @@ int s390_enable_sie(void)
/* Fail if the page tables are 2K */
if (!mm_alloc_pgste(mm))
return -EINVAL;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
@@ -2617,7 +2617,7 @@ int s390_enable_skey(void)
struct mm_struct *mm = current->mm;
int rc = 0;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
if (mm_uses_skeys(mm))
goto out_up;
@@ -2630,7 +2630,7 @@ int s390_enable_skey(void)
walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
out_up:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
@@ -2651,9 +2651,9 @@ static const struct mm_walk_ops reset_cmma_walk_ops = {
void s390_reset_cmma(struct mm_struct *mm)
{
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
@@ -2685,9 +2685,9 @@ void s390_reset_acc(struct mm_struct *mm)
*/
if (!mmget_not_zero(mm))
return;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
EXPORT_SYMBOL_GPL(s390_reset_acc);
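[s390_reset_acc() pairs the lock conversion with the standard lifetime rule for walking an address space that may be exiting: pin the mm with mmget_not_zero() before taking mmap_lock, drop the pin with mmput() afterwards. Sketch:

#include <linux/sched/mm.h>
#include <linux/mmap_lock.h>

static void walk_foreign_mm_sketch(struct mm_struct *mm)
{
        if (!mmget_not_zero(mm))
                return; /* mm users already dropped to zero */
        mmap_read_lock(mm);
        /* ... walk_page_range(mm, 0, TASK_SIZE, &ops, NULL) ... */
        mmap_read_unlock(mm);
        mmput(mm);      /* release the temporary reference */
}]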
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index b11bcf4da531..6dc7c3b60ef6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -33,7 +33,6 @@
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 06345616a646..99dd1c63a065 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -2,8 +2,8 @@
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index e22c06d5f206..c5c52ec2b46f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -7,7 +7,6 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/set_memory.h>
@@ -86,7 +85,7 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
{
pte_t *ptep, new;
- ptep = pte_offset(pmdp, addr);
+ ptep = pte_offset_kernel(pmdp, addr);
do {
new = *ptep;
if (pte_none(new))
@@ -338,19 +337,11 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
int nr, i, j;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
pte_t *pte;
for (i = 0; i < numpages;) {
address = page_to_phys(page + i);
- pgd = pgd_offset_k(address);
- p4d = p4d_offset(pgd, address);
- pud = pud_offset(p4d, address);
- pmd = pmd_offset(pud, address);
- pte = pte_offset_kernel(pmd, address);
+ pte = virt_to_kpte(address);
nr = (unsigned long)pte >> ilog2(sizeof(long));
nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
nr = min(numpages - i, nr);
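[The __kernel_map_pages() hunk replaces the open-coded five-level descent with the new virt_to_kpte() helper. Roughly, the helper is the walk that was deleted plus a pmd_none() guard; shown here as a sketch of the generic definition rather than a verbatim copy:

#include <linux/pgtable.h>

static pte_t *virt_to_kpte_sketch(unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset_k(vaddr);       /* kernel page-table root */
        p4d_t *p4d = p4d_offset(pgd, vaddr);
        pud_t *pud = pud_offset(p4d, vaddr);
        pmd_t *pmd = pmd_offset(pud, vaddr);

        return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}]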
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index fff169d64711..11d2c8395e2a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -114,7 +114,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
spin_lock_bh(&mm->page_table_lock);
/*
- * This routine gets called with mmap_sem lock held and there is
+ * This routine gets called with mmap_lock lock held and there is
* no reason to optimize for the case of otherwise. However, if
* that would ever change, the below check will let us know.
*/
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 9ebd01219812..2e0cc19f4cd7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -19,7 +19,6 @@
#include <linux/ksm.h>
#include <linux/mman.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f810930aff42..8b6282cf7d13 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 748626a33028..b4e3c84772a1 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
- pci_event.o pci_debug.o pci_insn.o pci_mmio.o
+ pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
+ pci_bus.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 94ca121933de..3902c9f6f2d6 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -36,18 +36,21 @@
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
+#include "pci_bus.h"
+
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);
-static unsigned int zpci_num_domains_allocated;
#define ZPCI_IOMAP_ENTRIES \
min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \
ZPCI_IOMAP_MAX_ENTRIES)
+unsigned int s390_pci_no_rid;
+
static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
@@ -88,17 +91,12 @@ void zpci_remove_reserved_devices(void)
spin_unlock(&zpci_list_lock);
list_for_each_entry_safe(zdev, tmp, &remove, entry)
- zpci_remove_device(zdev);
-}
-
-static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
-{
- return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
+ zpci_zdev_put(zdev);
}
int pci_domain_nr(struct pci_bus *bus)
{
- return ((struct zpci_dev *) bus->sysdata)->domain;
+ return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);
@@ -228,28 +226,29 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
zpci_memcpy_toio(to, from, count);
}
-void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
+void __iomem *ioremap(phys_addr_t addr, size_t size)
{
+ unsigned long offset, vaddr;
struct vm_struct *area;
- unsigned long offset;
+ phys_addr_t last_addr;
- if (!size)
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
return NULL;
if (!static_branch_unlikely(&have_mio))
- return (void __iomem *) ioaddr;
+ return (void __iomem *) addr;
- offset = ioaddr & ~PAGE_MASK;
- ioaddr &= PAGE_MASK;
+ offset = addr & ~PAGE_MASK;
+ addr &= PAGE_MASK;
size = PAGE_ALIGN(size + offset);
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
- if (ioremap_page_range((unsigned long) area->addr,
- (unsigned long) area->addr + size,
- ioaddr, PAGE_KERNEL)) {
- vunmap(area->addr);
+ vaddr = (unsigned long) area->addr;
+ if (ioremap_page_range(vaddr, vaddr + size, addr, PAGE_KERNEL)) {
+ free_vm_area(area);
return NULL;
}
return (void __iomem *) ((unsigned long) area->addr + offset);
@@ -373,29 +372,17 @@ EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus);
- int ret;
+ struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
- if (!zdev || devfn != ZPCI_DEVFN)
- ret = -ENODEV;
- else
- ret = zpci_cfg_load(zdev, where, val, size);
-
- return ret;
+ return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus);
- int ret;
+ struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
- if (!zdev || devfn != ZPCI_DEVFN)
- ret = -ENODEV;
- else
- ret = zpci_cfg_store(zdev, where, val, size);
-
- return ret;
+ return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}
static struct pci_ops pci_root_ops = {
@@ -506,15 +493,15 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
return r;
}
-static int zpci_setup_bus_resources(struct zpci_dev *zdev,
- struct list_head *resources)
+int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ struct list_head *resources)
{
unsigned long addr, size, flags;
struct resource *res;
int i, entry;
snprintf(zdev->res_name, sizeof(zdev->res_name),
- "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);
+ "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (!zdev->bars[i].size)
@@ -608,98 +595,53 @@ void pcibios_disable_device(struct pci_dev *pdev)
zpci_debug_exit_device(zdev);
}
-static int zpci_alloc_domain(struct zpci_dev *zdev)
+static int __zpci_register_domain(int domain)
{
spin_lock(&zpci_domain_lock);
- if (zpci_num_domains_allocated > (ZPCI_NR_DEVICES - 1)) {
+ if (test_bit(domain, zpci_domain)) {
spin_unlock(&zpci_domain_lock);
- pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n",
- zdev->fid, ZPCI_NR_DEVICES);
- return -ENOSPC;
+ pr_err("Domain %04x is already assigned\n", domain);
+ return -EEXIST;
}
+ set_bit(domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+ return domain;
+}
- if (zpci_unique_uid) {
- zdev->domain = (u16) zdev->uid;
- if (zdev->domain == 0) {
- pr_warn("UID checking is active but no UID is set for PCI function %08x, so automatic domain allocation is used instead\n",
- zdev->fid);
- update_uid_checking(false);
- goto auto_allocate;
- }
+static int __zpci_alloc_domain(void)
+{
+ int domain;
- if (test_bit(zdev->domain, zpci_domain)) {
- spin_unlock(&zpci_domain_lock);
- pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n",
- zdev->fid, zdev->domain);
- return -EEXIST;
- }
- set_bit(zdev->domain, zpci_domain);
- zpci_num_domains_allocated++;
- spin_unlock(&zpci_domain_lock);
- return 0;
- }
-auto_allocate:
+ spin_lock(&zpci_domain_lock);
/*
* We can always auto allocate domains below ZPCI_NR_DEVICES.
* There is either a free domain or we have reached the maximum in
* which case we would have bailed earlier.
*/
- zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
- set_bit(zdev->domain, zpci_domain);
- zpci_num_domains_allocated++;
+ domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
+ set_bit(domain, zpci_domain);
spin_unlock(&zpci_domain_lock);
- return 0;
+ return domain;
}
-static void zpci_free_domain(struct zpci_dev *zdev)
+int zpci_alloc_domain(int domain)
{
- spin_lock(&zpci_domain_lock);
- clear_bit(zdev->domain, zpci_domain);
- zpci_num_domains_allocated--;
- spin_unlock(&zpci_domain_lock);
+ if (zpci_unique_uid) {
+ if (domain)
+ return __zpci_register_domain(domain);
+ pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
+ update_uid_checking(false);
+ }
+ return __zpci_alloc_domain();
}
-void pcibios_remove_bus(struct pci_bus *bus)
+void zpci_free_domain(int domain)
{
- struct zpci_dev *zdev = get_zdev_by_bus(bus);
-
- zpci_exit_slot(zdev);
- zpci_cleanup_bus_resources(zdev);
- zpci_destroy_iommu(zdev);
- zpci_free_domain(zdev);
-
- spin_lock(&zpci_list_lock);
- list_del(&zdev->entry);
- spin_unlock(&zpci_list_lock);
-
- zpci_dbg(3, "rem fid:%x\n", zdev->fid);
- kfree(zdev);
+ spin_lock(&zpci_domain_lock);
+ clear_bit(domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
}
-static int zpci_scan_bus(struct zpci_dev *zdev)
-{
- LIST_HEAD(resources);
- int ret;
-
- ret = zpci_setup_bus_resources(zdev, &resources);
- if (ret)
- goto error;
-
- zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
- zdev, &resources);
- if (!zdev->bus) {
- ret = -EIO;
- goto error;
- }
- zdev->bus->max_bus_speed = zdev->max_bus_speed;
- pci_bus_add_devices(zdev->bus);
- return 0;
-
-error:
- zpci_cleanup_bus_resources(zdev);
- pci_free_resource_list(&resources);
- return ret;
-}
int zpci_enable_device(struct zpci_dev *zdev)
{
@@ -734,13 +676,15 @@ int zpci_create_device(struct zpci_dev *zdev)
{
int rc;
- rc = zpci_alloc_domain(zdev);
- if (rc)
- goto out;
+ kref_init(&zdev->kref);
+
+ spin_lock(&zpci_list_lock);
+ list_add_tail(&zdev->entry, &zpci_list);
+ spin_unlock(&zpci_list_lock);
rc = zpci_init_iommu(zdev);
if (rc)
- goto out_free;
+ goto out;
mutex_init(&zdev->lock);
if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
@@ -748,36 +692,59 @@ int zpci_create_device(struct zpci_dev *zdev)
if (rc)
goto out_destroy_iommu;
}
- rc = zpci_scan_bus(zdev);
+
+ rc = zpci_bus_device_register(zdev, &pci_root_ops);
if (rc)
goto out_disable;
- spin_lock(&zpci_list_lock);
- list_add_tail(&zdev->entry, &zpci_list);
- spin_unlock(&zpci_list_lock);
-
- zpci_init_slot(zdev);
-
return 0;
out_disable:
if (zdev->state == ZPCI_FN_STATE_ONLINE)
zpci_disable_device(zdev);
+
out_destroy_iommu:
zpci_destroy_iommu(zdev);
-out_free:
- zpci_free_domain(zdev);
out:
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
return rc;
}
-void zpci_remove_device(struct zpci_dev *zdev)
+void zpci_release_device(struct kref *kref)
{
- if (!zdev->bus)
- return;
+ struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+
+ if (zdev->zbus->bus) {
+ struct pci_dev *pdev;
+
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+ if (pdev)
+ pci_stop_and_remove_bus_device_locked(pdev);
+ }
+
+ switch (zdev->state) {
+ case ZPCI_FN_STATE_ONLINE:
+ case ZPCI_FN_STATE_CONFIGURED:
+ zpci_disable_device(zdev);
+ fallthrough;
+ case ZPCI_FN_STATE_STANDBY:
+ if (zdev->has_hp_slot)
+ zpci_exit_slot(zdev);
+ zpci_cleanup_bus_resources(zdev);
+ zpci_bus_device_unregister(zdev);
+ zpci_destroy_iommu(zdev);
+ fallthrough;
+ default:
+ break;
+ }
- pci_stop_root_bus(zdev->bus);
- pci_remove_root_bus(zdev->bus);
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zpci_dbg(3, "rem fid:%x\n", zdev->fid);
+ kfree(zdev);
}
int zpci_report_error(struct pci_dev *pdev,
@@ -844,6 +811,10 @@ char * __init pcibios_setup(char *str)
s390_pci_force_floating = 1;
return NULL;
}
+ if (!strcmp(str, "norid")) {
+ s390_pci_no_rid = 1;
+ return NULL;
+ }
return str;
}
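[Two details in the ioremap() rework deserve a note. First, the prologue now rejects empty and wrapping requests: with a 64-bit phys_addr_t, addr = 0xfffffffffffff000 and size = 0x2000 give last_addr = 0xfff < addr, so the call fails instead of mapping a wrapped range. Second, the failure path switches from vunmap() to free_vm_area(), which releases the reserved region without assuming any pages were actually mapped. The range check in isolation:

#include <linux/types.h>

static bool ioremap_range_ok_sketch(phys_addr_t addr, size_t size)
{
        phys_addr_t last_addr = addr + size - 1;

        /* reject empty ranges and arithmetic wrap-around */
        return size && last_addr >= addr;
}]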
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
new file mode 100644
index 000000000000..642a99384688
--- /dev/null
+++ b/arch/s390/pci/pci_bus.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/jump_label.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+
+#include <asm/pci_clp.h>
+#include <asm/pci_dma.h>
+
+#include "pci_bus.h"
+
+static LIST_HEAD(zbus_list);
+static DEFINE_SPINLOCK(zbus_list_lock);
+static int zpci_nb_devices;
+
+/* zpci_bus_scan
+ * @zbus: the zbus holding the zdevices
+ * @domain: the requested domain number; 0 selects automatic allocation
+ * @ops: the pci operations
+ *
+ * The domain number must be set before pci_scan_root_bus is called.
+ * This function can be called once the domain is known, hence
+ * when function 0 is discovered.
+ */
+static int zpci_bus_scan(struct zpci_bus *zbus, int domain, struct pci_ops *ops)
+{
+ struct pci_bus *bus;
+ int rc;
+
+ rc = zpci_alloc_domain(domain);
+ if (rc < 0)
+ return rc;
+ zbus->domain_nr = rc;
+
+ bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources);
+ if (!bus) {
+ zpci_free_domain(zbus->domain_nr);
+ return -EFAULT;
+ }
+
+ zbus->bus = bus;
+ pci_bus_add_devices(bus);
+ return 0;
+}
+
+static void zpci_bus_release(struct kref *kref)
+{
+ struct zpci_bus *zbus = container_of(kref, struct zpci_bus, kref);
+
+ if (zbus->bus) {
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(zbus->bus);
+
+ zpci_free_domain(zbus->domain_nr);
+ pci_free_resource_list(&zbus->resources);
+
+ pci_remove_root_bus(zbus->bus);
+ pci_unlock_rescan_remove();
+ }
+
+ spin_lock(&zbus_list_lock);
+ list_del(&zbus->bus_next);
+ spin_unlock(&zbus_list_lock);
+ kfree(zbus);
+}
+
+static void zpci_bus_put(struct zpci_bus *zbus)
+{
+ kref_put(&zbus->kref, zpci_bus_release);
+}
+
+static struct zpci_bus *zpci_bus_get(int pchid)
+{
+ struct zpci_bus *zbus;
+
+ spin_lock(&zbus_list_lock);
+ list_for_each_entry(zbus, &zbus_list, bus_next) {
+ if (pchid == zbus->pchid) {
+ kref_get(&zbus->kref);
+ goto out_unlock;
+ }
+ }
+ zbus = NULL;
+out_unlock:
+ spin_unlock(&zbus_list_lock);
+ return zbus;
+}
+
+static struct zpci_bus *zpci_bus_alloc(int pchid)
+{
+ struct zpci_bus *zbus;
+
+ zbus = kzalloc(sizeof(*zbus), GFP_KERNEL);
+ if (!zbus)
+ return NULL;
+
+ zbus->pchid = pchid;
+ INIT_LIST_HEAD(&zbus->bus_next);
+ spin_lock(&zbus_list_lock);
+ list_add_tail(&zbus->bus_next, &zbus_list);
+ spin_unlock(&zbus_list_lock);
+
+ kref_init(&zbus->kref);
+ INIT_LIST_HEAD(&zbus->resources);
+
+ zbus->bus_resource.start = 0;
+ zbus->bus_resource.end = ZPCI_BUS_NR;
+ zbus->bus_resource.flags = IORESOURCE_BUS;
+ pci_add_resource(&zbus->resources, &zbus->bus_resource);
+
+ return zbus;
+}
+
+#ifdef CONFIG_PCI_IOV
+static int zpci_bus_link_virtfn(struct pci_dev *pdev,
+ struct pci_dev *virtfn, int vfid)
+{
+ int rc;
+
+ virtfn->physfn = pci_dev_get(pdev);
+ rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
+ if (rc) {
+ pci_dev_put(pdev);
+ virtfn->physfn = NULL;
+ return rc;
+ }
+ return 0;
+}
+
+static int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
+ struct pci_dev *virtfn, int vfn)
+{
+ int i, cand_devfn;
+ struct zpci_dev *zdev;
+ struct pci_dev *pdev;
+ int vfid = vfn - 1; /* Linux' vfids start at 0, vfn at 1 */
+ int rc = 0;
+
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ WARN_ON(vfid < 0);
+ /* If the parent PF for the given VF is also configured in the
+ * instance, it must be on the same zbus.
+ * We can then identify the parent PF by checking what
+ * devfn the VF would have if it belonged to that PF using the PF's
+ * stride and offset. Only if this candidate devfn matches the
+ * actual devfn will we link both functions.
+ */
+ for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) {
+ zdev = zbus->function[i];
+ if (zdev && zdev->is_physfn) {
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
+ if (cand_devfn == virtfn->devfn) {
+ rc = zpci_bus_link_virtfn(pdev, virtfn, vfid);
+ break;
+ }
+ }
+ }
+ return rc;
+}
+#else
+static inline int zpci_bus_setup_virtfn(struct zpci_bus *zbus,
+ struct pci_dev *virtfn, int vfn)
+{
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ return 0;
+}
+#endif
+
+static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+{
+ struct pci_bus *bus;
+ struct resource_entry *window, *n;
+ struct resource *res;
+ struct pci_dev *pdev;
+ int rc;
+
+ bus = zbus->bus;
+ if (!bus)
+ return -EINVAL;
+
+ pdev = pci_get_slot(bus, zdev->devfn);
+ if (pdev) {
+ /* Device is already known. */
+ pci_dev_put(pdev);
+ return 0;
+ }
+
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ return rc;
+ zdev->has_hp_slot = 1;
+
+ resource_list_for_each_entry_safe(window, n, &zbus->resources) {
+ res = window->res;
+ pci_bus_add_resource(bus, res, 0);
+ }
+
+ pdev = pci_scan_single_device(bus, zdev->devfn);
+ if (pdev) {
+ if (!zdev->is_physfn) {
+ rc = zpci_bus_setup_virtfn(zbus, pdev, zdev->vfn);
+ if (rc)
+ goto failed_with_pdev;
+ }
+ pci_bus_add_device(pdev);
+ }
+ return 0;
+
+failed_with_pdev:
+ pci_stop_and_remove_bus_device(pdev);
+ pci_dev_put(pdev);
+ return rc;
+}
+
+static void zpci_bus_add_devices(struct zpci_bus *zbus)
+{
+ int i;
+
+ for (i = 1; i < ZPCI_FUNCTIONS_PER_BUS; i++)
+ if (zbus->function[i])
+ zpci_bus_add_device(zbus, zbus->function[i]);
+
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(zbus->bus);
+ pci_unlock_rescan_remove();
+}
+
+int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+{
+ struct zpci_bus *zbus = NULL;
+ int rc = -EBADF;
+
+ if (zpci_nb_devices == ZPCI_NR_DEVICES) {
+ pr_warn("Adding PCI function %08x failed because the configured limit of %d is reached\n",
+ zdev->fid, ZPCI_NR_DEVICES);
+ return -ENOSPC;
+ }
+ zpci_nb_devices++;
+
+ if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
+ return -EINVAL;
+
+ if (!s390_pci_no_rid && zdev->rid_available)
+ zbus = zpci_bus_get(zdev->pchid);
+
+ if (!zbus) {
+ zbus = zpci_bus_alloc(zdev->pchid);
+ if (!zbus)
+ return -ENOMEM;
+ }
+
+ zdev->zbus = zbus;
+ if (zbus->function[zdev->devfn]) {
+ pr_err("devfn %04x is already assigned\n", zdev->devfn);
+ goto error; /* rc already set */
+ }
+ zbus->function[zdev->devfn] = zdev;
+
+ zpci_setup_bus_resources(zdev, &zbus->resources);
+
+ if (zbus->bus) {
+ if (!zbus->multifunction) {
+ WARN_ONCE(1, "zbus is not multifunction\n");
+ goto error_bus;
+ }
+ if (!zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set for multifunction\n");
+ goto error_bus;
+ }
+ rc = zpci_bus_add_device(zbus, zdev);
+ if (rc)
+ goto error_bus;
+ } else if (zdev->devfn == 0) {
+ if (zbus->multifunction && !zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set on function 0 for multifunction\n");
+ goto error_bus;
+ }
+ rc = zpci_bus_scan(zbus, (u16)zdev->uid, ops);
+ if (rc)
+ goto error_bus;
+ zpci_bus_add_devices(zbus);
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ goto error_bus;
+ zdev->has_hp_slot = 1;
+ zbus->multifunction = zdev->rid_available;
+ zbus->max_bus_speed = zdev->max_bus_speed;
+ } else {
+ zbus->multifunction = 1;
+ }
+
+ return 0;
+
+error_bus:
+ zpci_nb_devices--;
+ zbus->function[zdev->devfn] = NULL;
+error:
+ pr_err("Adding PCI function %08x failed\n", zdev->fid);
+ zpci_bus_put(zbus);
+ return rc;
+}
+
+void zpci_bus_device_unregister(struct zpci_dev *zdev)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+
+ zpci_nb_devices--;
+ zbus->function[zdev->devfn] = NULL;
+ zpci_bus_put(zbus);
+}
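[pci_bus.c hangs both zbus and zdev lifetimes off krefs: kref_init() starts the count at one, every zpci_bus_get() is paired with a zpci_bus_put(), and the release callback runs exactly once when the final reference drops. The generic shape of the pattern, with hypothetical obj_* names:

#include <linux/kref.h>
#include <linux/slab.h>

struct obj_sketch {
        struct kref kref;
        /* ... payload ... */
};

static void obj_release_sketch(struct kref *kref)
{
        struct obj_sketch *o = container_of(kref, struct obj_sketch, kref);

        kfree(o);       /* runs once, after the final put */
}

static struct obj_sketch *obj_alloc_sketch(void)
{
        struct obj_sketch *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (o)
                kref_init(&o->kref);    /* refcount starts at 1 */
        return o;
}

static void obj_put_sketch(struct obj_sketch *o)
{
        kref_put(&o->kref, obj_release_sketch); /* may free o */
}]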
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
new file mode 100644
index 000000000000..89be3c354b7b
--- /dev/null
+++ b/arch/s390/pci/pci_bus.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+
+int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops);
+void zpci_bus_device_unregister(struct zpci_dev *zdev);
+int zpci_bus_init(void);
+
+void zpci_release_device(struct kref *kref);
+static inline void zpci_zdev_put(struct zpci_dev *zdev)
+{
+ kref_put(&zdev->kref, zpci_release_device);
+}
+
+int zpci_alloc_domain(int domain);
+void zpci_free_domain(int domain);
+int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ struct list_head *resources);
+
+static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct zpci_bus *zbus = bus->sysdata;
+
+ return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
+}
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index ea794ae755ae..7e735f41a0a6 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -155,8 +155,13 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
zdev->pfgid = response->pfgid;
zdev->pft = response->pft;
zdev->vfn = response->vfn;
+ zdev->port = response->port;
zdev->uid = response->uid;
zdev->fmb_length = sizeof(u32) * response->fmb_len;
+ zdev->rid_available = response->rid_avail;
+ zdev->is_physfn = response->is_physfn;
+ if (!s390_pci_no_rid && zdev->rid_available)
+ zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
if (response->util_str_avail) {
@@ -309,14 +314,13 @@ out:
int clp_disable_fh(struct zpci_dev *zdev)
{
- u32 fh = zdev->fh;
int rc;
if (!zdev_enabled(zdev))
return 0;
rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
- zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
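[clp_store_query_pci_fn() now derives devfn from the PCI routing ID when the platform exposes RIDs. A routing ID packs the bus number in the high byte and devfn in the low byte, so a low-byte mask recovers the devfn; the constant below is an assumption standing in for ZPCI_RID_MASK_DEVFN:

#include <linux/types.h>

#define RID_MASK_DEVFN_SKETCH 0x00ff    /* low byte of the routing ID */

static inline u8 rid_to_devfn_sketch(u16 rid)
{
        /* rid[15:8] = bus number, rid[7:0] = devfn */
        return rid & RID_MASK_DEVFN_SKETCH;
}]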
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 8d6ee4af4230..08e1d619398e 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -14,6 +14,8 @@
#include <asm/pci_debug.h>
#include <asm/sclp.h>
+#include "pci_bus.h"
+
/* Content Code Description for PCI Function Error */
struct zpci_ccdf_err {
u32 reserved1;
@@ -53,7 +55,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
zpci_err_hex(ccdf, sizeof(*ccdf));
if (zdev)
- pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
@@ -78,36 +80,28 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
enum zpci_state state;
int ret;
- if (zdev)
- pdev = pci_get_slot(zdev->bus, ZPCI_DEVFN);
+ if (zdev && zdev->zbus && zdev->zbus->bus)
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
- pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
- pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
zpci_err("avail CCDF:\n");
zpci_err_hex(ccdf, sizeof(*ccdf));
switch (ccdf->pec) {
case 0x0301: /* Reserved|Standby -> Configured */
if (!zdev) {
- ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
- if (ret)
- break;
- zdev = get_zdev_by_fid(ccdf->fid);
- }
- if (!zdev || zdev->state != ZPCI_FN_STATE_STANDBY)
+ ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
break;
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ }
zdev->fh = ccdf->fh;
- ret = zpci_enable_device(zdev);
- if (ret)
- break;
- pci_lock_rescan_remove();
- pci_rescan_bus(zdev->bus);
- pci_unlock_rescan_remove();
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zpci_create_device(zdev);
break;
case 0x0302: /* Reserved -> Standby */
- if (!zdev)
+ if (!zdev) {
clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
+ break;
+ }
+ zdev->fh = ccdf->fh;
break;
case 0x0303: /* Deconfiguration requested */
if (!zdev)
@@ -135,12 +129,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
pci_stop_and_remove_bus_device_locked(pdev);
}
- zdev->fh = ccdf->fh;
- zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
if (!clp_get_state(ccdf->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
- zpci_remove_device(zdev);
+ zpci_zdev_put(zdev);
}
break;
case 0x0306: /* 0x308 or 0x302 for multiple devices */
@@ -149,12 +141,11 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
case 0x0308: /* Standby -> Reserved */
if (!zdev)
break;
- zpci_remove_device(zdev);
+ zpci_zdev_put(zdev);
break;
default:
break;
}
- pci_dev_put(pdev);
}
void zpci_event_availability(void *data)
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 020a2c514d96..38efa3e852c4 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -125,7 +125,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
struct vm_area_struct *vma;
long ret;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
ret = -EINVAL;
vma = find_vma(current->mm, user_addr);
if (!vma)
@@ -135,7 +135,7 @@ static long get_pfn(unsigned long user_addr, unsigned long access,
goto out;
ret = follow_pfn(vma, user_addr, pfn);
out:
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return ret;
}
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 215f17437a4f..5c028bee91b9 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -33,6 +33,7 @@ zpci_attr(pchid, "0x%04x\n", pchid);
zpci_attr(pfgid, "0x%02x\n", pfgid);
zpci_attr(vfn, "0x%04x\n", vfn);
zpci_attr(pft, "0x%02x\n", pft);
+zpci_attr(port, "%d\n", port);
zpci_attr(uid, "0x%x\n", uid);
zpci_attr(segment0, "0x%02x\n", pfip[0]);
zpci_attr(segment1, "0x%02x\n", pfip[1]);
@@ -88,7 +89,7 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
ret = zpci_enable_device(zdev);
if (ret)
goto out;
- pci_rescan_bus(zdev->bus);
+ pci_rescan_bus(zdev->zbus->bus);
}
out:
pci_unlock_rescan_remove();
@@ -142,6 +143,7 @@ static struct attribute *zpci_dev_attrs[] = {
&dev_attr_pchid.attr,
&dev_attr_pfgid.attr,
&dev_attr_pft.attr,
+ &dev_attr_port.attr,
&dev_attr_vfn.attr,
&dev_attr_uid.attr,
&dev_attr_recover.attr,
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0424b8f2f8d3..a7cc1464011a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -53,15 +53,6 @@ config SUPERH
select HAVE_NMI
select NEED_SG_DMA_LENGTH
select ARCH_HAS_GIGANTIC_PAGE
-
- help
- The SuperH is a RISC processor targeted for use in embedded systems
- and consumer electronics; it was also used in the Sega Dreamcast
- gaming console. The SuperH port has a home page at
- <http://www.linux-sh.org/>.
-
-config SUPERH32
- def_bool "$(ARCH)" = "sh"
select ARCH_32BIT_OFF_T
select GUP_GET_PTE_LOW_HIGH if X2TLB
select HAVE_KPROBES
@@ -79,19 +70,15 @@ config SUPERH32
select ARCH_HIBERNATION_POSSIBLE if MMU
select SPARSE_IRQ
select HAVE_STACKPROTECTOR
-
-config SUPERH64
- def_bool "$(ARCH)" = "sh64"
- select HAVE_EXIT_THREAD
- select KALLSYMS
+ help
+ The SuperH is a RISC processor targeted for use in embedded systems
+ and consumer electronics; it was also used in the Sega Dreamcast
+ gaming console. The SuperH port has a home page at
+ <http://www.linux-sh.org/>.
config GENERIC_BUG
def_bool y
- depends on BUG && SUPERH32
-
-config GENERIC_CSUM
- def_bool y
- depends on SUPERH64
+ depends on BUG
config GENERIC_HWEIGHT
def_bool y
@@ -201,12 +188,6 @@ config CPU_SH4AL_DSP
select CPU_SH4A
select CPU_HAS_DSP
-config CPU_SH5
- bool
- select CPU_HAS_FPU
- select SYS_SUPPORTS_SH_TMU
- select SYS_SUPPORTS_HUGETLBFS if MMU
-
config CPU_SHX2
bool
@@ -226,8 +207,6 @@ config CPU_HAS_PMU
default y
bool
-if SUPERH32
-
choice
prompt "Processor sub-type selection"
@@ -516,27 +495,6 @@ config CPU_SUBTYPE_SH7366
endchoice
-endif
-
-if SUPERH64
-
-choice
- prompt "Processor sub-type selection"
-
-# SH-5 Processor Support
-
-config CPU_SUBTYPE_SH5_101
- bool "Support SH5-101 processor"
- select CPU_SH5
-
-config CPU_SUBTYPE_SH5_103
- bool "Support SH5-103 processor"
- select CPU_SH5
-
-endchoice
-
-endif
-
source "arch/sh/mm/Kconfig"
source "arch/sh/Kconfig.cpu"
@@ -590,7 +548,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
- depends on SUPERH32 && MMU
+ depends on MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
@@ -608,7 +566,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
- depends on SUPERH32 && BROKEN_ON_SMP
+ depends on BROKEN_ON_SMP
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
@@ -622,7 +580,7 @@ config CRASH_DUMP
config KEXEC_JUMP
bool "kexec jump (EXPERIMENTAL)"
- depends on SUPERH32 && KEXEC && HIBERNATION
+ depends on KEXEC && HIBERNATION
help
Jump between original kernel and kexeced kernel and invoke
code via KEXEC
@@ -699,7 +657,7 @@ config HOTPLUG_CPU
config GUSA
def_bool y
- depends on !SMP && SUPERH32
+ depends on !SMP
help
This enables support for gUSA (general UserSpace Atomicity).
This is the default implementation for both UP and non-ll/sc
diff --git a/arch/sh/Kconfig.cpu b/arch/sh/Kconfig.cpu
index 4a4edc7e03d4..97ca35f2cd37 100644
--- a/arch/sh/Kconfig.cpu
+++ b/arch/sh/Kconfig.cpu
@@ -13,7 +13,6 @@ config CPU_LITTLE_ENDIAN
config CPU_BIG_ENDIAN
bool "Big Endian"
- depends on !CPU_SH5
endchoice
@@ -27,10 +26,6 @@ config SH_FPU
This option must be set in order to enable the FPU.
-config SH64_FPU_DENORM_FLUSH
- bool "Flush floating point denorms to zero"
- depends on SH_FPU && SUPERH64
-
config SH_FPU_EMU
def_bool n
prompt "FPU emulation support"
@@ -77,10 +72,6 @@ config SPECULATIVE_EXECUTION
If unsure, say N.
-config SH64_ID2815_WORKAROUND
- bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
- depends on CPU_SUBTYPE_SH5_101
-
config CPU_HAS_INTEVT
bool
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug
index 010b6c33bbba..28a43d63bde1 100644
--- a/arch/sh/Kconfig.debug
+++ b/arch/sh/Kconfig.debug
@@ -5,7 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT
config SH_STANDARD_BIOS
bool "Use LinuxSH standard BIOS"
- depends on SUPERH32
help
Say Y here if your target has the gdb-sh-stub
package from www.m17n.org (or any conforming standard LinuxSH BIOS)
@@ -19,7 +18,7 @@ config SH_STANDARD_BIOS
config STACK_DEBUG
bool "Check for stack overflows"
- depends on DEBUG_KERNEL && SUPERH32
+ depends on DEBUG_KERNEL
help
This option will cause messages to be printed if free stack space
drops below a certain limit. Saying Y here will add overhead to
@@ -38,7 +37,7 @@ config 4KSTACKS
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
- depends on DEBUG_KERNEL && SUPERH32 && BROKEN
+ depends on DEBUG_KERNEL && BROKEN
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
@@ -46,7 +45,7 @@ config IRQSTACKS
config DUMP_CODE
bool "Show disassembly of nearby code in register dumps"
- depends on DEBUG_KERNEL && SUPERH32
+ depends on DEBUG_KERNEL
default y if DEBUG_BUGVERBOSE
default n
help
@@ -59,7 +58,6 @@ config DUMP_CODE
config DWARF_UNWINDER
bool "Enable the DWARF unwinder for stacktraces"
select FRAME_POINTER
- depends on SUPERH32
default n
help
Enabling this option will make stacktraces more accurate, at
@@ -77,11 +75,6 @@ config SH_NO_BSS_INIT
For all other cases, say N. If this option seems perplexing, or
you aren't sure, say N.
-config SH64_SR_WATCH
- bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
- depends on SUPERH64
-
config MCOUNT
def_bool y
- depends on SUPERH32
depends on STACK_DEBUG || FUNCTION_TRACER
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index b4a86f27e048..da9cf952f33c 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -11,7 +11,7 @@
#
ifneq ($(SUBARCH),$(ARCH))
ifeq ($(CROSS_COMPILE),)
- CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
+ CROSS_COMPILE := $(call cc-cross-prefix, sh-linux- sh-linux-gnu- sh-unknown-linux-gnu-)
endif
endif
@@ -29,12 +29,9 @@ isa-$(CONFIG_CPU_SH3) := sh3
isa-$(CONFIG_CPU_SH4) := sh4
isa-$(CONFIG_CPU_SH4A) := sh4a
isa-$(CONFIG_CPU_SH4AL_DSP) := sh4al
-isa-$(CONFIG_CPU_SH5) := shmedia
-ifeq ($(CONFIG_SUPERH32),y)
isa-$(CONFIG_SH_DSP) := $(isa-y)-dsp
isa-y := $(isa-y)-up
-endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,)
@@ -47,7 +44,6 @@ cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a,) \
$(call cc-option,-m4a-nofpu,)
cflags-$(CONFIG_CPU_SH4AL_DSP) += $(call cc-option,-m4al,)
-cflags-$(CONFIG_CPU_SH5) := $(call cc-option,-m5-32media-nofpu,)
ifeq ($(cflags-y),)
#
@@ -88,7 +84,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment \
-R .stab -R .stabstr -S
# Give the various platforms the opportunity to set default image types
-defaultimage-$(CONFIG_SUPERH32) := zImage
+defaultimage-y := zImage
defaultimage-$(CONFIG_SH_SH7785LCR) := uImage
defaultimage-$(CONFIG_SH_RSK) := uImage
defaultimage-$(CONFIG_SH_URQUELL) := uImage
@@ -107,31 +103,22 @@ KBUILD_IMAGE := $(boot)/$(defaultimage-y)
# Choosing incompatible machines during configuration will result in
# error messages during linking.
#
-ifdef CONFIG_SUPERH32
UTS_MACHINE := sh
-BITS := 32
LDFLAGS_vmlinux += -e _stext
-else
-UTS_MACHINE := sh64
-BITS := 64
-LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
- --defsym phys_stext_shmedia=phys_stext+1 \
- -e phys_stext_shmedia
-endif
ifdef CONFIG_CPU_LITTLE_ENDIAN
-ld-bfd := elf32-$(UTS_MACHINE)-linux
+ld-bfd := elf32-sh-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EL
else
-ld-bfd := elf32-$(UTS_MACHINE)big-linux
+ld-bfd := elf32-shbig-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EB
endif
-export ld-bfd BITS
+export ld-bfd
-head-y := arch/sh/kernel/head_$(BITS).o
+head-y := arch/sh/kernel/head_32.o
core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
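
The --defsym lines above alias the 32-bit jiffies symbol onto the low word of the 64-bit jiffies_64 counter: offset 0 on little-endian, offset 4 on big-endian, hence the "+4". A minimal user-space sketch of that layout (illustrative only, not from this patch):

/* Sketch: where the low 32 bits of a 64-bit counter live. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t counter = 0x1122334455667788ULL;
	unsigned char bytes[8];
	uint32_t low;

	memcpy(bytes, &counter, 8);
	/* Little-endian: low word at offset 0; big-endian: offset 4,
	 * which is exactly the +4 in --defsym jiffies=jiffies_64+4. */
	memcpy(&low, bytes + (bytes[0] == 0x88 ? 0 : 4), 4);
	printf("low word: 0x%08x\n", low);	/* 0x55667788 either way */
	return 0;
}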
@@ -185,7 +172,6 @@ cpuincdir-$(CONFIG_CPU_SH2) += cpu-sh2
cpuincdir-$(CONFIG_CPU_SH3) += cpu-sh3
cpuincdir-$(CONFIG_CPU_SH4A) += cpu-sh4a
cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
-cpuincdir-$(CONFIG_CPU_SH5) += cpu-sh5
cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
@@ -206,8 +192,7 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
KBUILD_CFLAGS += -fasynchronous-unwind-tables
endif
-libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
-libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
+libs-y := arch/sh/lib/ $(libs-y)
BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index cee24c308337..fb0ca0c1efe1 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -7,6 +7,11 @@ config SOLUTION_ENGINE
config SH_ALPHA_BOARD
bool
+config SH_CUSTOM_CLK
+ def_bool y
+ depends on !SH_DEVICE_TREE
+ select HAVE_LEGACY_CLK
+
config SH_DEVICE_TREE
bool
select OF
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index f5e1bd779789..ad0e2403e56f 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -8,9 +8,9 @@
targets := vmlinux vmlinux.bin vmlinux.bin.gz \
vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo \
- head_$(BITS).o misc.o piggy.o
+ head_32.o misc.o piggy.o
-OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
+OBJECTS = $(obj)/head_32.o $(obj)/misc.o $(obj)/cache.o
GCOV_PROFILE := n
@@ -39,15 +39,11 @@ LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
#
# Pull in the necessary libgcc bits from the in-kernel implementation.
#
-lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
- lshrsi3.S
-lib1funcs-obj := \
+lib1funcs-y := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S lshrsi3.S
+lib1funcs-obj := \
$(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
-ifeq ($(BITS),64)
- lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir))
-endif
KBUILD_CFLAGS += -I$(lib1funcs-dir) -DDISABLE_BRANCH_PROFILING
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index e69ec12cbbe6..a03b6680a9d9 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -116,11 +116,7 @@ void ftrace_stub(void)
{
}
-#ifdef CONFIG_SUPERH64
-#define stackalign 8
-#else
#define stackalign 4
-#endif
#define STACK_SIZE (4096)
long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE];
@@ -130,14 +126,10 @@ void decompress_kernel(void)
{
unsigned long output_addr;
-#ifdef CONFIG_SUPERH64
- output_addr = (CONFIG_MEMORY_START + 0x2000);
-#else
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT)
output_addr |= P2SEG;
#endif
-#endif
output = (unsigned char *)output_addr;
free_mem_ptr = (unsigned long)&_end;
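
With the SUPERH64 branch gone, decompress_kernel() always derives the output address from __pa(_text) plus one page, OR-ing in P2SEG on 29-bit parts to reach the uncached mirror of the same physical memory. A hedged sketch of that address arithmetic (the sample physical address is an assumption):

/* Sketch of the 29-bit fixup above; P2SEG is the classic SH
 * uncached mirror segment. Illustrative values only. */
#include <stdio.h>

#define P2SEG 0xa0000000UL

int main(void)
{
	unsigned long phys = 0x0c001000UL;	/* e.g. __pa(_text) + PAGE_SIZE */
	unsigned long out  = phys | P2SEG;	/* uncached alias of the target */

	printf("decompress to %#lx\n", out);	/* 0xac001000 */
	return 0;
}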
diff --git a/arch/sh/boot/compressed/vmlinux.scr b/arch/sh/boot/compressed/vmlinux.scr
index 862d74808236..dd292b4b9082 100644
--- a/arch/sh/boot/compressed/vmlinux.scr
+++ b/arch/sh/boot/compressed/vmlinux.scr
@@ -1,6 +1,6 @@
SECTIONS
{
- .rodata..compressed : {
+ .rodata..compressed : ALIGN(8) {
input_len = .;
LONG(input_data_end - input_data) input_data = .;
*(.data)
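
The new ALIGN(8) pins the start of .rodata..compressed to an 8-byte boundary, so the length word and the payload the script lays out sit at fixed, aligned offsets. A sketch of how such script-defined symbols are conventionally consumed from C (the extern declarations are assumptions, not part of this patch):

/* Sketch: consuming the vmlinux.scr symbols from C. */
extern int input_len;			/* the LONG() emitted above */
extern unsigned char input_data[];	/* compressed payload start */

static unsigned long payload_end(void)
{
	return (unsigned long)input_data + input_len;
}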
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 6dd0da73ca5a..6abd9bd70106 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -20,7 +20,8 @@ CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_HUGETLB_PAGE_SIZE_1MB=y
diff --git a/arch/sh/configs/kfr2r09_defconfig b/arch/sh/configs/kfr2r09_defconfig
index 1dc3f670c481..833404490cfe 100644
--- a/arch/sh/configs/kfr2r09_defconfig
+++ b/arch/sh/configs/kfr2r09_defconfig
@@ -10,8 +10,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7724=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig
index 664c4dee6e6a..0989ed929540 100644
--- a/arch/sh/configs/magicpanelr2_defconfig
+++ b/arch/sh/configs/magicpanelr2_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7720=y
CONFIG_MEMORY_START=0x0C000000
CONFIG_MEMORY_SIZE=0x03F00000
diff --git a/arch/sh/configs/polaris_defconfig b/arch/sh/configs/polaris_defconfig
index e3a1d3d2694a..246408ec7462 100644
--- a/arch/sh/configs/polaris_defconfig
+++ b/arch/sh/configs/polaris_defconfig
@@ -12,7 +12,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_CPU_SUBTYPE_SH7709=y
CONFIG_MEMORY_START=0x0C000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig
index 0a18f8011c55..c97ec60cff27 100644
--- a/arch/sh/configs/r7780mp_defconfig
+++ b/arch/sh/configs/r7780mp_defconfig
@@ -12,8 +12,6 @@ CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7780=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig
index 7226ac5a1d44..55fce65eb454 100644
--- a/arch/sh/configs/r7785rp_defconfig
+++ b/arch/sh/configs/r7785rp_defconfig
@@ -15,8 +15,6 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7785=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_HUGETLB_PAGE_SIZE_1MB=y
diff --git a/arch/sh/configs/rsk7201_defconfig b/arch/sh/configs/rsk7201_defconfig
index 9f4f474705b7..841809b5c2dc 100644
--- a/arch/sh/configs/rsk7201_defconfig
+++ b/arch/sh/configs/rsk7201_defconfig
@@ -15,8 +15,6 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7201=y
CONFIG_MEMORY_SIZE=0x01000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/rsk7203_defconfig b/arch/sh/configs/rsk7203_defconfig
index 10a32bd4cf66..0055031664ad 100644
--- a/arch/sh/configs/rsk7203_defconfig
+++ b/arch/sh/configs/rsk7203_defconfig
@@ -16,8 +16,6 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7203=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x01000000
diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig
index 78643191c99e..f7b9c528c6df 100644
--- a/arch/sh/configs/rsk7264_defconfig
+++ b/arch/sh/configs/rsk7264_defconfig
@@ -17,8 +17,6 @@ CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_PROFILING=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7264=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig
index fb9fa7faf635..4bff14fb185d 100644
--- a/arch/sh/configs/rsk7269_defconfig
+++ b/arch/sh/configs/rsk7269_defconfig
@@ -4,8 +4,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_SWAP_IO_SPACE=y
CONFIG_CPU_SUBTYPE_SH7269=y
CONFIG_MEMORY_START=0x0c000000
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 7fa116b436c3..61bec46ebd66 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -39,7 +39,8 @@ CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
CONFIG_MEMORY_START=0x40000000
CONFIG_MEMORY_SIZE=0x20000000
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index a93402b3a319..21a43f14ffac 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -28,8 +28,6 @@ CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7206=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_FLATMEM_MANUAL=y
diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig
index 06d067c842cd..4e794e719a28 100644
--- a/arch/sh/configs/se7343_defconfig
+++ b/arch/sh/configs/se7343_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7343=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x01000000
diff --git a/arch/sh/configs/se7619_defconfig b/arch/sh/configs/se7619_defconfig
index f54722dbc8f5..3264415a5931 100644
--- a/arch/sh/configs/se7619_defconfig
+++ b/arch/sh/configs/se7619_defconfig
@@ -11,8 +11,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_MEMORY_START=0x0c000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_CPU_BIG_ENDIAN=y
diff --git a/arch/sh/configs/se7705_defconfig b/arch/sh/configs/se7705_defconfig
index ddfc69841955..4496b94b7d88 100644
--- a/arch/sh/configs/se7705_defconfig
+++ b/arch/sh/configs/se7705_defconfig
@@ -8,8 +8,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_SLAB=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7705=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x02000000
diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig
index 9a527f978106..ee6d28ae08de 100644
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@ -12,8 +12,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7712=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x02000000
diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig
index 3b0e1eb6e874..bad921bc10f8 100644
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@ -12,8 +12,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_SLAB=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7721=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x02000000
diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig
index 88bf9e849008..09e455817447 100644
--- a/arch/sh/configs/se7722_defconfig
+++ b/arch/sh/configs/se7722_defconfig
@@ -8,8 +8,6 @@ CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7722=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_NUMA=y
diff --git a/arch/sh/configs/se7780_defconfig b/arch/sh/configs/se7780_defconfig
index ec32c82646ed..dcd85b858ac8 100644
--- a/arch/sh/configs/se7780_defconfig
+++ b/arch/sh/configs/se7780_defconfig
@@ -9,7 +9,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7780=y
CONFIG_MEMORY_SIZE=0x08000000
CONFIG_SH_7780_SOLUTION_ENGINE=y
diff --git a/arch/sh/configs/sh7710voipgw_defconfig b/arch/sh/configs/sh7710voipgw_defconfig
index c86f28442a80..08426913c0e3 100644
--- a/arch/sh/configs/sh7710voipgw_defconfig
+++ b/arch/sh/configs/sh7710voipgw_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7710=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x00800000
diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig
index 9f2aed0b3bca..d0933a9b9799 100644
--- a/arch/sh/configs/sh7757lcr_defconfig
+++ b/arch/sh/configs/sh7757lcr_defconfig
@@ -36,7 +36,7 @@ CONFIG_IPV6=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_M25P80=y
+CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_RAM=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/sh/configs/shmin_defconfig b/arch/sh/configs/shmin_defconfig
index d589cfdfb7eb..a27b129b93c5 100644
--- a/arch/sh/configs/shmin_defconfig
+++ b/arch/sh/configs/shmin_defconfig
@@ -12,8 +12,6 @@ CONFIG_LOG_BUF_SHIFT=14
# CONFIG_SHMEM is not set
CONFIG_SLOB=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7706=y
CONFIG_MEMORY_START=0x0c000000
CONFIG_MEMORY_SIZE=0x00800000
diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig
index dc2e3061130f..103b81ec1ffb 100644
--- a/arch/sh/configs/ul2_defconfig
+++ b/arch/sh/configs/ul2_defconfig
@@ -8,8 +8,6 @@ CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_CPU_SUBTYPE_SH7366=y
CONFIG_MEMORY_SIZE=0x01f00000
CONFIG_NUMA=y
diff --git a/arch/sh/drivers/pci/Makefile b/arch/sh/drivers/pci/Makefile
index 947bfe8bb0a7..a5c1e9066f83 100644
--- a/arch/sh/drivers/pci/Makefile
+++ b/arch/sh/drivers/pci/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7763) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7786) += pcie-sh7786.o ops-sh7786.o
-obj-$(CONFIG_CPU_SH5) += pci-sh5.o ops-sh5.o
obj-$(CONFIG_SH_DREAMCAST) += ops-dreamcast.o fixups-dreamcast.o \
pci-dreamcast.o
diff --git a/arch/sh/drivers/pci/ops-sh5.c b/arch/sh/drivers/pci/ops-sh5.c
deleted file mode 100644
index 9fbaf72949ab..000000000000
--- a/arch/sh/drivers/pci/ops-sh5.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support functions for the SH5 PCI hardware.
- *
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#include <linux/kernel.h>
-#include <linux/rwsem.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/irq.h>
-#include <asm/io.h>
-#include "pci-sh5.h"
-
-static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 *val)
-{
- SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
-
- switch (size) {
- case 1:
- *val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
- break;
- case 2:
- *val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
- break;
- case 4:
- *val = SH5PCI_READ(PDR);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
- int size, u32 val)
-{
- SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
-
- switch (size) {
- case 1:
- SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
- break;
- case 2:
- SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
- break;
- case 4:
- SH5PCI_WRITE(PDR, val);
- break;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-struct pci_ops sh5_pci_ops = {
- .read = sh5pci_read,
- .write = sh5pci_write,
-};
diff --git a/arch/sh/drivers/pci/pci-sh5.c b/arch/sh/drivers/pci/pci-sh5.c
deleted file mode 100644
index 03225d27770b..000000000000
--- a/arch/sh/drivers/pci/pci-sh5.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- *
- * Support functions for the SH5 PCI hardware.
- */
-
-#include <linux/kernel.h>
-#include <linux/rwsem.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/irq.h>
-#include <cpu/irq.h>
-#include <asm/io.h>
-#include "pci-sh5.h"
-
-unsigned long pcicr_virt;
-unsigned long PCI_IO_AREA;
-
-/* Rounds a number UP to the nearest power of two. Used for
- * sizing the PCI window.
- */
-static u32 __init r2p2(u32 num)
-{
- int i = 31;
- u32 tmp = num;
-
- if (num == 0)
- return 0;
-
- do {
- if (tmp & (1 << 31))
- break;
- i--;
- tmp <<= 1;
- } while (i >= 0);
-
- tmp = 1 << i;
- /* If the original number isn't a power of 2, round it up */
- if (tmp != num)
- tmp <<= 1;
-
- return tmp;
-}
-
-static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
-{
- struct pt_regs *regs = get_irq_regs();
- unsigned pci_int, pci_air, pci_cir, pci_aint;
-
- pci_int = SH5PCI_READ(INT);
- pci_cir = SH5PCI_READ(CIR);
- pci_air = SH5PCI_READ(AIR);
-
- if (pci_int) {
- printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
- printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
- printk("PCI AIR -> 0x%x\n", pci_air);
- printk("PCI CIR -> 0x%x\n", pci_cir);
- SH5PCI_WRITE(INT, ~0);
- }
-
- pci_aint = SH5PCI_READ(AINT);
- if (pci_aint) {
- printk("PCI ARB INTERRUPT!\n");
- printk("PCI AINT -> 0x%x\n", pci_aint);
- printk("PCI AIR -> 0x%x\n", pci_air);
- printk("PCI CIR -> 0x%x\n", pci_cir);
- SH5PCI_WRITE(AINT, ~0);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
-{
- printk("SERR IRQ\n");
-
- return IRQ_NONE;
-}
-
-static struct resource sh5_pci_resources[2];
-
-static struct pci_channel sh5pci_controller = {
- .pci_ops = &sh5_pci_ops,
- .resources = sh5_pci_resources,
- .nr_resources = ARRAY_SIZE(sh5_pci_resources),
- .mem_offset = 0x00000000,
- .io_offset = 0x00000000,
-};
-
-static int __init sh5pci_init(void)
-{
- unsigned long memStart = __pa(memory_start);
- unsigned long memSize = __pa(memory_end) - memStart;
- u32 lsr0;
- u32 uval;
-
- if (request_irq(IRQ_ERR, pcish5_err_irq,
- 0, "PCI Error",NULL) < 0) {
- printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
- return -EINVAL;
- }
-
- if (request_irq(IRQ_SERR, pcish5_serr_irq,
- 0, "PCI SERR interrupt", NULL) < 0) {
- printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
- return -EINVAL;
- }
-
- pcicr_virt = (unsigned long)ioremap(SH5PCI_ICR_BASE, 1024);
- if (!pcicr_virt) {
- panic("Unable to remap PCICR\n");
- }
-
- PCI_IO_AREA = (unsigned long)ioremap(SH5PCI_IO_BASE, 0x10000);
- if (!PCI_IO_AREA) {
- panic("Unable to remap PCIIO\n");
- }
-
- /* Clear snoop registers */
- SH5PCI_WRITE(CSCR0, 0);
- SH5PCI_WRITE(CSCR1, 0);
-
- /* Switch off interrupts */
- SH5PCI_WRITE(INTM, 0);
- SH5PCI_WRITE(AINTM, 0);
- SH5PCI_WRITE(PINTM, 0);
-
- /* Set bus active, take it out of reset */
- uval = SH5PCI_READ(CR);
-
- /* Set command Register */
- SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE |
- CR_PFCS | CR_BMAM);
-
- uval=SH5PCI_READ(CR);
-
- /* Allow it to be a master */
- /* NB - WE DISABLE I/O ACCESS to stop overlap */
- /* set WAIT bit to enable stepping, an attempt to improve stability */
- SH5PCI_WRITE_SHORT(CSR_CMD,
- PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
- PCI_COMMAND_WAIT);
-
- /*
- ** Set translation mapping memory in order to convert the address
- ** used for the main bus, to the PCI internal address.
- */
- SH5PCI_WRITE(MBR,0x40000000);
-
- /* Always set the max size 512M */
- SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
-
- /*
- ** I/O addresses are mapped at internal PCI specific address
- ** as is described into the configuration bridge table.
- ** These are changed to 0, to allow cards that have legacy
- ** io such as vga to function correctly. We set the SH5 IOBAR to
- ** 256K, which is a bit big as we can only have 64K of address space
- */
-
- SH5PCI_WRITE(IOBR,0x0);
-
- /* Set up a 256K window. Totally pointless waste of address space */
- SH5PCI_WRITE(IOBMR,0);
-
- /* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec.
- * Ideally, we would want to map the I/O region somewhere, but it
- * is so big this is not that easy!
- */
- SH5PCI_WRITE(CSR_IBAR0,~0);
- /* Set memory size value */
- memSize = memory_end - memory_start;
-
- /* Now we set up the mbars so the PCI bus can see the memory of
- * the machine */
- if (memSize < (1024 * 1024)) {
- printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n",
- memSize);
- return -EINVAL;
- }
-
- /* Set LSR 0 */
- lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 :
- ((r2p2(memSize) - 0x100000) | 0x1);
- SH5PCI_WRITE(LSR0, lsr0);
-
- /* Set MBAR 0 */
- SH5PCI_WRITE(CSR_MBAR0, memory_start);
- SH5PCI_WRITE(LAR0, memory_start);
-
- SH5PCI_WRITE(CSR_MBAR1,0);
- SH5PCI_WRITE(LAR1,0);
- SH5PCI_WRITE(LSR1,0);
-
- /* Enable the PCI interrupts on the device */
- SH5PCI_WRITE(INTM, ~0);
- SH5PCI_WRITE(AINTM, ~0);
- SH5PCI_WRITE(PINTM, ~0);
-
- sh5_pci_resources[0].start = PCI_IO_AREA;
- sh5_pci_resources[0].end = PCI_IO_AREA + 0x10000;
-
- sh5_pci_resources[1].start = memStart;
- sh5_pci_resources[1].end = memStart + memSize;
-
- return register_pci_controller(&sh5pci_controller);
-}
-arch_initcall(sh5pci_init);
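
The deleted controller sized its PCI window with r2p2(), a shift loop that rounds up to the next power of two. For reference, the same result can be computed with a count-leading-zeros builtin; a hedged sketch, not kernel code:

/* Sketch: branch-light equivalent of the deleted r2p2().
 * Valid for 1 < x <= 2^31; r2p2(0) was defined as 0. */
#include <stdint.h>

static uint32_t roundup_pow2(uint32_t x)
{
	if (x <= 1)
		return x;
	return 1u << (32 - __builtin_clz(x - 1));
}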
diff --git a/arch/sh/drivers/pci/pci-sh5.h b/arch/sh/drivers/pci/pci-sh5.h
deleted file mode 100644
index 91348af0ef6c..000000000000
--- a/arch/sh/drivers/pci/pci-sh5.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
- *
- * Definitions for the SH5 PCI hardware.
- */
-#ifndef __PCI_SH5_H
-#define __PCI_SH5_H
-
-/* Product ID */
-#define PCISH5_PID 0x350d
-
-/* vendor ID */
-#define PCISH5_VID 0x1054
-
-/* Configuration types */
-#define ST_TYPE0 0x00 /* Configuration cycle type 0 */
-#define ST_TYPE1 0x01 /* Configuration cycle type 1 */
-
-/* VCR data */
-#define PCISH5_VCR_STATUS 0x00
-#define PCISH5_VCR_VERSION 0x08
-
-/*
-** ICR register offsets and bits
-*/
-#define PCISH5_ICR_CR 0x100 /* PCI control register values */
-#define CR_PBAM (1<<12)
-#define CR_PFCS (1<<11)
-#define CR_FTO (1<<10)
-#define CR_PFE (1<<9)
-#define CR_TBS (1<<8)
-#define CR_SPUE (1<<7)
-#define CR_BMAM (1<<6)
-#define CR_HOST (1<<5)
-#define CR_CLKEN (1<<4)
-#define CR_SOCS (1<<3)
-#define CR_IOCS (1<<2)
-#define CR_RSTCTL (1<<1)
-#define CR_CFINT (1<<0)
-#define CR_LOCK_MASK 0xa5000000
-
-#define PCISH5_ICR_INT 0x114 /* Interrupt register values */
-#define INT_MADIM (1<<2)
-
-#define PCISH5_ICR_LSR0 0x104 /* Local space register values */
-#define PCISH5_ICR_LSR1 0x108 /* Local space register values */
-#define PCISH5_ICR_LAR0 0x10c /* Local address register values */
-#define PCISH5_ICR_LAR1 0x110 /* Local address register values */
-#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */
-#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */
-#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */
-#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */
-#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */
-#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */
-#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */
-#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */
-#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */
-#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */
-#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */
-#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */
-#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */
-#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */
-#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */
-#define PCISH5_ICR_PDR 0x220 /* Pio data register values */
-
-/* These are configs space registers */
-#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */
-#define PCISH5_ICR_CSR_DID 0x002 /* Device id */
-#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */
-#define PCISH5_ICR_CSR_STATUS 0x006 /* Status */
-#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */
-#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */
-#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */
-
-/* Base address of registers */
-#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
-#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000)
-/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */
-
-extern unsigned long pcicr_virt;
-/* Register selection macro */
-#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x))
-/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
-
-/* Write I/O functions */
-#define SH5PCI_WRITE(reg,val) __raw_writel((u32)(val),PCISH5_ICR_REG(reg))
-#define SH5PCI_WRITE_SHORT(reg,val) __raw_writew((u16)(val),PCISH5_ICR_REG(reg))
-#define SH5PCI_WRITE_BYTE(reg,val) __raw_writeb((u8)(val),PCISH5_ICR_REG(reg))
-
-/* Read I/O functions */
-#define SH5PCI_READ(reg) __raw_readl(PCISH5_ICR_REG(reg))
-#define SH5PCI_READ_SHORT(reg) __raw_readw(PCISH5_ICR_REG(reg))
-#define SH5PCI_READ_BYTE(reg) __raw_readb(PCISH5_ICR_REG(reg))
-
-/* Set PCI config bits */
-#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
-
-/* Set PCI command register */
-#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where)
-
-/* Size converters */
-#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
-#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
-
-extern struct pci_ops sh5_pci_ops;
-
-#endif /* __PCI_SH5_H */
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 66faae19d254..0d58a0159aa6 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -6,7 +6,7 @@
#ifndef __ASM_SH_BARRIER_H
#define __ASM_SH_BARRIER_H
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#if defined(CONFIG_CPU_SH4A)
#include <asm/cache_insns.h>
#endif
@@ -24,7 +24,7 @@
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#if defined(CONFIG_CPU_SH4A)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
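
mb()/rmb()/wmb() now expand to the synco instruction only on SH-4A. A sketch of the classic producer pattern such write barriers exist for (the names and the compiler-barrier stand-in are illustrative, not from this patch):

/* Sketch: order the data store before the flag store. */
static int buf;
static volatile int ready;

static void producer(void)
{
	buf = 42;
	/* On SH-4A this is where wmb() emits "synco"; a plain
	 * compiler barrier stands in for it in this sketch. */
	__asm__ __volatile__("" : : : "memory");
	ready = 1;
}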
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index 8c3578288db5..445dd14c448a 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -26,7 +26,6 @@
#include <asm-generic/bitops/non-atomic.h>
#endif
-#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
@@ -60,31 +59,6 @@ static inline unsigned long __ffs(unsigned long word)
: "t");
return result;
}
-#else
-static inline unsigned long ffz(unsigned long word)
-{
- unsigned long result, __d2, __d3;
-
- __asm__("gettr tr0, %2\n\t"
- "pta $+32, tr0\n\t"
- "andi %1, 1, %3\n\t"
- "beq %3, r63, tr0\n\t"
- "pta $+4, tr0\n"
- "0:\n\t"
- "shlri.l %1, 1, %1\n\t"
- "addi %0, 1, %0\n\t"
- "andi %1, 1, %3\n\t"
- "beqi %3, 1, tr0\n"
- "1:\n\t"
- "ptabs %2, tr0\n\t"
- : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
- : "0" (0L), "1" (word));
-
- return result;
-}
-
-#include <asm-generic/bitops/__ffs.h>
-#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
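
The surviving ffz()/__ffs() are SH-3/4 assembly; the deleted SH-5 variant did the same scan in SHmedia. A hedged portable C equivalent, for orientation only:

/* Sketch: the first zero bit of a word is the first set bit
 * of its complement; undefined for word == ~0UL, matching the
 * kernel's ffz() contract. */
static inline unsigned long ffz_generic(unsigned long word)
{
	return __builtin_ctzl(~word);
}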
diff --git a/arch/sh/include/asm/bl_bit.h b/arch/sh/include/asm/bl_bit.h
index 7e3d81691ad5..5d04f2c62563 100644
--- a/arch/sh/include/asm/bl_bit.h
+++ b/arch/sh/include/asm/bl_bit.h
@@ -1,11 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_BL_BIT_H
-#define __ASM_SH_BL_BIT_H
-
-#ifdef CONFIG_SUPERH32
-# include <asm/bl_bit_32.h>
-#else
-# include <asm/bl_bit_64.h>
-#endif
-
-#endif /* __ASM_SH_BL_BIT_H */
+#include <asm/bl_bit_32.h>
diff --git a/arch/sh/include/asm/bl_bit_64.h b/arch/sh/include/asm/bl_bit_64.h
deleted file mode 100644
index aac9780fe864..000000000000
--- a/arch/sh/include/asm/bl_bit_64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_BL_BIT_64_H
-#define __ASM_SH_BL_BIT_64_H
-
-#include <asm/processor.h>
-
-#define SR_BL_LL 0x0000000010000000LL
-
-static inline void set_bl_bit(void)
-{
- unsigned long long __dummy0, __dummy1 = SR_BL_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-
-}
-
-static inline void clear_bl_bit(void)
-{
- unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
-
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
-
-#endif /* __ASM_SH_BL_BIT_64_H */
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
index 030df56bfdb2..fe52abb69cea 100644
--- a/arch/sh/include/asm/bugs.h
+++ b/arch/sh/include/asm/bugs.h
@@ -53,10 +53,6 @@ static void __init check_bugs(void)
*p++ = 's';
*p++ = 'p';
break;
- case CPU_FAMILY_SH5:
- *p++ = '6';
- *p++ = '4';
- break;
case CPU_FAMILY_UNKNOWN:
/*
* Specifically use CPU_FAMILY_UNKNOWN rather than
diff --git a/arch/sh/include/asm/cache_insns.h b/arch/sh/include/asm/cache_insns.h
index c5a4acdc53f9..d7edd5297bd0 100644
--- a/arch/sh/include/asm/cache_insns.h
+++ b/arch/sh/include/asm/cache_insns.h
@@ -1,12 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CACHE_INSNS_H
-#define __ASM_SH_CACHE_INSNS_H
-
-
-#ifdef CONFIG_SUPERH32
-# include <asm/cache_insns_32.h>
-#else
-# include <asm/cache_insns_64.h>
-#endif
-
-#endif /* __ASM_SH_CACHE_INSNS_H */
+#include <asm/cache_insns_32.h>
diff --git a/arch/sh/include/asm/cache_insns_64.h b/arch/sh/include/asm/cache_insns_64.h
deleted file mode 100644
index ed682b987b0d..000000000000
--- a/arch/sh/include/asm/cache_insns_64.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_CACHE_INSNS_64_H
-#define __ASM_SH_CACHE_INSNS_64_H
-
-#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
-#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
-#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
-#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
-
-static inline reg_size_t register_align(void *val)
-{
- return (unsigned long long)(signed long long)(signed long)val;
-}
-
-#endif /* __ASM_SH_CACHE_INSNS_64_H */
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index b932e42ef028..fe7400079b97 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -46,6 +46,7 @@ extern void flush_cache_range(struct vm_area_struct *vma,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_user_range flush_icache_range
extern void flush_icache_page(struct vm_area_struct *vma,
struct page *page);
extern void flush_cache_sigtramp(unsigned long address);
diff --git a/arch/sh/include/asm/checksum.h b/arch/sh/include/asm/checksum.h
index a460a108969d..00e39dd0d146 100644
--- a/arch/sh/include/asm/checksum.h
+++ b/arch/sh/include/asm/checksum.h
@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_SUPERH32
-# include <asm/checksum_32.h>
-#else
-# include <asm-generic/checksum.h>
-#endif
+#include <asm/checksum_32.h>
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index 5ec8db1ddc20..7661fb5d548a 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -133,28 +133,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#define ELF_PLATFORM (utsname()->machine)
-#ifdef __SH5__
-#define ELF_PLAT_INIT(_r, load_addr) \
- do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
- _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
- _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
- _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
- _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
- _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
- _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
- _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
- _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
- _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
- _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
- _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
- _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
- _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
- _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
- _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
- _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
- _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
- _r->sr = SR_FD | SR_MMU; } while (0)
-#else
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
@@ -182,7 +160,6 @@ do { \
_r->regs[14] = 0; \
_r->sr = SR_FD; \
} while (0)
-#endif
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
diff --git a/arch/sh/include/asm/extable.h b/arch/sh/include/asm/extable.h
index ed46f8bebb9f..5658d2bae372 100644
--- a/arch/sh/include/asm/extable.h
+++ b/arch/sh/include/asm/extable.h
@@ -4,8 +4,4 @@
#include <asm-generic/extable.h>
-#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
-#define ARCH_HAS_SEARCH_EXTABLE
-#endif
-
#endif
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index e30348c58073..f38adc189b83 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -83,11 +83,7 @@ extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
-#ifdef CONFIG_SUPERH32
#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
-#else
-#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
-#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 39c9ead489e5..26f0f9b4658b 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -17,7 +17,7 @@
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm-generic/iomap.h>
#ifdef __KERNEL__
@@ -115,12 +115,8 @@ static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)
-#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
-#else
-__BUILD_MEMORY_STRING(__raw_, l, u32)
-#endif
__BUILD_MEMORY_STRING(__raw_, q, u64)
@@ -328,7 +324,7 @@ __ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
#else
#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
-#define iounmap(addr) do { } while (0)
+static inline void iounmap(void __iomem *addr) {}
#endif /* CONFIG_MMU */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
diff --git a/arch/sh/include/asm/io_noioport.h b/arch/sh/include/asm/io_noioport.h
index 90d6109f1622..f7938fe0f911 100644
--- a/arch/sh/include/asm/io_noioport.h
+++ b/arch/sh/include/asm/io_noioport.h
@@ -53,12 +53,34 @@ static inline void ioport_unmap(void __iomem *addr)
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
-#define insb(a, b, c) BUG()
-#define insw(a, b, c) BUG()
-#define insl(a, b, c) BUG()
+static inline void insb(unsigned long port, void *dst, unsigned long count)
+{
+ BUG();
+}
+
+static inline void insw(unsigned long port, void *dst, unsigned long count)
+{
+ BUG();
+}
+
+static inline void insl(unsigned long port, void *dst, unsigned long count)
+{
+ BUG();
+}
-#define outsb(a, b, c) BUG()
-#define outsw(a, b, c) BUG()
-#define outsl(a, b, c) BUG()
+static inline void outsb(unsigned long port, const void *src, unsigned long count)
+{
+ BUG();
+}
+
+static inline void outsw(unsigned long port, const void *src, unsigned long count)
+{
+ BUG();
+}
+
+static inline void outsl(unsigned long port, const void *src, unsigned long count)
+{
+ BUG();
+}
#endif /* __ASM_SH_IO_NOIOPORT_H */
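
Turning the port-I/O stubs from macros into static inlines gives them real prototypes: arguments are now type-checked and evaluated, and unused-variable warnings in callers go away. A sketch of the difference (my_dev_read() is a hypothetical caller, not from this patch):

/* Sketch: with "#define insw(a, b, c) BUG()" the arguments were
 * never even evaluated, so a swapped call compiled silently.
 * With the static inline, the compiler checks the types. */
static void my_dev_read(unsigned long port, void *dst)
{
	insw(port, dst, 1);	/* insw(dst, port, 1) now fails to build */
}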
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 8065a3222e19..6d44c32ef047 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -66,8 +66,5 @@ extern void irq_finish(unsigned int irq);
#endif
#include <asm-generic/irq.h>
-#ifdef CONFIG_CPU_SH5
-#include <cpu/irq.h>
-#endif
#endif /* __ASM_SH_IRQ_H */
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 5212f5fcd752..960545306afa 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -12,7 +12,9 @@ enum die_val {
};
/* arch/sh/kernel/dumpstack.c */
-extern void printk_address(unsigned long address, int reliable);
-extern void dump_mem(const char *str, unsigned long bottom, unsigned long top);
+extern void printk_address(unsigned long address, int reliable,
+ const char *loglvl);
+extern void dump_mem(const char *str, const char *loglvl,
+ unsigned long bottom, unsigned long top);
#endif /* __ASM_SH_KDEBUG_H */
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 2d09650093c7..48e67d544d53 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -48,11 +48,7 @@
*/
#define MMU_VPN_MASK 0xfffff000
-#if defined(CONFIG_SUPERH32)
#include <asm/mmu_context_32.h>
-#else
-#include <asm/mmu_context_64.h>
-#endif
/*
* Get MMU context if needed.
@@ -74,14 +70,6 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
*/
local_flush_tlb_all();
-#ifdef CONFIG_SUPERH64
- /*
- * The SH-5 cache uses the ASIDs, requiring both the I and D
- * cache to be flushed when the ASID is exhausted. Weak.
- */
- flush_cache_all();
-#endif
-
/*
* Fix version; Note that we avoid version #0
* to distinguish NO_CONTEXT.
diff --git a/arch/sh/include/asm/mmu_context_64.h b/arch/sh/include/asm/mmu_context_64.h
deleted file mode 100644
index bacafe0b887d..000000000000
--- a/arch/sh/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_MMU_CONTEXT_64_H
-#define __ASM_SH_MMU_CONTEXT_64_H
-
-/*
- * sh64-specific mmu_context interface.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- */
-#include <cpu/registers.h>
-#include <asm/cacheflush.h>
-
-#define SR_ASID_MASK 0xffffffffff00ffffULL
-#define SR_ASID_SHIFT 16
-
-/*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
- /* Well, at least free TLB entries */
- flush_tlb_mm(mm);
-}
-
-static inline unsigned long get_asid(void)
-{
- unsigned long long sr;
-
- asm volatile ("getcon " __SR ", %0\n\t"
- : "=r" (sr));
-
- sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
- return (unsigned long) sr;
-}
-
-/* Set ASID into SR */
-static inline void set_asid(unsigned long asid)
-{
- unsigned long long sr, pc;
-
- asm volatile ("getcon " __SR ", %0" : "=r" (sr));
-
- sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
-
- /*
- * It is possible that this function may be inlined and so to avoid
- * the assembler reporting duplicate symbols we make use of the
- * gas trick of generating symbols using numeric labels and
- * forward references.
- */
- asm volatile ("movi 1, %1\n\t"
- "shlli %1, 28, %1\n\t"
- "or %0, %1, %1\n\t"
- "putcon %1, " __SR "\n\t"
- "putcon %0, " __SSR "\n\t"
- "movi 1f, %1\n\t"
- "ori %1, 1 , %1\n\t"
- "putcon %1, " __SPC "\n\t"
- "rte\n"
- "1:\n\t"
- : "=r" (sr), "=r" (pc) : "0" (sr));
-}
-
-/* arch/sh/kernel/cpu/sh5/entry.S */
-extern unsigned long switch_and_save_asid(unsigned long new_asid);
-
-/* No spare register to twiddle, so use a software cache */
-extern pgd_t *mmu_pdtp_cache;
-
-#define set_TTB(pgd) (mmu_pdtp_cache = (pgd))
-#define get_TTB() (mmu_pdtp_cache)
-
-#endif /* __ASM_SH_MMU_CONTEXT_64_H */
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index ea8d68f58e39..eca5daa43b93 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -35,8 +35,6 @@
#define HPAGE_SHIFT 22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT 26
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define HPAGE_SHIFT 29
#endif
#ifdef CONFIG_HUGETLB_PAGE
@@ -82,18 +80,12 @@ typedef struct { unsigned long long pgd; } pgd_t;
((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#elif defined(CONFIG_SUPERH32)
+#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
-#else
-typedef struct { unsigned long long pte_low; } pte_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low)
-#define __pte(x) ((pte_t) { (x) } )
#endif
#define pgd_val(x) ((x).pgd)
@@ -191,15 +183,4 @@ typedef struct page *pgtable_t;
*/
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-#ifdef CONFIG_SUPERH64
-/*
- * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
- * happily generate {ld/st}.q pairs, requiring us to have 8-byte
- * alignment to avoid traps. The kmalloc alignment is guaranteed by
- * virtue of L1_CACHE_BYTES, requiring this to only be special cased
- * for slab caches.
- */
-#define ARCH_SLAB_MINALIGN 8
-#endif
-
#endif /* __ASM_SH_PAGE_H */
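
Under X2TLB a pte travels as two 32-bit halves, recombined exactly as the pte_val()/__pte() pair in the hunk above. A hedged round-trip sketch in plain C:

/* Sketch: split 64-bit pte <-> two 32-bit halves. */
#include <stdint.h>

struct pte32x2 { uint32_t pte_low, pte_high; };

static uint64_t pte_val64(struct pte32x2 p)
{
	return p.pte_low | ((uint64_t)p.pte_high << 32);
}

static struct pte32x2 make_pte64(uint64_t v)
{
	struct pte32x2 p = { (uint32_t)v, (uint32_t)(v >> 32) };
	return p;
}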
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 0f80097e5c9c..82d74472dfcd 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -39,13 +39,6 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
/* only used by the stubbed out hugetlb gup code, should never be called */
#define pud_page(pud) NULL
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
-}
-
#define pud_none(x) (!pud_val(x))
#define pud_present(x) (pud_val(x))
#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0)
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index cbd0f3c55a0c..27751e9470df 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -76,18 +76,10 @@ static inline unsigned long phys_addr_mask(void)
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
-#ifdef CONFIG_SUPERH32
#define VMALLOC_START (P3SEG)
-#else
-#define VMALLOC_START (0xf0000000)
-#endif
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
-#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
-#else
-#include <asm/pgtable_64.h>
-#endif
/*
* SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
@@ -159,15 +151,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
return __pte_access_permitted(pte, prot);
}
-#elif defined(CONFIG_SUPERH64)
-static inline bool pte_access_permitted(pte_t pte, bool write)
-{
- u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
-
- if (write)
- prot |= _PAGE_WRITE;
- return __pte_access_permitted(pte, prot);
-}
#else
static inline bool pte_access_permitted(pte_t pte, bool write)
{
@@ -185,6 +168,4 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#include <asm-generic/pgtable.h>
-
#endif /* __ASM_SH_PGTABLE_H */
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 4acce5f2cbf9..41be43e99cff 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -401,27 +401,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return pte;
}
-#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
-#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
-
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define __pte_offset(address) pte_index(address)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pmd_val(pmd);
+}
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_unmap(pte) do { } while (0)
+#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
deleted file mode 100644
index 27cc282ec6c0..000000000000
--- a/arch/sh/include/asm/pgtable_64.h
+++ /dev/null
@@ -1,306 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PGTABLE_64_H
-#define __ASM_SH_PGTABLE_64_H
-
-/*
- * include/asm-sh/pgtable_64.h
- *
- * This file contains the functions and defines necessary to modify and use
- * the SuperH page table tree.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-
-/*
- * Error outputs.
- */
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * Table setting routines. Used within arch/mm only.
- */
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-
-static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
-{
- unsigned long long x = ((unsigned long long) pteval.pte_low);
- unsigned long long *xp = (unsigned long long *) pteptr;
- /*
- * Sign-extend based on NPHYS.
- */
- *(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-/*
- * PGD defines. Top level.
- */
-
-/* To find an entry in a generic PGD. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* To find an entry in a kernel PGD. */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-/* #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) */
-
-/*
- * PMD level access routines. Same notes as above.
- */
-#define _PMD_EMPTY 0x0
-/* Either the PMD is empty or present, it's not paged out */
-#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
-#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
-#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
-#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pmd_page_vaddr(pmd_entry) \
- ((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
-
-#define pmd_page(pmd) \
- (virt_to_page(pmd_val(pmd)))
-
-/* PMD to PTE dereferencing */
-#define pte_index(address) \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define __pte_offset(address) pte_index(address)
-
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
-#define pte_unmap(pte) do { } while (0)
-
-#ifndef __ASSEMBLY__
-/*
- * PTEL coherent flags.
- * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
- */
-/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
- positions, to avoid expensive bit shuffling on every refill. The remaining
- bits are used for s/w purposes and masked out on each refill.
-
- Note, the PTE slots are used to hold data of type swp_entry_t when a page is
- swapped out. Only the _PAGE_PRESENT flag is significant when the page is
- swapped out, and it must be placed so that it doesn't overlap either the
- type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
- at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
- scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
- [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
- into 2 pieces. That is handled by SWP_ENTRY and SWP_TYPE below. */
-#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
-#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
-#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
-#define _PAGE_PRESENT 0x004 /* software: page referenced */
-#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
-#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
-#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
-#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
-#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
-#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
-#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
-#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
-#define _PAGE_ACCESSED 0x800 /* software: page referenced */
-
-/* Wrapper for extended mode pgprot twiddling */
-#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
-
-/*
- * We can use the sign-extended bits in the PTEL to get 32 bits of
- * software flags. This works for now because no implementation uses
- * anything above the PPN field.
- */
-#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
-#define _PAGE_SPECIAL _PAGE_EXT(0x002)
-
-#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_SHARED | \
- _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
-
-/* Mask which drops software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
-
-/*
- * HugeTLB support
- */
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE (_PAGE_SIZE0)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
-#define _PAGE_SZHUGE (_PAGE_SIZE1)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
-#endif
-
-/*
- * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
- * to make pte_mkhuge() happy.
- */
-#ifndef _PAGE_SZHUGE
-# define _PAGE_SZHUGE (0)
-#endif
-
-/*
- * Default flags for a Kernel page.
- * This is fundamentally also SHARED because the main use of this define
- * (other than for PGD/PMD entries) is for the VMALLOC pool which is
- * contextless.
- *
- * _PAGE_EXECUTE is required for modules
- *
- */
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_EXECUTE | \
- _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SHARED)
-
-/* Default flags for a User page */
-#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
-
-#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SPECIAL)
-
-/*
- * We have full permissions (Read/Write/Execute/Shared).
- */
-#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_CACHABLE | _PAGE_ACCESSED)
-
-#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_SHARED)
-#define PAGE_EXECREAD __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
-
-/*
- * We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
- * protection mode for the stack.
- */
-#define PAGE_COPY PAGE_EXECREAD
-
-#define PAGE_READONLY __pgprot(_PAGE_COMMON | _PAGE_READ)
-#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE)
-#define PAGE_RWX __pgprot(_PAGE_COMMON | _PAGE_READ | \
- _PAGE_WRITE | _PAGE_EXECUTE)
-#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
-
-#define PAGE_KERNEL_NOCACHE \
- __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
- _PAGE_EXECUTE | _PAGE_ACCESSED | \
- _PAGE_DIRTY | _PAGE_SHARED)
-
-/* Make it a device mapping for maximum safety (e.g. for mapping device
- registers into user-space via /dev/map). */
-#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
-#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
-
-/*
- * PTE level access routines.
- *
- * Note 1:
- * It's the tree walk leaf: this is the physical address to be stored.
- *
- * Note 2:
- * Regarding the choice of _PTE_EMPTY:
-
- We must choose a bit pattern that cannot be valid, whether or not the page
- is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
- out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
- left for us to select. If we force bit[7]==0 when swapped out, we could use
- the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
- we force bit[7]==1 when swapped out, we can use all zeroes to indicate
- empty. This is convenient, because the page tables get cleared to zero
- when they are allocated.
-
- */
-#define _PTE_EMPTY 0x0
-#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
-#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
-
-/*
- * Some definitions to translate between mem_map, PTEs, and page
- * addresses:
- */
-
-/*
- * Given a PTE, return the index of the mem_map[] entry corresponding
- * to the page frame the PTE maps. Get the absolute physical address,
- * make a relative physical address and translate it to an index.
- */
-#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
- __MEMORY_START) >> PAGE_SHIFT)
-
-/*
- * Given a PTE, return the "struct page *".
- */
-#define pte_page(x) (mem_map + pte_pagenr(x))
-
-/*
- * Return the number of (rounded-down) MB corresponding to x pages.
- */
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-
-/*
- * The following only have defined behavior if pte_present() is true.
- */
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
-
-/*
- * Conversion functions: convert a page and protection to a page entry.
- *
- * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
- */
-#define mk_pte(page,pgprot) \
-({ \
- pte_t __pte; \
- \
- set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
- __MEMORY_START | pgprot_val((pgprot)))); \
- __pte; \
-})
-
-/*
- * This takes an (absolute) physical page address that is used
- * by the remapping functions
- */
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
-
-/* Encode and decode a swap entry */
-#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
-#define __swp_offset(x) ((x).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-
-#endif /* !__ASSEMBLY__ */
-
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-#endif /* __ASM_SH_PGTABLE_64_H */
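
The swap-entry encoding deleted above packs the type into bits [1:0] and [6:3] and the offset into bits [31:8], leaving bit 2 (_PAGE_PRESENT) and bit 7 clear. A minimal userspace sketch of the round-trip (the swp_entry_t stand-in and main() are illustrative; only the three macros come from the header):

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;	/* stand-in */

#define __swp_type(x)	(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)	((x).val >> 8)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3) })

int main(void)
{
	swp_entry_t e = __swp_entry(0x17, 0x1234);

	/* Bits 2 and 7 stay clear, so no swap entry can look like a
	 * present PTE, and only type==0/offset==0 would collide with
	 * _PTE_EMPTY. */
	assert((e.val & 0x84) == 0);
	assert(__swp_type(e) == 0x17);
	assert(__swp_offset(e) == 0x1234);
	printf("val=%#lx\n", e.val);	/* 0x12342b */
	return 0;
}
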
diff --git a/arch/sh/include/asm/posix_types.h b/arch/sh/include/asm/posix_types.h
index 0d670fd94fe7..f8982b757c33 100644
--- a/arch/sh/include/asm/posix_types.h
+++ b/arch/sh/include/asm/posix_types.h
@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-# ifdef CONFIG_SUPERH32
-# include <asm/posix_types_32.h>
-# else
-# include <asm/posix_types_64.h>
-# endif
+#include <asm/posix_types_32.h>
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 6fbf8c80e498..3820d698846e 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -39,9 +39,6 @@ enum cpu_type {
/* SH4AL-DSP types */
CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
- /* SH-5 types */
- CPU_SH5_101, CPU_SH5_103,
-
/* Unknown subtype */
CPU_SH_NONE
};
@@ -53,7 +50,6 @@ enum cpu_family {
CPU_FAMILY_SH4,
CPU_FAMILY_SH4A,
CPU_FAMILY_SH4AL_DSP,
- CPU_FAMILY_SH5,
CPU_FAMILY_UNKNOWN,
};
@@ -167,18 +163,12 @@ int vsyscall_init(void);
*/
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
-#elif defined(CONFIG_SUPERH32)
-#define instruction_size(insn) (2)
#else
-#define instruction_size(insn) (4)
+#define instruction_size(insn) (2)
#endif
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_SUPERH32
-# include <asm/processor_32.h>
-#else
-# include <asm/processor_64.h>
-#endif
+#include <asm/processor_32.h>
#endif /* __ASM_SH_PROCESSOR_H */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 0e0ecc0132e3..d44409413418 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -171,7 +171,7 @@ static __inline__ void enable_fpu(void)
#define thread_saved_pc(tsk) (tsk->thread.pc)
void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs);
+ struct pt_regs *regs, const char *loglvl);
#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
deleted file mode 100644
index 53efc9f51ef1..000000000000
--- a/arch/sh/include/asm/processor_64.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PROCESSOR_64_H
-#define __ASM_SH_PROCESSOR_64_H
-
-/*
- * include/asm-sh/processor_64.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <cpu/registers.h>
-
-#endif
-
-/*
- * User space process size: 2GB - 4k.
- */
-#define TASK_SIZE 0x7ffff000UL
-
-#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX STACK_TOP
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
-
-/*
- * Bit of SR register
- *
- * FD-bit:
- * When it is set, the processor does not have the right to use the FPU,
- * and an exception results when a floating-point operation is executed.
- *
- * IMASK-bit:
- * Interrupt level mask
- *
- * STEP-bit:
- * Single step bit
- *
- */
-#if defined(CONFIG_SH64_SR_WATCH)
-#define SR_MMU 0x84000000
-#else
-#define SR_MMU 0x80000000
-#endif
-
-#define SR_IMASK 0x000000f0
-#define SR_FD 0x00008000
-#define SR_SSTEP 0x08000000
-
-#ifndef __ASSEMBLY__
-
-/*
- * FPU structure and data: require 8-byte alignment, as we need to access
- * it with fld.p, fst.p.
- */
-
-struct sh_fpu_hard_struct {
- unsigned long fp_regs[64];
- unsigned int fpscr;
- /* long status; * software status information */
-};
-
-/* Dummy fpu emulator */
-struct sh_fpu_soft_struct {
- unsigned long fp_regs[64];
- unsigned int fpscr;
- unsigned char lookahead;
- unsigned long entry_pc;
-};
-
-union thread_xstate {
- struct sh_fpu_hard_struct hardfpu;
- struct sh_fpu_soft_struct softfpu;
- /*
- * The structure definitions only produce 32 bit alignment, yet we need
- * to access them using 64 bit load/store as well.
- */
- unsigned long long alignment_dummy;
-};
-
-struct thread_struct {
- unsigned long sp;
- unsigned long pc;
-
- /* Various thread flags, see SH_THREAD_xxx */
- unsigned long flags;
-
- /* This stores the address of the pt_regs built during a context
- switch, or of the register save area built for a kernel mode
- exception. It is used for backtracing the stack of a sleeping task
- or one that traps in kernel mode. */
- struct pt_regs *kregs;
- /* This stores the address of the pt_regs constructed on entry from
- user mode. It is a fixed value over the lifetime of a process, or
- NULL for a kernel thread. */
- struct pt_regs *uregs;
-
- unsigned long address;
- /* Hardware debugging registers may come here */
-
- /* floating point info */
- union thread_xstate *xstate;
-
- /*
-	 * fpu_counter contains the number of consecutive context switches
-	 * during which the FPU is used. If this exceeds a threshold, the
-	 * lazy FPU saving becomes unlazy to save the trap. This is an
-	 * unsigned char so that after 256 times the counter wraps and the
-	 * behavior turns lazy again; this deals with bursty apps that only
-	 * use the FPU for a short time.
- */
- unsigned char fpu_counter;
-};
-
-#define INIT_MMAP \
-{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-
-#define INIT_THREAD { \
- .sp = sizeof(init_stack) + \
- (long) &init_stack, \
- .pc = 0, \
- .kregs = &fake_swapper_regs, \
- .uregs = NULL, \
- .address = 0, \
- .flags = 0, \
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-#define SR_USER (SR_MMU | SR_FD)
-
-#define start_thread(_regs, new_pc, new_sp) \
- _regs->sr = SR_USER; /* User mode. */ \
- _regs->pc = new_pc - 4; /* Compensate syscall exit */ \
- _regs->pc |= 1; /* Set SHmedia ! */ \
- _regs->regs[18] = 0; \
- _regs->regs[15] = new_sp
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/*
- * FPU lazy state save handling.
- */
-
-static inline void disable_fpu(void)
-{
- unsigned long long __dummy;
-
- /* Set FD flag in SR */
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (SR_FD));
-}
-
-static inline void enable_fpu(void)
-{
- unsigned long long __dummy;
-
- /* Clear out FD flag in SR */
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (~SR_FD));
-}
-
-/* Round to nearest, no exceptions on inexact, overflow, underflow,
- zero-divide, invalid. Configure option for whether to flush denorms to
- zero, or except if a denorm is encountered. */
-#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
-#define FPSCR_INIT 0x00040000
-#else
-#define FPSCR_INIT 0x00000000
-#endif
-
-#ifdef CONFIG_SH_FPU
-/* Initialise the FP state of a task */
-void fpinit(struct sh_fpu_hard_struct *fpregs);
-#else
-#define fpinit(fpregs) do { } while (0)
-#endif
-
-extern struct task_struct *last_task_used_math;
-
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (tsk->thread.pc)
-
-extern unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk) ((tsk)->thread.pc)
-#define KSTK_ESP(tsk) ((tsk)->thread.sp)
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_SH_PROCESSOR_64_H */
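
The fpu_counter field above drives the lazy-to-eager FPU save heuristic. A sketch of the logic it describes, with an invented threshold and helper name:

#include <stdbool.h>

#define EAGER_FPU_THRESHOLD 5	/* hypothetical value */

struct fpu_owner {
	unsigned char fpu_counter;
};

/* Called when switching away from a task; returns true if the FPU
 * state should be saved eagerly rather than via the next FPU trap. */
static bool fpu_save_eagerly(struct fpu_owner *t, bool used_fpu)
{
	if (used_fpu)
		t->fpu_counter++;	/* wraps after 256 -> lazy again */
	else
		t->fpu_counter = 0;	/* streak broken */
	return t->fpu_counter > EAGER_FPU_THRESHOLD;
}
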
diff --git a/arch/sh/include/asm/ptrace_64.h b/arch/sh/include/asm/ptrace_64.h
deleted file mode 100644
index 6ee08229b433..000000000000
--- a/arch/sh/include/asm/ptrace_64.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PTRACE_64_H
-#define __ASM_SH_PTRACE_64_H
-
-#include <uapi/asm/ptrace_64.h>
-
-
-#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
-static inline long regs_return_value(struct pt_regs *regs)
-{
- return regs->regs[3];
-}
-
-#endif /* __ASM_SH_PTRACE_64_H */
diff --git a/arch/sh/include/asm/string.h b/arch/sh/include/asm/string.h
index 84fc5ed9c5b3..0f6331ec28ed 100644
--- a/arch/sh/include/asm/string.h
+++ b/arch/sh/include/asm/string.h
@@ -1,6 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_SUPERH32
-# include <asm/string_32.h>
-#else
-# include <asm/string_64.h>
-#endif
+#include <asm/string_32.h>
diff --git a/arch/sh/include/asm/string_64.h b/arch/sh/include/asm/string_64.h
deleted file mode 100644
index d51d6150a4e2..000000000000
--- a/arch/sh/include/asm/string_64.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_STRING_64_H
-#define __ASM_SH_STRING_64_H
-
-#ifdef __KERNEL__
-
-#define __HAVE_ARCH_MEMSET
-extern void *memset(void *__s, int __c, size_t __count);
-
-#define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *dest, const void *src, size_t count);
-
-#define __HAVE_ARCH_STRLEN
-extern size_t strlen(const char *);
-
-#define __HAVE_ARCH_STRCPY
-extern char *strcpy(char *__dest, const char *__src);
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_SH_STRING_64_H */
diff --git a/arch/sh/include/asm/switch_to.h b/arch/sh/include/asm/switch_to.h
index 9eec80ab5aa2..bd139bcdeec1 100644
--- a/arch/sh/include/asm/switch_to.h
+++ b/arch/sh/include/asm/switch_to.h
@@ -4,13 +4,4 @@
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
-#ifndef __ASM_SH_SWITCH_TO_H
-#define __ASM_SH_SWITCH_TO_H
-
-#ifdef CONFIG_SUPERH32
-# include <asm/switch_to_32.h>
-#else
-# include <asm/switch_to_64.h>
-#endif
-
-#endif /* __ASM_SH_SWITCH_TO_H */
+#include <asm/switch_to_32.h>
diff --git a/arch/sh/include/asm/switch_to_64.h b/arch/sh/include/asm/switch_to_64.h
deleted file mode 100644
index 2dbf2311669f..000000000000
--- a/arch/sh/include/asm/switch_to_64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_SWITCH_TO_64_H
-#define __ASM_SH_SWITCH_TO_64_H
-
-struct thread_struct;
-struct task_struct;
-
-/*
- * switch_to() switches from the current task to the given next task.
- */
-struct task_struct *sh64_switch_to(struct task_struct *prev,
- struct thread_struct *prev_thread,
- struct task_struct *next,
- struct thread_struct *next_thread);
-
-#define switch_to(prev,next,last) \
-do { \
- if (last_task_used_math != next) { \
- struct pt_regs *regs = next->thread.uregs; \
- if (regs) regs->sr |= SR_FD; \
- } \
- last = sh64_switch_to(prev, &prev->thread, next, \
- &next->thread); \
-} while (0)
-
-
-#endif /* __ASM_SH_SWITCH_TO_64_H */
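
The deleted switch_to() implements the lazy-FPU hand-off: if the incoming task does not own the live FPU state, its saved SR gains the FD bit, so the first floating-point instruction traps and the state is swapped in on demand. A reduced sketch (SR_FD value from processor_64.h above; the struct keeps only what the logic needs):

#define SR_FD 0x00008000ULL	/* FPU-disable bit, per processor_64.h */

struct task_sketch {
	unsigned long long sr;	/* saved user-mode status register */
};

static struct task_sketch *last_fpu_owner;	/* last_task_used_math */

static void prepare_incoming(struct task_sketch *next)
{
	if (last_fpu_owner != next)
		next->sr |= SR_FD;	/* first FP use will trap */
}
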
diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h
index 90ba00002626..570699eb0e58 100644
--- a/arch/sh/include/asm/syscall.h
+++ b/arch/sh/include/asm/syscall.h
@@ -4,10 +4,6 @@
extern const unsigned long sys_call_table[];
-#ifdef CONFIG_SUPERH32
-# include <asm/syscall_32.h>
-#else
-# include <asm/syscall_64.h>
-#endif
+#include <asm/syscall_32.h>
#endif /* __ASM_SH_SYSCALL_H */
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
deleted file mode 100644
index 72efcbc76f91..000000000000
--- a/arch/sh/include/asm/syscall_64.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_SYSCALL_64_H
-#define __ASM_SH_SYSCALL_64_H
-
-#include <uapi/linux/audit.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-
-/* The system call number is given by the user in R9 */
-static inline long syscall_get_nr(struct task_struct *task,
- struct pt_regs *regs)
-{
- return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L;
-}
-
-static inline void syscall_rollback(struct task_struct *task,
- struct pt_regs *regs)
-{
- /*
- * XXX: This needs some thought. On SH we don't
- * save away the original R9 value anywhere.
- */
-}
-
-static inline long syscall_get_error(struct task_struct *task,
- struct pt_regs *regs)
-{
- return IS_ERR_VALUE(regs->regs[9]) ? regs->regs[9] : 0;
-}
-
-static inline long syscall_get_return_value(struct task_struct *task,
- struct pt_regs *regs)
-{
- return regs->regs[9];
-}
-
-static inline void syscall_set_return_value(struct task_struct *task,
- struct pt_regs *regs,
- int error, long val)
-{
- if (error)
- regs->regs[9] = -error;
- else
- regs->regs[9] = val;
-}
-
-static inline void syscall_get_arguments(struct task_struct *task,
- struct pt_regs *regs,
- unsigned long *args)
-{
- memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
-}
-
-static inline void syscall_set_arguments(struct task_struct *task,
- struct pt_regs *regs,
- const unsigned long *args)
-{
- memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
-}
-
-static inline int syscall_get_arch(struct task_struct *task)
-{
- int arch = AUDIT_ARCH_SH;
-
-#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
-#endif
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- arch |= __AUDIT_ARCH_LE;
-#endif
-
- return arch;
-}
-#endif /* __ASM_SH_SYSCALL_64_H */
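
syscall_get_error() above relies on the convention that an error return in r9 is a negative errno in [-MAX_ERRNO, -1]; IS_ERR_VALUE() is just that range check. A stand-alone restatement for illustration:

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x) \
	((unsigned long long)(x) >= (unsigned long long)-MAX_ERRNO)

int main(void)
{
	long long ok = 42, err = -2;	/* -ENOENT */

	printf("%lld: error=%d\n", ok, (int)IS_ERR_VALUE(ok));		/* 0 */
	printf("%lld: error=%d\n", err, (int)IS_ERR_VALUE(err));	/* 1 */
	return 0;
}
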
diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h
index 995ef046232c..387105316d28 100644
--- a/arch/sh/include/asm/syscalls.h
+++ b/arch/sh/include/asm/syscalls.h
@@ -2,8 +2,6 @@
#ifndef __ASM_SH_SYSCALLS_H
#define __ASM_SH_SYSCALLS_H
-#ifdef __KERNEL__
-
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off);
@@ -11,11 +9,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
-#ifdef CONFIG_SUPERH32
-# include <asm/syscalls_32.h>
-#else
-# include <asm/syscalls_64.h>
-#endif
+#include <asm/syscalls_32.h>
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_SYSCALLS_H */
diff --git a/arch/sh/include/asm/syscalls_64.h b/arch/sh/include/asm/syscalls_64.h
deleted file mode 100644
index df42656cebea..000000000000
--- a/arch/sh/include/asm/syscalls_64.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_SYSCALLS_64_H
-#define __ASM_SH_SYSCALLS_64_H
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/types.h>
-
-struct pt_regs;
-
-/* Misc syscall related bits */
-asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs);
-asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_SH_SYSCALLS_64_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index cf5c792bf70b..6404be69d5fa 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -70,9 +70,7 @@ register unsigned long current_stack_pointer asm("r15") __used;
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
-#if defined(CONFIG_SUPERH64)
- __asm__ __volatile__ ("getcon cr17, %0" : "=r" (ti));
-#elif defined(CONFIG_CPU_HAS_SR_RB)
+#if defined(CONFIG_CPU_HAS_SR_RB)
__asm__ __volatile__ ("stc r7_bank, %0" : "=r" (ti));
#else
unsigned long __dummy;
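
The fallback branch of current_thread_info() (truncated by the hunk above) conventionally derives the thread_info pointer by masking the stack pointer down to the THREAD_SIZE boundary, since thread_info sits at the base of the kernel stack. A sketch of that technique, assuming THREAD_SIZE-aligned stacks; the size value here is illustrative:

#include <stdint.h>

#define THREAD_SIZE 8192	/* illustrative; the real value is per-config */

static inline void *thread_info_from_sp(uintptr_t sp)
{
	/* round the stack pointer down to the stack base */
	return (void *)(sp & ~((uintptr_t)THREAD_SIZE - 1));
}
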
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index bc77f3dd4261..360f713d009b 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -2,10 +2,6 @@
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H
-#ifdef CONFIG_SUPERH64
-# include <asm/tlb_64.h>
-#endif
-
#ifndef __ASSEMBLY__
#include <linux/pagemap.h>
@@ -14,7 +10,7 @@
#include <asm-generic/tlb.h>
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
+#if defined(CONFIG_CPU_SH4)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
diff --git a/arch/sh/include/asm/tlb_64.h b/arch/sh/include/asm/tlb_64.h
deleted file mode 100644
index 59fa0a23dad7..000000000000
--- a/arch/sh/include/asm/tlb_64.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * include/asm-sh/tlb_64.h
- *
- * Copyright (C) 2003 Paul Mundt
- */
-#ifndef __ASM_SH_TLB_64_H
-#define __ASM_SH_TLB_64_H
-
-/* ITLB defines */
-#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
-#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
-
-/* DTLB defines */
-#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
-#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
-
-#ifndef __ASSEMBLY__
-
-/**
- * for_each_dtlb_entry - Iterate over free (non-wired) DTLB entries
- *
- * @tlb: TLB entry
- */
-#define for_each_dtlb_entry(tlb) \
- for (tlb = cpu_data->dtlb.first; \
- tlb <= cpu_data->dtlb.last; \
- tlb += cpu_data->dtlb.step)
-
-/**
- * for_each_itlb_entry - Iterate over free (non-wired) ITLB entries
- *
- * @tlb: TLB entry
- */
-#define for_each_itlb_entry(tlb) \
- for (tlb = cpu_data->itlb.first; \
- tlb <= cpu_data->itlb.last; \
- tlb += cpu_data->itlb.step)
-
-/**
- * __flush_tlb_slot - Flushes TLB slot @slot.
- *
- * @slot: Address of TLB slot.
- */
-static inline void __flush_tlb_slot(unsigned long long slot)
-{
- __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
-}
-
-#ifdef CONFIG_MMU
-/* arch/sh64/mm/tlb.c */
-int sh64_tlb_init(void);
-unsigned long long sh64_next_free_dtlb_entry(void);
-unsigned long long sh64_get_wired_dtlb_entry(void);
-int sh64_put_wired_dtlb_entry(unsigned long long entry);
-void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
- unsigned long asid, unsigned long paddr);
-void sh64_teardown_tlb_slot(unsigned long long config_addr);
-#else
-#define sh64_tlb_init() do { } while (0)
-#define sh64_next_free_dtlb_entry() (0)
-#define sh64_get_wired_dtlb_entry() (0)
-#define sh64_put_wired_dtlb_entry(entry) do { } while (0)
-#define sh64_setup_tlb_slot(conf, virt, asid, phys) do { } while (0)
-#define sh64_teardown_tlb_slot(addr) do { } while (0)
-#endif /* CONFIG_MMU */
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_SH_TLB_64_H */
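
The for_each_*tlb_entry() iterators above walk a register window described by first/last/step addresses; the full DTLB window, DTLB_FIXED 0x00800000 through 0x008003F0 in steps of 0x10, spans 64 slots. A sketch of the pattern with a stand-in descriptor:

struct tlb_window {	/* stand-in for cpu_data->dtlb / ->itlb */
	unsigned long first, last, step;
};

static unsigned long count_tlb_slots(const struct tlb_window *w)
{
	unsigned long slot, n = 0;

	for (slot = w->first; slot <= w->last; slot += w->step)
		n++;
	return n;	/* 0x00800000..0x008003F0 step 0x10 -> 64 */
}
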
diff --git a/arch/sh/include/asm/traps.h b/arch/sh/include/asm/traps.h
index 8844ed0c0fde..ba831bc7e08f 100644
--- a/arch/sh/include/asm/traps.h
+++ b/arch/sh/include/asm/traps.h
@@ -4,11 +4,7 @@
#include <linux/compiler.h>
-#ifdef CONFIG_SUPERH32
# include <asm/traps_32.h>
-#else
-# include <asm/traps_64.h>
-#endif
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);
diff --git a/arch/sh/include/asm/traps_64.h b/arch/sh/include/asm/traps_64.h
deleted file mode 100644
index f28db6dfbe45..000000000000
--- a/arch/sh/include/asm/traps_64.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#ifndef __ASM_SH_TRAPS_64_H
-#define __ASM_SH_TRAPS_64_H
-
-#include <cpu/registers.h>
-
-extern void phys_stext(void);
-
-#define lookup_exception_vector() \
-({ \
- unsigned long _vec; \
- \
- __asm__ __volatile__ ( \
- "getcon " __EXPEVT ", %0\n\t" \
- : "=r" (_vec) \
- ); \
- \
- _vec; \
-})
-
-static inline void trigger_address_error(void)
-{
- phys_stext();
-}
-
-#define BUILD_TRAP_HANDLER(name) \
-asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
-#define TRAP_HANDLER_DECL
-
-#endif /* __ASM_SH_TRAPS_64_H */
diff --git a/arch/sh/include/asm/types.h b/arch/sh/include/asm/types.h
index df96c511bb6e..68eb24ad2013 100644
--- a/arch/sh/include/asm/types.h
+++ b/arch/sh/include/asm/types.h
@@ -9,13 +9,8 @@
*/
#ifndef __ASSEMBLY__
-#ifdef CONFIG_SUPERH32
typedef u16 insn_size_t;
typedef u32 reg_size_t;
-#else
-typedef u32 insn_size_t;
-typedef u64 reg_size_t;
-#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TYPES_H */
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 5fe751ad7582..73f3b48d4a34 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -96,11 +96,7 @@ struct __large_struct { unsigned long buf[100]; };
__pu_err; \
})
-#ifdef CONFIG_SUPERH32
# include <asm/uaccess_32.h>
-#else
-# include <asm/uaccess_64.h>
-#endif
extern long strncpy_from_user(char *dest, const char __user *src, long count);
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
deleted file mode 100644
index 0c19d02dc566..000000000000
--- a/arch/sh/include/asm/uaccess_64.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_UACCESS_64_H
-#define __ASM_SH_UACCESS_64_H
-
-/*
- * include/asm-sh/uaccess_64.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- *
- * User space memory access functions
- *
- * Copyright (C) 1999 Niibe Yutaka
- *
- * Based on:
- * MIPS implementation version 1.15 by
- * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
- * and i386 version.
- */
-
-#define __get_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- x = 0; \
- switch (size) { \
- case 1: \
- retval = __get_user_asm_b((void *)&x, \
- (long)ptr); \
- break; \
- case 2: \
- retval = __get_user_asm_w((void *)&x, \
- (long)ptr); \
- break; \
- case 4: \
- retval = __get_user_asm_l((void *)&x, \
- (long)ptr); \
- break; \
- case 8: \
- retval = __get_user_asm_q((void *)&x, \
- (long)ptr); \
- break; \
- default: \
- __get_user_unknown(); \
- break; \
- } \
-} while (0)
-
-extern long __get_user_asm_b(void *, long);
-extern long __get_user_asm_w(void *, long);
-extern long __get_user_asm_l(void *, long);
-extern long __get_user_asm_q(void *, long);
-extern void __get_user_unknown(void);
-
-#define __put_user_size(x,ptr,size,retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- retval = __put_user_asm_b((void *)&x, \
- (__force long)ptr); \
- break; \
- case 2: \
- retval = __put_user_asm_w((void *)&x, \
- (__force long)ptr); \
- break; \
- case 4: \
- retval = __put_user_asm_l((void *)&x, \
- (__force long)ptr); \
- break; \
- case 8: \
- retval = __put_user_asm_q((void *)&x, \
- (__force long)ptr); \
- break; \
- default: \
- __put_user_unknown(); \
- } \
-} while (0)
-
-extern long __put_user_asm_b(void *, long);
-extern long __put_user_asm_w(void *, long);
-extern long __put_user_asm_l(void *, long);
-extern long __put_user_asm_q(void *, long);
-extern void __put_user_unknown(void);
-
-#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index 9c7d9d9999c6..d6e126250136 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -1,9 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-# ifdef CONFIG_SUPERH32
-# include <asm/unistd_32.h>
-# else
-# include <asm/unistd_64.h>
-# endif
+#include <asm/unistd_32.h>
#define NR_syscalls __NR_syscalls
diff --git a/arch/sh/include/asm/user.h b/arch/sh/include/asm/user.h
index e97f2efed527..7dfd3f6461e6 100644
--- a/arch/sh/include/asm/user.h
+++ b/arch/sh/include/asm/user.h
@@ -28,19 +28,12 @@
* to write an integer number of pages.
*/
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
-struct user_fpu_struct {
- unsigned long fp_regs[32];
- unsigned int fpscr;
-};
-#else
struct user_fpu_struct {
unsigned long fp_regs[16];
unsigned long xfp_regs[16];
unsigned long fpscr;
unsigned long fpul;
};
-#endif
struct user {
struct pt_regs regs; /* entire machine state */
diff --git a/arch/sh/include/asm/vermagic.h b/arch/sh/include/asm/vermagic.h
index 13d8eaa9188e..5b2057c39170 100644
--- a/arch/sh/include/asm/vermagic.h
+++ b/arch/sh/include/asm/vermagic.h
@@ -10,8 +10,6 @@
# define MODULE_PROC_FAMILY "SH3LE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4LE "
-# elif defined CONFIG_CPU_SH5
-# define MODULE_PROC_FAMILY "SH5LE "
# else
# error unknown processor family
# endif
@@ -22,8 +20,6 @@
# define MODULE_PROC_FAMILY "SH3BE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4BE "
-# elif defined CONFIG_CPU_SH5
-# define MODULE_PROC_FAMILY "SH5BE "
# else
# error unknown processor family
# endif
diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h
index 992955685874..8d96c4f9b35b 100644
--- a/arch/sh/include/asm/vmlinux.lds.h
+++ b/arch/sh/include/asm/vmlinux.lds.h
@@ -15,12 +15,4 @@
#define DWARF_EH_FRAME
#endif
-#ifdef CONFIG_SUPERH64
-#define EXTRA_TEXT \
- *(.text64) \
- *(.text..SHmedia32)
-#else
-#define EXTRA_TEXT
-#endif
-
#endif /* __ASM_SH_VMLINUX_LDS_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/addrspace.h b/arch/sh/include/cpu-sh5/cpu/addrspace.h
deleted file mode 100644
index 6dd1e72f31b2..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/addrspace.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_ADDRSPACE_H
-#define __ASM_SH_CPU_SH5_ADDRSPACE_H
-
-#define PHYS_PERIPHERAL_BLOCK 0x09000000
-#define PHYS_DMAC_BLOCK 0x0e000000
-#define PHYS_PCI_BLOCK 0x60000000
-#define PHYS_EMI_BLOCK 0xff000000
-
-/* No segmentation.. */
-
-#endif /* __ASM_SH_CPU_SH5_ADDRSPACE_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/cache.h b/arch/sh/include/cpu-sh5/cpu/cache.h
deleted file mode 100644
index ef49538f386f..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/cache.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_CACHE_H
-#define __ASM_SH_CPU_SH5_CACHE_H
-
-/*
- * include/asm-sh/cpu-sh5/cache.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- */
-
-#define L1_CACHE_SHIFT 5
-
-/* Valid and Dirty bits */
-#define SH_CACHE_VALID (1LL<<0)
-#define SH_CACHE_UPDATED (1LL<<57)
-
-/* Unimplemented compat bits.. */
-#define SH_CACHE_COMBINED 0
-#define SH_CACHE_ASSOC 0
-
-/* Cache flags */
-#define SH_CACHE_MODE_WT (1LL<<0)
-#define SH_CACHE_MODE_WB (1LL<<1)
-
-/*
- * Control Registers.
- */
-#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */
-#define ICCR_REG0 0 /* Register 0 offset */
-#define ICCR_REG1 1 /* Register 1 offset */
-#define ICCR0 ICCR_BASE+ICCR_REG0
-#define ICCR1 ICCR_BASE+ICCR_REG1
-
-#define ICCR0_OFF 0x0 /* Set ICACHE off */
-#define ICCR0_ON 0x1 /* Set ICACHE on */
-#define ICCR0_ICI 0x2 /* Invalidate all in IC */
-
-#define ICCR1_NOLOCK 0x0 /* Set No Locking */
-
-#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */
-#define OCCR_REG0 0 /* Register 0 offset */
-#define OCCR_REG1 1 /* Register 1 offset */
-#define OCCR0 OCCR_BASE+OCCR_REG0
-#define OCCR1 OCCR_BASE+OCCR_REG1
-
-#define OCCR0_OFF 0x0 /* Set OCACHE off */
-#define OCCR0_ON 0x1 /* Set OCACHE on */
-#define OCCR0_OCI 0x2 /* Invalidate all in OC */
-#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */
-#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */
-
-#define OCCR1_NOLOCK 0x0 /* Set No Locking */
-
-/*
- * SH-5
- * A bit of description here, for neff=32.
- *
- * |<--- tag (19 bits) --->|
- * +-----------------------------+-----------------+------+----------+------+
- * | | | ways |set index |offset|
- * +-----------------------------+-----------------+------+----------+------+
- * ^ 2 bits 8 bits 5 bits
- * +- Bit 31
- *
- * Cacheline size is based on offset: 5 bits = 32 bytes per line
- * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
- * have a broader space for registers. These are outlined by
- * CACHE_?C_*_STEP below.
- *
- */
-
-/* Instruction cache */
-#define CACHE_IC_ADDRESS_ARRAY 0x01000000
-
-/* Operand Cache */
-#define CACHE_OC_ADDRESS_ARRAY 0x01800000
-
-/* These declarations relate to cache 'synonyms' in the operand cache. A
- 'synonym' occurs where effective address bits overlap between those used for
- indexing the cache sets and those passed to the MMU for translation. In the
- case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
-
-#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */
-#define CACHE_OC_SYN_SHIFT 12
-/* Mask to select synonym bit(s) */
-#define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
-
-/*
- * Instruction cache can't be invalidated based on physical addresses.
- * No Instruction Cache defines required, then.
- */
-
-#endif /* __ASM_SH_CPU_SH5_CACHE_H */
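
The synonym note above follows from the cache geometry: 32-byte lines give 5 offset bits and 256 sets give 8 index bits, so indexing uses address bits [12:5], while a 4k page fixes only bits [11:0]; bit 12 is the single synonym bit. A sketch deriving the mask from those parameters:

#include <stdio.h>

#define PAGE_SHIFT		12	/* 4k pages */
#define CACHE_LINE_SHIFT	5	/* 32-byte lines */
#define CACHE_SET_BITS		8	/* 256 sets */
#define CACHE_INDEX_BITS	(CACHE_LINE_SHIFT + CACHE_SET_BITS)
#define CACHE_OC_N_SYNBITS	(CACHE_INDEX_BITS - PAGE_SHIFT)
#define CACHE_OC_SYN_MASK \
	(((1UL << CACHE_OC_N_SYNBITS) - 1) << PAGE_SHIFT)

int main(void)
{
	/* prints: synbits=1 mask=0x1000, matching the header above */
	printf("synbits=%d mask=%#lx\n",
	       CACHE_OC_N_SYNBITS, CACHE_OC_SYN_MASK);
	return 0;
}
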
diff --git a/arch/sh/include/cpu-sh5/cpu/irq.h b/arch/sh/include/cpu-sh5/cpu/irq.h
deleted file mode 100644
index 4aa6ac54b9d6..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/irq.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_IRQ_H
-#define __ASM_SH_CPU_SH5_IRQ_H
-
-/*
- * include/asm-sh/cpu-sh5/irq.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- */
-
-
-/*
- * Encoded IRQs are not considered worth supporting.
- * The main reason is that there is no per-encoded-interrupt
- * enable/disable mechanism (as there was on SH3/4).
- * An all-enabled/all-disabled scheme is only worthwhile if there
- * is a cascaded IC to disable/enable/ack on. Until such an IC
- * is available, there is no such support.
- *
- * Presumably encoded IRQs may use extra IRQs beyond 64,
- * below. Some logic must be added to cope with IRQ_IRL?
- * in an exclusive way.
- *
- * Priorities are set at platform level; when IRQ_IRL0-3
- * are set to 0, encoding is allowed. Otherwise it is not
- * allowed.
- */
-
-/* Independent IRQs */
-#define IRQ_IRL0 0
-#define IRQ_IRL1 1
-#define IRQ_IRL2 2
-#define IRQ_IRL3 3
-
-#define IRQ_INTA 4
-#define IRQ_INTB 5
-#define IRQ_INTC 6
-#define IRQ_INTD 7
-
-#define IRQ_SERR 12
-#define IRQ_ERR 13
-#define IRQ_PWR3 14
-#define IRQ_PWR2 15
-#define IRQ_PWR1 16
-#define IRQ_PWR0 17
-
-#define IRQ_DMTE0 18
-#define IRQ_DMTE1 19
-#define IRQ_DMTE2 20
-#define IRQ_DMTE3 21
-#define IRQ_DAERR 22
-
-#define IRQ_TUNI0 32
-#define IRQ_TUNI1 33
-#define IRQ_TUNI2 34
-#define IRQ_TICPI2 35
-
-#define IRQ_ATI 36
-#define IRQ_PRI 37
-#define IRQ_CUI 38
-
-#define IRQ_ERI 39
-#define IRQ_RXI 40
-#define IRQ_BRI 41
-#define IRQ_TXI 42
-
-#define IRQ_ITI 63
-
-#define NR_INTC_IRQS 64
-
-#ifdef CONFIG_SH_CAYMAN
-#define NR_EXT_IRQS 32
-#define START_EXT_IRQS 64
-
-/* PCI bus 2 uses encoded external interrupts on the Cayman board */
-#define IRQ_P2INTA (START_EXT_IRQS + (3*8) + 0)
-#define IRQ_P2INTB (START_EXT_IRQS + (3*8) + 1)
-#define IRQ_P2INTC (START_EXT_IRQS + (3*8) + 2)
-#define IRQ_P2INTD (START_EXT_IRQS + (3*8) + 3)
-
-#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
-#define I8042_AUX_IRQ (START_EXT_IRQS + 6)
-
-#define IRQ_CFCARD (START_EXT_IRQS + 7)
-#define IRQ_PCMCIA (0)
-
-#else
-#define NR_EXT_IRQS 0
-#endif
-
-/* Default IRQs, fixed */
-#define TIMER_IRQ IRQ_TUNI0
-#define RTC_IRQ IRQ_CUI
-
-/* Default Priorities, Platform may choose differently */
-#define NO_PRIORITY 0 /* Disabled */
-#define TIMER_PRIORITY 2
-#define RTC_PRIORITY TIMER_PRIORITY
-#define SCIF_PRIORITY 3
-#define INTD_PRIORITY 3
-#define IRL3_PRIORITY 4
-#define INTC_PRIORITY 6
-#define IRL2_PRIORITY 7
-#define INTB_PRIORITY 9
-#define IRL1_PRIORITY 10
-#define INTA_PRIORITY 12
-#define IRL0_PRIORITY 13
-#define TOP_PRIORITY 15
-
-extern int intc_evt_to_irq[(0xE20/0x20)+1];
-extern int platform_int_priority[NR_INTC_IRQS];
-
-#endif /* __ASM_SH_CPU_SH5_IRQ_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/mmu_context.h b/arch/sh/include/cpu-sh5/cpu/mmu_context.h
deleted file mode 100644
index 23c53be945b7..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/mmu_context.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H
-#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H
-
-/* Common defines */
-#define TLB_STEP 0x00000010
-#define TLB_PTEH 0x00000000
-#define TLB_PTEL 0x00000008
-
-/* PTEH defines */
-#define PTEH_ASID_SHIFT 2
-#define PTEH_VALID 0x0000000000000001
-#define PTEH_SHARED 0x0000000000000002
-#define PTEH_MATCH_ASID 0x00000000000003ff
-
-#ifndef __ASSEMBLY__
-/* This has to be a common function because the next location to fill
- * with information is shared. */
-extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/registers.h b/arch/sh/include/cpu-sh5/cpu/registers.h
deleted file mode 100644
index 372c1e1978b3..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/registers.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_REGISTERS_H
-#define __ASM_SH_CPU_SH5_REGISTERS_H
-
-/*
- * include/asm-sh/cpu-sh5/registers.h
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 Richard Curnow
- */
-
-#ifdef __ASSEMBLY__
-/* =====================================================================
-**
-** Section 1: acts on assembly sources pre-processed by GPP ( <source.S>).
-** Assigns symbolic names to control & target registers.
-*/
-
-/*
- * Define some useful aliases for control registers.
- */
-#define SR cr0
-#define SSR cr1
-#define PSSR cr2
- /* cr3 UNDEFINED */
-#define INTEVT cr4
-#define EXPEVT cr5
-#define PEXPEVT cr6
-#define TRA cr7
-#define SPC cr8
-#define PSPC cr9
-#define RESVEC cr10
-#define VBR cr11
- /* cr12 UNDEFINED */
-#define TEA cr13
- /* cr14-cr15 UNDEFINED */
-#define DCR cr16
-#define KCR0 cr17
-#define KCR1 cr18
- /* cr19-cr31 UNDEFINED */
- /* cr32-cr61 RESERVED */
-#define CTC cr62
-#define USR cr63
-
-/*
- * ABI dependent registers (general purpose set)
- */
-#define RET r2
-#define ARG1 r2
-#define ARG2 r3
-#define ARG3 r4
-#define ARG4 r5
-#define ARG5 r6
-#define ARG6 r7
-#define SP r15
-#define LINK r18
-#define ZERO r63
-
-/*
- * Status register defines: used only by assembly sources (and
- * syntax independent)
- */
-#define SR_RESET_VAL 0x0000000050008000
-#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */
-#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */
-
-#if defined (CONFIG_SH64_SR_WATCH)
-#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */
-#else
-#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */
-#endif
-
-#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */
-#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */
-
-#else /* Not __ASSEMBLY__ syntax */
-
-/*
-** Stringify reg. name
-*/
-#define __str(x) #x
-
-/* Stringify control register names for use in inline assembly */
-#define __SR __str(SR)
-#define __SSR __str(SSR)
-#define __PSSR __str(PSSR)
-#define __INTEVT __str(INTEVT)
-#define __EXPEVT __str(EXPEVT)
-#define __PEXPEVT __str(PEXPEVT)
-#define __TRA __str(TRA)
-#define __SPC __str(SPC)
-#define __PSPC __str(PSPC)
-#define __RESVEC __str(RESVEC)
-#define __VBR __str(VBR)
-#define __TEA __str(TEA)
-#define __DCR __str(DCR)
-#define __KCR0 __str(KCR0)
-#define __KCR1 __str(KCR1)
-#define __CTC __str(CTC)
-#define __USR __str(USR)
-
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_SH_CPU_SH5_REGISTERS_H */
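
Note that __str() above is single-level stringification: because the register aliases are only #defined for assembly sources, __str(SR) in C code yields the literal string "SR" for splicing into an asm template. When the argument is itself a macro whose expansion is wanted, the usual two-level form is required; a small demo of the difference:

#include <stdio.h>

#define REG		cr17
#define __str(x)	#x		/* stringify as written */
#define __xstr(x)	__str(x)	/* expand first, then stringify */

int main(void)
{
	printf("%s %s\n", __str(REG), __xstr(REG));	/* prints: REG cr17 */
	return 0;
}
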
diff --git a/arch/sh/include/cpu-sh5/cpu/rtc.h b/arch/sh/include/cpu-sh5/cpu/rtc.h
deleted file mode 100644
index d7e25d435f4a..000000000000
--- a/arch/sh/include/cpu-sh5/cpu/rtc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CPU_SH5_RTC_H
-#define __ASM_SH_CPU_SH5_RTC_H
-
-#define rtc_reg_size sizeof(u32)
-#define RTC_BIT_INVERTED 0 /* The SH-5 RTC is surprisingly sane! */
-#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
-
-#endif /* __ASM_SH_CPU_SH5_RTC_H */
diff --git a/arch/sh/include/uapi/asm/posix_types.h b/arch/sh/include/uapi/asm/posix_types.h
index 2644fdd444e6..adc998a64c76 100644
--- a/arch/sh/include/uapi/asm/posix_types.h
+++ b/arch/sh/include/uapi/asm/posix_types.h
@@ -1,8 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __KERNEL__
-# ifdef __SH5__
-# include <asm/posix_types_64.h>
-# else
-# include <asm/posix_types_32.h>
-# endif
-#endif /* __KERNEL__ */
+#include <asm/posix_types_32.h>
diff --git a/arch/sh/include/uapi/asm/posix_types_64.h b/arch/sh/include/uapi/asm/posix_types_64.h
deleted file mode 100644
index 3a9128d4aee3..000000000000
--- a/arch/sh/include/uapi/asm/posix_types_64.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ASM_SH_POSIX_TYPES_64_H
-#define __ASM_SH_POSIX_TYPES_64_H
-
-typedef unsigned short __kernel_mode_t;
-#define __kernel_mode_t __kernel_mode_t
-typedef unsigned short __kernel_ipc_pid_t;
-#define __kernel_ipc_pid_t __kernel_ipc_pid_t
-typedef unsigned short __kernel_uid_t;
-#define __kernel_uid_t __kernel_uid_t
-typedef unsigned short __kernel_gid_t;
-#define __kernel_gid_t __kernel_gid_t
-typedef long unsigned int __kernel_size_t;
-#define __kernel_size_t __kernel_size_t
-typedef int __kernel_ssize_t;
-#define __kernel_ssize_t __kernel_ssize_t
-typedef int __kernel_ptrdiff_t;
-#define __kernel_ptrdiff_t __kernel_ptrdiff_t
-
-typedef unsigned short __kernel_old_uid_t;
-#define __kernel_old_uid_t __kernel_old_uid_t
-typedef unsigned short __kernel_old_gid_t;
-#define __kernel_old_gid_t __kernel_old_gid_t
-typedef unsigned short __kernel_old_dev_t;
-#define __kernel_old_dev_t __kernel_old_dev_t
-
-#include <asm-generic/posix_types.h>
-
-#endif /* __ASM_SH_POSIX_TYPES_64_H */
diff --git a/arch/sh/include/uapi/asm/ptrace.h b/arch/sh/include/uapi/asm/ptrace.h
index 4ec9c2b65fdb..5c88e46b7773 100644
--- a/arch/sh/include/uapi/asm/ptrace.h
+++ b/arch/sh/include/uapi/asm/ptrace.h
@@ -25,11 +25,6 @@
#define PT_DATA_ADDR 248 /* &(struct user)->start_data */
#define PT_TEXT_LEN 252
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
-#include <asm/ptrace_64.h>
-#else
#include <asm/ptrace_32.h>
-#endif
-
#endif /* _UAPI__ASM_SH_PTRACE_H */
diff --git a/arch/sh/include/uapi/asm/ptrace_64.h b/arch/sh/include/uapi/asm/ptrace_64.h
deleted file mode 100644
index a6f84eba5277..000000000000
--- a/arch/sh/include/uapi/asm/ptrace_64.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI__ASM_SH_PTRACE_64_H
-#define _UAPI__ASM_SH_PTRACE_64_H
-
-struct pt_regs {
- unsigned long long pc;
- unsigned long long sr;
- long long syscall_nr;
- unsigned long long regs[63];
- unsigned long long tregs[8];
- unsigned long long pad[2];
-};
-
-
-#endif /* _UAPI__ASM_SH_PTRACE_64_H */
diff --git a/arch/sh/include/uapi/asm/sigcontext.h b/arch/sh/include/uapi/asm/sigcontext.h
index d2b7e4f033c0..a9cc8bad0f36 100644
--- a/arch/sh/include/uapi/asm/sigcontext.h
+++ b/arch/sh/include/uapi/asm/sigcontext.h
@@ -5,18 +5,6 @@
struct sigcontext {
unsigned long oldmask;
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
- /* CPU registers */
- unsigned long long sc_regs[63];
- unsigned long long sc_tregs[8];
- unsigned long long sc_pc;
- unsigned long long sc_sr;
-
- /* FPU registers */
- unsigned long long sc_fpregs[32];
- unsigned int sc_fpscr;
- unsigned int sc_fpvalid;
-#else
/* CPU registers */
unsigned long sc_regs[16];
unsigned long sc_pc;
@@ -32,7 +20,6 @@ struct sigcontext {
unsigned int sc_fpscr;
unsigned int sc_fpul;
unsigned int sc_ownedfp;
-#endif
};
#endif /* __ASM_SH_SIGCONTEXT_H */
diff --git a/arch/sh/include/uapi/asm/stat.h b/arch/sh/include/uapi/asm/stat.h
index 659b87c7c25a..b0ca755ea08d 100644
--- a/arch/sh/include/uapi/asm/stat.h
+++ b/arch/sh/include/uapi/asm/stat.h
@@ -16,66 +16,6 @@ struct __old_kernel_stat {
unsigned long st_ctime;
};
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
-struct stat {
- unsigned short st_dev;
- unsigned short __pad1;
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
- unsigned short st_rdev;
- unsigned short __pad2;
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused4;
- unsigned long __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1, hence the absolutely
- * insane amounts of padding around dev_t's.
- */
-struct stat64 {
- unsigned short st_dev;
- unsigned char __pad0[10];
-
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
-
- unsigned long st_uid;
- unsigned long st_gid;
-
- unsigned short st_rdev;
- unsigned char __pad3[10];
-
- long long st_size;
- unsigned long st_blksize;
-
- unsigned long st_blocks; /* Number 512-byte blocks allocated. */
- unsigned long __pad4; /* future possible st_blocks high bits */
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
-
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
-
- unsigned long st_ctime;
- unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
-
- unsigned long __unused1;
- unsigned long __unused2;
-};
-#else
struct stat {
unsigned long st_dev;
unsigned long st_ino;
@@ -134,6 +74,5 @@ struct stat64 {
};
#define STAT_HAVE_NSEC 1
-#endif
#endif /* __ASM_SH_STAT_H */
diff --git a/arch/sh/include/uapi/asm/swab.h b/arch/sh/include/uapi/asm/swab.h
index f0b02152745c..c727d381a30a 100644
--- a/arch/sh/include/uapi/asm/swab.h
+++ b/arch/sh/include/uapi/asm/swab.h
@@ -13,14 +13,9 @@
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__(
-#ifdef __SH5__
- "byterev %1, %0\n\t"
- "shari %0, 32, %0"
-#else
"swap.b %1, %0\n\t"
"swap.w %0, %0\n\t"
"swap.b %0, %0"
-#endif
: "=r" (x)
: "r" (x));
@@ -31,12 +26,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__(
-#ifdef __SH5__
- "byterev %1, %0\n\t"
- "shari %0, 32, %0"
-#else
"swap.b %1, %0"
-#endif
: "=r" (x)
: "r" (x));
diff --git a/arch/sh/include/uapi/asm/unistd.h b/arch/sh/include/uapi/asm/unistd.h
index 9e0b4e5e6da2..0f7c7772a2fb 100644
--- a/arch/sh/include/uapi/asm/unistd.h
+++ b/arch/sh/include/uapi/asm/unistd.h
@@ -1,8 +1,2 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __KERNEL__
-# ifdef __SH5__
-# include <asm/unistd_64.h>
-# else
-# include <asm/unistd_32.h>
-# endif
-#endif
+#include <asm/unistd_32.h>
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
deleted file mode 100644
index 75da54851f02..000000000000
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ /dev/null
@@ -1,423 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ASM_SH_UNISTD_64_H
-#define __ASM_SH_UNISTD_64_H
-
-/*
- * include/asm-sh/unistd_64.h
- *
- * This file contains the system call numbers.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- * Copyright (C) 2004 Sean McGoogan
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
- /* 17 was sys_break */
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
- /* 31 was sys_stty */
- /* 32 was sys_gtty */
-#define __NR_access 33
-#define __NR_nice 34
- /* 35 was sys_ftime */
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
- /* 44 was sys_prof */
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
- /* 53 was sys_lock */
-#define __NR_ioctl 54
-#define __NR_fcntl 55
- /* 56 was sys_mpx */
-#define __NR_setpgid 57
- /* 58 was sys_ulimit */
- /* 59 was sys_olduname */
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
- /* 82 was sys_select */
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
- /* 98 was sys_profil */
-#define __NR_statfs 99
-#define __NR_fstatfs 100
- /* 101 was sys_ioperm */
-#define __NR_socketcall 102 /* old implementation of socket systemcall */
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
- /* 110 was sys_iopl */
-#define __NR_vhangup 111
- /* 112 was sys_idle */
- /* 113 was sys_vm86old */
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_cacheflush 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
- /* 127 was sys_create_module */
-#define __NR_init_module 128
-#define __NR_delete_module 129
- /* 130 was sys_get_kernel_syms */
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
- /* 137 was sys_afs_syscall */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
- /* 166 was sys_vm86 */
- /* 167 was sys_query_module */
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_chown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
- /* 188 reserved for getpmsg */
- /* 189 reserved for putpmsg */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_lchown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_chown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-#define __NR_mincore 218
-#define __NR_madvise 219
-
-/* Non-multiplexed socket family */
-#define __NR_socket 220
-#define __NR_bind 221
-#define __NR_connect 222
-#define __NR_listen 223
-#define __NR_accept 224
-#define __NR_getsockname 225
-#define __NR_getpeername 226
-#define __NR_socketpair 227
-#define __NR_send 228
-#define __NR_sendto 229
-#define __NR_recv 230
-#define __NR_recvfrom 231
-#define __NR_shutdown 232
-#define __NR_setsockopt 233
-#define __NR_getsockopt 234
-#define __NR_sendmsg 235
-#define __NR_recvmsg 236
-
-/* Non-multiplexed IPC family */
-#define __NR_semop 237
-#define __NR_semget 238
-#define __NR_semctl 239
-#define __NR_msgsnd 240
-#define __NR_msgrcv 241
-#define __NR_msgget 242
-#define __NR_msgctl 243
-#define __NR_shmat 244
-#define __NR_shmdt 245
-#define __NR_shmget 246
-#define __NR_shmctl 247
-
-#define __NR_getdents64 248
-#define __NR_fcntl64 249
- /* 250 is reserved for tux */
- /* 251 is unused */
-#define __NR_gettid 252
-#define __NR_readahead 253
-#define __NR_setxattr 254
-#define __NR_lsetxattr 255
-#define __NR_fsetxattr 256
-#define __NR_getxattr 257
-#define __NR_lgetxattr 258
-#define __NR_fgetxattr 259
-#define __NR_listxattr 260
-#define __NR_llistxattr 261
-#define __NR_flistxattr 262
-#define __NR_removexattr 263
-#define __NR_lremovexattr 264
-#define __NR_fremovexattr 265
-#define __NR_tkill 266
-#define __NR_sendfile64 267
-#define __NR_futex 268
-#define __NR_sched_setaffinity 269
-#define __NR_sched_getaffinity 270
- /* 271 is reserved for set_thread_area */
- /* 272 is reserved for get_thread_area */
-#define __NR_io_setup 273
-#define __NR_io_destroy 274
-#define __NR_io_getevents 275
-#define __NR_io_submit 276
-#define __NR_io_cancel 277
-#define __NR_fadvise64 278
- /* 279 is unused */
-#define __NR_exit_group 280
-
-#define __NR_lookup_dcookie 281
-#define __NR_epoll_create 282
-#define __NR_epoll_ctl 283
-#define __NR_epoll_wait 284
-#define __NR_remap_file_pages 285
-#define __NR_set_tid_address 286
-#define __NR_timer_create 287
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
-#define __NR_statfs64 296
-#define __NR_fstatfs64 297
-#define __NR_tgkill 298
-#define __NR_utimes 299
-#define __NR_fadvise64_64 300
- /* 301 is reserved for vserver */
- /* 302 is reserved for mbind */
- /* 303 is reserved for get_mempolicy */
- /* 304 is reserved for set_mempolicy */
-#define __NR_mq_open 305
-#define __NR_mq_unlink (__NR_mq_open+1)
-#define __NR_mq_timedsend (__NR_mq_open+2)
-#define __NR_mq_timedreceive (__NR_mq_open+3)
-#define __NR_mq_notify (__NR_mq_open+4)
-#define __NR_mq_getsetattr (__NR_mq_open+5)
- /* 311 is reserved for kexec */
-#define __NR_waitid 312
-#define __NR_add_key 313
-#define __NR_request_key 314
-#define __NR_keyctl 315
-#define __NR_ioprio_set 316
-#define __NR_ioprio_get 317
-#define __NR_inotify_init 318
-#define __NR_inotify_add_watch 319
-#define __NR_inotify_rm_watch 320
- /* 321 is unused */
-#define __NR_migrate_pages 322
-#define __NR_openat 323
-#define __NR_mkdirat 324
-#define __NR_mknodat 325
-#define __NR_fchownat 326
-#define __NR_futimesat 327
-#define __NR_fstatat64 328
-#define __NR_unlinkat 329
-#define __NR_renameat 330
-#define __NR_linkat 331
-#define __NR_symlinkat 332
-#define __NR_readlinkat 333
-#define __NR_fchmodat 334
-#define __NR_faccessat 335
-#define __NR_pselect6 336
-#define __NR_ppoll 337
-#define __NR_unshare 338
-#define __NR_set_robust_list 339
-#define __NR_get_robust_list 340
-#define __NR_splice 341
-#define __NR_sync_file_range 342
-#define __NR_tee 343
-#define __NR_vmsplice 344
-#define __NR_move_pages 345
-#define __NR_getcpu 346
-#define __NR_epoll_pwait 347
-#define __NR_utimensat 348
-#define __NR_signalfd 349
-#define __NR_timerfd_create 350
-#define __NR_eventfd 351
-#define __NR_fallocate 352
-#define __NR_timerfd_settime 353
-#define __NR_timerfd_gettime 354
-#define __NR_signalfd4 355
-#define __NR_eventfd2 356
-#define __NR_epoll_create1 357
-#define __NR_dup3 358
-#define __NR_pipe2 359
-#define __NR_inotify_init1 360
-#define __NR_preadv 361
-#define __NR_pwritev 362
-#define __NR_rt_tgsigqueueinfo 363
-#define __NR_perf_event_open 364
-#define __NR_recvmmsg 365
-#define __NR_accept4 366
-#define __NR_fanotify_init 367
-#define __NR_fanotify_mark 368
-#define __NR_prlimit64 369
-#define __NR_name_to_handle_at 370
-#define __NR_open_by_handle_at 371
-#define __NR_clock_adjtime 372
-#define __NR_syncfs 373
-#define __NR_sendmmsg 374
-#define __NR_setns 375
-#define __NR_process_vm_readv 376
-#define __NR_process_vm_writev 377
-#define __NR_kcmp 378
-#define __NR_finit_module 379
-#define __NR_sched_getattr 380
-#define __NR_sched_setattr 381
-#define __NR_renameat2 382
-#define __NR_seccomp 383
-#define __NR_getrandom 384
-#define __NR_memfd_create 385
-#define __NR_bpf 386
-#define __NR_execveat 387
-#define __NR_userfaultfd 388
-#define __NR_membarrier 389
-#define __NR_mlock2 390
-#define __NR_copy_file_range 391
-#define __NR_preadv2 392
-#define __NR_pwritev2 393
-
-#ifdef __KERNEL__
-#define __NR_syscalls 394
-#endif
-
-#endif /* __ASM_SH_UNISTD_64_H */
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 59673f8a3379..b0f5574b6228 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -3,7 +3,7 @@
# Makefile for the Linux/SuperH kernel.
#
-extra-y := head_$(BITS).o vmlinux.lds
+extra-y := head_32.o vmlinux.lds
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -13,26 +13,26 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
obj-y := debugtraps.o dumpstack.o \
- idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
+ idle.o io.o irq.o irq_32.o kdebugfs.o \
machvec.o nmi_debug.o process.o \
- process_$(BITS).o ptrace.o ptrace_$(BITS).o \
+ process_32.o ptrace.o ptrace_32.o \
reboot.o return_address.o \
- setup.o signal_$(BITS).o sys_sh.o \
- syscalls_$(BITS).o time.o topology.o traps.o \
- traps_$(BITS).o unwinder.o
+ setup.o signal_32.o sys_sh.o \
+ syscalls_32.o time.o topology.o traps.o \
+ traps_32.o unwinder.o
ifndef CONFIG_GENERIC_IOMAP
obj-y += iomap.o
obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o
endif
-obj-$(CONFIG_SUPERH32) += sys_sh32.o
+obj-y += sys_sh32.o
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
+obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index f7c22ea98b0f..46118236bf04 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_CPU_SH2) = sh2/
obj-$(CONFIG_CPU_SH2A) = sh2a/
obj-$(CONFIG_CPU_SH3) = sh3/
obj-$(CONFIG_CPU_SH4) = sh4/
-obj-$(CONFIG_CPU_SH5) = sh5/
# Special cases for family ancestry.
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ce7291e12a30..1d008745877f 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -103,7 +103,7 @@ void __attribute__ ((weak)) l2_cache_init(void)
/*
* Generic first-level cache init
*/
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
+#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
unsigned long ccr, flags;
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 8b91cb96411b..e4578cde46ba 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -2,6 +2,5 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
-obj-$(CONFIG_SUPERH32) += imask.o
-obj-$(CONFIG_CPU_SH5) += intc-sh5.o
+obj-y += imask.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
deleted file mode 100644
index 1b3050facda8..000000000000
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/irq/intc-sh5.c
- *
- * Interrupt Controller support for SH5 INTC.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- *
- * Per-interrupt selective. IRLM=0 (Fixed priority) is not
- * supported, as it is useless without a cascaded interrupt
- * controller.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <cpu/irq.h>
-#include <asm/page.h>
-
-/*
- * Maybe the generic Peripheral block could move to a more
- * generic include file. INTC Block will be defined here
- * and only here to make INTC self-contained in a single
- * file.
- */
-#define INTC_BLOCK_OFFSET 0x01000000
-
-/* Base */
-#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
- INTC_BLOCK_OFFSET
-
-/* Address */
-#define INTC_ICR_SET (intc_virt + 0x0)
-#define INTC_ICR_CLEAR (intc_virt + 0x8)
-#define INTC_INTPRI_0 (intc_virt + 0x10)
-#define INTC_INTSRC_0 (intc_virt + 0x50)
-#define INTC_INTSRC_1 (intc_virt + 0x58)
-#define INTC_INTREQ_0 (intc_virt + 0x60)
-#define INTC_INTREQ_1 (intc_virt + 0x68)
-#define INTC_INTENB_0 (intc_virt + 0x70)
-#define INTC_INTENB_1 (intc_virt + 0x78)
-#define INTC_INTDSB_0 (intc_virt + 0x80)
-#define INTC_INTDSB_1 (intc_virt + 0x88)
-
-#define INTC_ICR_IRLM 0x1
-#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
-#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
-
-
-/*
- * Mapper between the vector ordinal and the IRQ number
- * passed to kernel/device drivers.
- */
-int intc_evt_to_irq[(0xE20/0x20)+1] = {
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
- 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
- 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
- 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
- -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
- -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
- 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
- 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
- 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
- -1, -1 /* 0xE00 - 0xE20 */
-};
-
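The table above holds one entry per 0x20-spaced INTEVT vector, with -1 marking vectors that have no Linux IRQ behind them, so the ordinal is the event code shifted right by five. The intended lookup, as a sketch only; the helper name below is hypothetical:

/* Illustrative only: map an INTEVT code to a Linux IRQ via the
 * table above. Vectors step by 0x20, so the ordinal is evt >> 5. */
static int sh5_evt_to_irq(unsigned int evt)
{
        unsigned int idx = evt >> 5;        /* 0x000, 0x020, ... -> 0, 1, ... */

        if (idx >= (0xE20 / 0x20) + 1)      /* bounds match the array above */
                return -1;
        return intc_evt_to_irq[idx];
}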
-static unsigned long intc_virt;
-static int irlm; /* IRL mode */
-
-static void enable_intc_irq(struct irq_data *data)
-{
- unsigned int irq = data->irq;
- unsigned long reg;
- unsigned long bitmask;
-
- if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
- printk("Trying to use straight IRL0-3 with an encoding platform.\n");
-
- if (irq < 32) {
- reg = INTC_INTENB_0;
- bitmask = 1 << irq;
- } else {
- reg = INTC_INTENB_1;
- bitmask = 1 << (irq - 32);
- }
-
- __raw_writel(bitmask, reg);
-}
-
-static void disable_intc_irq(struct irq_data *data)
-{
- unsigned int irq = data->irq;
- unsigned long reg;
- unsigned long bitmask;
-
- if (irq < 32) {
- reg = INTC_INTDSB_0;
- bitmask = 1 << irq;
- } else {
- reg = INTC_INTDSB_1;
- bitmask = 1 << (irq - 32);
- }
-
- __raw_writel(bitmask, reg);
-}
-
-static struct irq_chip intc_irq_type = {
- .name = "INTC",
- .irq_enable = enable_intc_irq,
- .irq_disable = disable_intc_irq,
-};
-
-void __init plat_irq_setup(void)
-{
- unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
- unsigned long reg;
- int i;
-
- intc_virt = (unsigned long)ioremap(INTC_BASE, 1024);
- if (!intc_virt) {
- panic("Unable to remap INTC\n");
- }
-
-
- /* Set default: per-line enable/disable, priority driven ack/eoi */
- for (i = 0; i < NR_INTC_IRQS; i++)
- irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
-
-
- /* Disable all interrupts and set all priorities to 0 to avoid trouble */
- __raw_writel(-1, INTC_INTDSB_0);
- __raw_writel(-1, INTC_INTDSB_1);
-
- for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
- __raw_writel( NO_PRIORITY, reg);
-
-
-#ifdef CONFIG_SH_CAYMAN
- {
- unsigned long data;
-
- /* Set IRLM */
- /* If all the priorities are set to 'no priority', then
- * assume we are using encoded mode.
- */
- irlm = platform_int_priority[IRQ_IRL0] +
- platform_int_priority[IRQ_IRL1] +
- platform_int_priority[IRQ_IRL2] +
- platform_int_priority[IRQ_IRL3];
- if (irlm == NO_PRIORITY) {
- /* IRLM = 0 */
- reg = INTC_ICR_CLEAR;
- i = IRQ_INTA;
- printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
- } else {
- /* IRLM = 1 */
- reg = INTC_ICR_SET;
- i = IRQ_IRL0;
- }
- __raw_writel(INTC_ICR_IRLM, reg);
-
- /* Set interrupt priorities according to platform description */
- for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
- data |= platform_int_priority[i] <<
- ((i % INTC_INTPRI_PPREG) * 4);
- if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
-			/* On the last of each group of 8, write the Priority Register */
- __raw_writel(data, reg);
- data = 0;
- reg += 8;
- }
- }
- }
-#endif
-
- /*
- * And now let interrupts come in.
- * sti() is not enough, we need to
- * lower priority, too.
- */
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
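The deleted enable/disable paths share one pattern: 64 interrupt lines split across two 32-bit registers, acted on by writing a single set bit. A condensed sketch of that pattern; the helper below is illustrative, not from the file:

/* Pick the low or high bank by IRQ number, then write a one-hot
 * mask. Works for INTENB_0/1 (enable) and INTDSB_0/1 (disable)
 * alike, since both use write-one-to-act semantics. */
static void intc_write_line(unsigned int irq, unsigned long bank0,
                            unsigned long bank1)
{
        unsigned long reg = (irq < 32) ? bank0 : bank1;

        __raw_writel(1UL << (irq & 31), reg);
}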
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
index 85961b4f9c69..a306bcd6b341 100644
--- a/arch/sh/kernel/cpu/proc.c
+++ b/arch/sh/kernel/cpu/proc.c
@@ -24,7 +24,6 @@ static const char *cpu_name[] = {
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
[CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
- [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 4b0db8259e3d..74620f30b19b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -391,6 +391,7 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
&tmu0_device,
&tmu1_device,
&tmu2_device,
+ &tmu3_device,
};
static struct platform_device *sh7786_devices[] __initdata = {
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
deleted file mode 100644
index 97d23ec3005f..000000000000
--- a/arch/sh/kernel/cpu/sh5/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Linux/SuperH SH-5 backends.
-#
-obj-y := entry.o probe.o switchto.o
-
-obj-$(CONFIG_SH_FPU) += fpu.o
-obj-$(CONFIG_KALLSYMS) += unwind.o
-
-# CPU subtype setup
-obj-$(CONFIG_CPU_SH5) += setup-sh5.o
-
-# Primary on-chip clocks (common)
-clock-$(CONFIG_CPU_SH5) := clock-sh5.o
-
-obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
deleted file mode 100644
index dee6be2c2344..000000000000
--- a/arch/sh/kernel/cpu/sh5/clock-sh5.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/clock-sh5.c
- *
- * SH-5 support for the clock framework
- *
- * Copyright (C) 2008 Paul Mundt
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <asm/clock.h>
-#include <asm/io.h>
-
-static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
-
-/* Clock, Power and Reset Controller */
-#define CPRC_BLOCK_OFF 0x01010000
-#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
-
-static unsigned long cprc_base;
-
-static void master_clk_init(struct clk *clk)
-{
- int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
- clk->rate *= ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_master_clk_ops = {
- .init = master_clk_init,
-};
-
-static unsigned long module_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_module_clk_ops = {
- .recalc = module_clk_recalc,
-};
-
-static unsigned long bus_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_bus_clk_ops = {
- .recalc = bus_clk_recalc,
-};
-
-static unsigned long cpu_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) & 0x0007);
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
-};
-
-static struct sh_clk_ops *sh5_clk_ops[] = {
- &sh5_master_clk_ops,
- &sh5_module_clk_ops,
- &sh5_bus_clk_ops,
- &sh5_cpu_clk_ops,
-};
-
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
-{
- cprc_base = (unsigned long)ioremap(CPRC_BASE, 1024);
- BUG_ON(!cprc_base);
-
- if (idx < ARRAY_SIZE(sh5_clk_ops))
- *ops = sh5_clk_ops[idx];
-}
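All four deleted clk_ops reduce to one idea: a 3-bit field of the CPRC control word indexes ifc_table[] and the child clock is the parent rate divided by that entry (field offsets 12, 3 and 0 for module, bus and CPU). As a single parameterised sketch; the helper and its 'shift' argument are assumptions, not in the original:

/* Generic form of the module/bus/cpu _clk_recalc routines above:
 * read the CPRC word, pull a 3-bit divider index, divide. */
static unsigned long sh5_clk_recalc(struct clk *clk, unsigned int shift)
{
        int idx = (__raw_readw(cprc_base) >> shift) & 0x0007;

        return clk->parent->rate / ifc_table[idx];
}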
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
deleted file mode 100644
index 81c8b64b977f..000000000000
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ /dev/null
@@ -1,2000 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/cpu/sh5/entry.S
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 - 2008 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/sys.h>
-#include <cpu/registers.h>
-#include <asm/processor.h>
-#include <asm/unistd.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-
-/*
- * SR fields.
- */
-#define SR_ASID_MASK 0x00ff0000
-#define SR_FD_MASK 0x00008000
-#define SR_SS 0x08000000
-#define SR_BL 0x10000000
-#define SR_MD 0x40000000
-
-/*
- * Event code.
- */
-#define EVENT_INTERRUPT 0
-#define EVENT_FAULT_TLB 1
-#define EVENT_FAULT_NOT_TLB 2
-#define EVENT_DEBUG 3
-
-/* EXPEVT values */
-#define RESET_CAUSE 0x20
-#define DEBUGSS_CAUSE 0x980
-
-/*
- * Frame layout. Quad index.
- */
-#define FRAME_T(x) FRAME_TBASE+(x*8)
-#define FRAME_R(x) FRAME_RBASE+(x*8)
-#define FRAME_S(x) FRAME_SBASE+(x*8)
-#define FSPC 0
-#define FSSR 1
-#define FSYSCALL_ID 2
-
-/* Arrange the save frame to be a multiple of 32 bytes long */
-#define FRAME_SBASE 0
-#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
-#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
-#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
-#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
-
-#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
-#define FP_FRAME_BASE 0
-
-#define SAVED_R2 0*8
-#define SAVED_R3 1*8
-#define SAVED_R4 2*8
-#define SAVED_R5 3*8
-#define SAVED_R18 4*8
-#define SAVED_R6 5*8
-#define SAVED_TR0 6*8
-
-/* These are the registers saved in the TLB path that aren't saved in the first
- level of the normal one. */
-#define TLB_SAVED_R25 7*8
-#define TLB_SAVED_TR1 8*8
-#define TLB_SAVED_TR2 9*8
-#define TLB_SAVED_TR3 10*8
-#define TLB_SAVED_TR4 11*8
-/* Save R0/R1 : PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1, causing
-   breakage otherwise. */
-#define TLB_SAVED_R0 12*8
-#define TLB_SAVED_R1 13*8
-
-#define CLI() \
- getcon SR, r6; \
- ori r6, 0xf0, r6; \
- putcon r6, SR;
-
-#define STI() \
- getcon SR, r6; \
- andi r6, ~0xf0, r6; \
- putcon r6, SR;
-
-#ifdef CONFIG_PREEMPTION
-# define preempt_stop() CLI()
-#else
-# define preempt_stop()
-# define resume_kernel restore_all
-#endif
-
- .section .data, "aw"
-
-#define FAST_TLBMISS_STACK_CACHELINES 4
-#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
-
-/* Register back-up area for all exceptions */
- .balign 32
- /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
- * register saves etc. */
- .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
-/* This is 32 byte aligned by construction */
-/* Register back-up area for all exceptions */
-reg_save_area:
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
-
-/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
- * reentrancy. Note this area may be accessed via physical address.
- * Align so this fits a whole single cache line, for ease of purging.
- */
- .balign 32,0,32
-resvec_save_area:
- .quad 0
- .quad 0
- .quad 0
- .quad 0
- .quad 0
- .balign 32,0,32
-
-/* Jump table of 3rd level handlers */
-trap_jtable:
- .long do_exception_error /* 0x000 */
- .long do_exception_error /* 0x020 */
-#ifdef CONFIG_MMU
- .long tlb_miss_load /* 0x040 */
- .long tlb_miss_store /* 0x060 */
-#else
- .long do_exception_error
- .long do_exception_error
-#endif
- ! ARTIFICIAL pseudo-EXPEVT setting
- .long do_debug_interrupt /* 0x080 */
-#ifdef CONFIG_MMU
- .long tlb_miss_load /* 0x0A0 */
- .long tlb_miss_store /* 0x0C0 */
-#else
- .long do_exception_error
- .long do_exception_error
-#endif
- .long do_address_error_load /* 0x0E0 */
- .long do_address_error_store /* 0x100 */
-#ifdef CONFIG_SH_FPU
- .long do_fpu_error /* 0x120 */
-#else
- .long do_exception_error /* 0x120 */
-#endif
- .long do_exception_error /* 0x140 */
- .long system_call /* 0x160 */
- .long do_reserved_inst /* 0x180 */
- .long do_illegal_slot_inst /* 0x1A0 */
- .long do_exception_error /* 0x1C0 - NMI */
- .long do_exception_error /* 0x1E0 */
- .rept 15
- .long do_IRQ /* 0x200 - 0x3C0 */
- .endr
- .long do_exception_error /* 0x3E0 */
- .rept 32
- .long do_IRQ /* 0x400 - 0x7E0 */
- .endr
- .long fpu_error_or_IRQA /* 0x800 */
- .long fpu_error_or_IRQB /* 0x820 */
- .long do_IRQ /* 0x840 */
- .long do_IRQ /* 0x860 */
- .rept 6
- .long do_exception_error /* 0x880 - 0x920 */
- .endr
- .long breakpoint_trap_handler /* 0x940 */
- .long do_exception_error /* 0x960 */
- .long do_single_step /* 0x980 */
-
- .rept 3
- .long do_exception_error /* 0x9A0 - 0x9E0 */
- .endr
- .long do_IRQ /* 0xA00 */
- .long do_IRQ /* 0xA20 */
-#ifdef CONFIG_MMU
- .long itlb_miss_or_IRQ /* 0xA40 */
-#else
- .long do_IRQ
-#endif
- .long do_IRQ /* 0xA60 */
- .long do_IRQ /* 0xA80 */
-#ifdef CONFIG_MMU
- .long itlb_miss_or_IRQ /* 0xAA0 */
-#else
- .long do_IRQ
-#endif
- .long do_exception_error /* 0xAC0 */
- .long do_address_error_exec /* 0xAE0 */
- .rept 8
- .long do_exception_error /* 0xB00 - 0xBE0 */
- .endr
- .rept 18
- .long do_IRQ /* 0xC00 - 0xE20 */
- .endr
-
- .section .text64, "ax"
-
-/*
- * --- Exception/Interrupt/Event Handling Section
- */
-
-/*
- * VBR and RESVEC blocks.
- *
- * First level handler for VBR-based exceptions.
- *
- * To avoid waste of space, align to the maximum text block size.
- * This is assumed to be at most 128 bytes or 32 instructions.
- * DO NOT EXCEED 32 instructions on the first level handlers !
- *
- * Also note that RESVEC is contained within the VBR block
- * where the room left (1KB - TEXT_SIZE) allows placing
- * the RESVEC block (at most 512B + TEXT_SIZE).
- *
- * So first (and only) level handler for RESVEC-based exceptions.
- *
- * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
- * and interrupt) we are very tight on register space until we
- * save onto the stack frame, which is done in handle_exception().
- *
- */
-
-#define TEXT_SIZE 128
-#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
-
- .balign TEXT_SIZE
-LVBR_block:
- .space 256, 0 /* Power-on class handler, */
- /* not required here */
-not_a_tlb_miss:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /* Save original stack pointer into KCR1 */
- putcon SP, KCR1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for Non-debug, Not a TLB miss class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_NOT_TLB, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-
- .balign 256
- ! VBR+0x200
- nop
- .balign 256
- ! VBR+0x300
- nop
- .balign 256
- /*
- * Instead of the natural .balign 1024 place RESVEC here
- * respecting the final 1KB alignment.
- */
- .balign TEXT_SIZE
- /*
- * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
- * block making sure the final alignment is correct.
- */
-#ifdef CONFIG_MMU
-tlb_miss:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- putcon SP, KCR1
- movi reg_save_area, SP
- /* SP is guaranteed 32-byte aligned. */
- st.q SP, TLB_SAVED_R0 , r0
- st.q SP, TLB_SAVED_R1 , r1
- st.q SP, SAVED_R2 , r2
- st.q SP, SAVED_R3 , r3
- st.q SP, SAVED_R4 , r4
- st.q SP, SAVED_R5 , r5
- st.q SP, SAVED_R6 , r6
- st.q SP, SAVED_R18, r18
-
- /* Save R25 for safety; as/ld may want to use it to achieve the call to
- * the code in mm/tlbmiss.c */
- st.q SP, TLB_SAVED_R25, r25
- gettr tr0, r2
- gettr tr1, r3
- gettr tr2, r4
- gettr tr3, r5
- gettr tr4, r18
- st.q SP, SAVED_TR0 , r2
- st.q SP, TLB_SAVED_TR1 , r3
- st.q SP, TLB_SAVED_TR2 , r4
- st.q SP, TLB_SAVED_TR3 , r5
- st.q SP, TLB_SAVED_TR4 , r18
-
- pt do_fast_page_fault, tr0
- getcon SSR, r2
- getcon EXPEVT, r3
- getcon TEA, r4
- shlri r2, 30, r2
- andi r2, 1, r2 /* r2 = SSR.MD */
- blink tr0, LINK
-
- pt fixup_to_invoke_general_handler, tr1
-
-	/* If the fast path handler fixed the fault, just drop straight
-	   through to the restore code to return to the excepting context.
- */
- bnei/u r2, 0, tr1
-
-fast_tlb_miss_restore:
- ld.q SP, SAVED_TR0, r2
- ld.q SP, TLB_SAVED_TR1, r3
- ld.q SP, TLB_SAVED_TR2, r4
-
- ld.q SP, TLB_SAVED_TR3, r5
- ld.q SP, TLB_SAVED_TR4, r18
-
- ptabs r2, tr0
- ptabs r3, tr1
- ptabs r4, tr2
- ptabs r5, tr3
- ptabs r18, tr4
-
- ld.q SP, TLB_SAVED_R0, r0
- ld.q SP, TLB_SAVED_R1, r1
- ld.q SP, SAVED_R2, r2
- ld.q SP, SAVED_R3, r3
- ld.q SP, SAVED_R4, r4
- ld.q SP, SAVED_R5, r5
- ld.q SP, SAVED_R6, r6
- ld.q SP, SAVED_R18, r18
- ld.q SP, TLB_SAVED_R25, r25
-
- getcon KCR1, SP
- rte
- nop /* for safety, in case the code is run on sh5-101 cut1.x */
-
-fixup_to_invoke_general_handler:
-
- /* OK, new method. Restore stuff that's not expected to get saved into
- the 'first-level' reg save area, then just fall through to setting
- up the registers and calling the second-level handler. */
-
- /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
- r25,tr1-4 and save r6 to get into the right state. */
-
- ld.q SP, TLB_SAVED_TR1, r3
- ld.q SP, TLB_SAVED_TR2, r4
- ld.q SP, TLB_SAVED_TR3, r5
- ld.q SP, TLB_SAVED_TR4, r18
- ld.q SP, TLB_SAVED_R25, r25
-
- ld.q SP, TLB_SAVED_R0, r0
- ld.q SP, TLB_SAVED_R1, r1
-
- ptabs/u r3, tr1
- ptabs/u r4, tr2
- ptabs/u r5, tr3
- ptabs/u r18, tr4
-
- /* Set args for Non-debug, TLB miss class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_TLB, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-#else /* CONFIG_MMU */
- .balign 256
-#endif
-
-/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
- DOES END UP AT VBR+0x600 */
- nop
- nop
- nop
- nop
- nop
- nop
-
- .balign 256
- /* VBR + 0x600 */
-
-interrupt:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /* Save original stack pointer into KCR1 */
- putcon SP, KCR1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for interrupt class handler */
- getcon INTEVT, r2
- movi ret_from_irq, r3
- ori r3, 1, r3
- movi EVENT_INTERRUPT, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
- .balign TEXT_SIZE /* let's waste the bare minimum */
-
-LVBR_block_end: /* Marker. Used for total checking */
-
- .balign 256
-LRESVEC_block:
- /* Panic handler. Called with MMU off. Possible causes/actions:
- * - Reset: Jump to program start.
- * - Single Step: Turn off Single Step & return.
- * - Others: Call panic handler, passing PC as arg.
- * (this may need to be extended...)
- */
-reset_or_panic:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- putcon SP, DCR
- /* First save r0-1 and tr0, as we need to use these */
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
- st.q SP, 0, r0
- st.q SP, 8, r1
- gettr tr0, r0
- st.q SP, 32, r0
-
- /* Check cause */
- getcon EXPEVT, r0
- movi RESET_CAUSE, r1
- sub r1, r0, r1 /* r1=0 if reset */
- movi _stext-CONFIG_PAGE_OFFSET, r0
- ori r0, 1, r0
- ptabs r0, tr0
- beqi r1, 0, tr0 /* Jump to start address if reset */
-
- getcon EXPEVT, r0
- movi DEBUGSS_CAUSE, r1
- sub r1, r0, r1 /* r1=0 if single step */
- pta single_step_panic, tr0
- beqi r1, 0, tr0 /* jump if single step */
-
- /* Now jump to where we save the registers. */
- movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
- ptabs r1, tr0
- blink tr0, r63
-
-single_step_panic:
-	/* We are in a handler with Single Step set. We need to resume the
-	 * handler by turning on the MMU and turning off Single Step. */
- getcon SSR, r0
- movi SR_MMU, r1
- or r0, r1, r0
- movi ~SR_SS, r1
- and r0, r1, r0
- putcon r0, SSR
- /* Restore EXPEVT, as the rte won't do this */
- getcon PEXPEVT, r0
- putcon r0, EXPEVT
- /* Restore regs */
- ld.q SP, 32, r0
- ptabs r0, tr0
- ld.q SP, 0, r0
- ld.q SP, 8, r1
- getcon DCR, SP
- synco
- rte
-
-
- .balign 256
-debug_exception:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /*
- * Single step/software_break_point first level handler.
- * Called with MMU off, so the first thing we do is enable it
- * by doing an rte with appropriate SSR.
- */
- putcon SP, DCR
- /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
-
- /* With the MMU off, we are bypassing the cache, so purge any
- * data that will be made stale by the following stores.
- */
- ocbp SP, 0
- synco
-
- st.q SP, 0, r0
- st.q SP, 8, r1
- getcon SPC, r0
- st.q SP, 16, r0
- getcon SSR, r0
- st.q SP, 24, r0
-
- /* Enable MMU, block exceptions, set priv mode, disable single step */
- movi SR_MMU | SR_BL | SR_MD, r1
- or r0, r1, r0
- movi ~SR_SS, r1
- and r0, r1, r0
- putcon r0, SSR
- /* Force control to debug_exception_2 when rte is executed */
- movi debug_exeception_2, r0
- ori r0, 1, r0 /* force SHmedia, just in case */
- putcon r0, SPC
- getcon DCR, SP
- synco
- rte
-debug_exeception_2:
- /* Restore saved regs */
- putcon SP, KCR1
- movi resvec_save_area, SP
- ld.q SP, 24, r0
- putcon r0, SSR
- ld.q SP, 16, r0
- putcon r0, SPC
- ld.q SP, 0, r0
- ld.q SP, 8, r1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for debug class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_DEBUG, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-
- .balign 256
-debug_interrupt:
- /* !!! WE COME HERE IN REAL MODE !!! */
- /* Hook-up debug interrupt to allow various debugging options to be
- * hooked into its handler. */
- /* Save original stack pointer into KCR1 */
- synco
- putcon SP, KCR1
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
- ocbp SP, 0
- ocbp SP, 32
- synco
-
-	/* Save the other original registers into resvec_save_area through real addresses */
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* move (spc,ssr)->(pspc,pssr). The rte will shift
- them back again, so that they look like the originals
- as far as the real handler code is concerned. */
- getcon spc, r6
- putcon r6, pspc
- getcon ssr, r6
- putcon r6, pssr
-
- ! construct useful SR for handle_exception
- movi 3, r6
- shlli r6, 30, r6
- getcon sr, r18
- or r18, r6, r6
- putcon r6, ssr
-
- ! SSR is now the current SR with the MD and MMU bits set
- ! i.e. the rte will switch back to priv mode and put
- ! the mmu back on
-
- ! construct spc
- movi handle_exception, r18
- ori r18, 1, r18 ! for safety (do we need this?)
- putcon r18, spc
-
- /* Set args for Non-debug, Not a TLB miss class handler */
-
- ! EXPEVT==0x80 is unused, so 'steal' this value to put the
- ! debug interrupt handler in the vectoring table
- movi 0x80, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_NOT_TLB, r4
-
- or SP, ZERO, r5
- movi CONFIG_PAGE_OFFSET, r6
- add r6, r5, r5
- getcon KCR1, SP
-
- synco ! for safety
- rte ! -> handle_exception, switch back to priv mode again
-
-LRESVEC_block_end: /* Marker. Unused. */
-
- .balign TEXT_SIZE
-
-/*
- * Second level handler for VBR-based exceptions. Pre-handler.
- * In common to all stack-frame sensitive handlers.
- *
- * Inputs:
- * (KCR0) Current [current task union]
- * (KCR1) Original SP
- * (r2) INTEVT/EXPEVT
- * (r3) appropriate return address
- * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
- * (r5) Pointer to reg_save_area
- * (SP) Original SP
- *
- * Available registers:
- * (r6)
- * (r18)
- * (tr0)
- *
- */
-handle_exception:
- /* Common 2nd level handler. */
-
- /* First thing we need an appropriate stack pointer */
- getcon SSR, r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta stack_ok, tr0
- bne r6, ZERO, tr0 /* Original stack pointer is fine */
-
- /* Set stack pointer for user fault */
- getcon KCR0, SP
- movi THREAD_SIZE, r6 /* Point to the end */
- add SP, r6, SP
-
-stack_ok:
-
-/* DEBUG : check for underflow/overflow of the kernel stack */
- pta no_underflow, tr0
- getcon KCR0, r6
- movi 1024, r18
- add r6, r18, r6
- bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
-
-/* Just panic to cause a crash. */
-bad_sp:
- ld.b r63, 0, r6
- nop
-
-no_underflow:
- pta bad_sp, tr0
- getcon kcr0, r6
- movi THREAD_SIZE, r18
- add r18, r6, r6
- bgt SP, r6, tr0 ! sp above the stack
-
- /* Make some room for the BASIC frame. */
- movi -(FRAME_SIZE), r6
- add SP, r6, SP
-
-/* Could do this with no stalling if we had another spare register, but the
- code below will be OK. */
- ld.q r5, SAVED_R2, r6
- ld.q r5, SAVED_R3, r18
- st.q SP, FRAME_R(2), r6
- ld.q r5, SAVED_R4, r6
- st.q SP, FRAME_R(3), r18
- ld.q r5, SAVED_R5, r18
- st.q SP, FRAME_R(4), r6
- ld.q r5, SAVED_R6, r6
- st.q SP, FRAME_R(5), r18
- ld.q r5, SAVED_R18, r18
- st.q SP, FRAME_R(6), r6
- ld.q r5, SAVED_TR0, r6
- st.q SP, FRAME_R(18), r18
- st.q SP, FRAME_T(0), r6
-
- /* Keep old SP around */
- getcon KCR1, r6
-
- /* Save the rest of the general purpose registers */
- st.q SP, FRAME_R(0), r0
- st.q SP, FRAME_R(1), r1
- st.q SP, FRAME_R(7), r7
- st.q SP, FRAME_R(8), r8
- st.q SP, FRAME_R(9), r9
- st.q SP, FRAME_R(10), r10
- st.q SP, FRAME_R(11), r11
- st.q SP, FRAME_R(12), r12
- st.q SP, FRAME_R(13), r13
- st.q SP, FRAME_R(14), r14
-
- /* SP is somewhere else */
- st.q SP, FRAME_R(15), r6
-
- st.q SP, FRAME_R(16), r16
- st.q SP, FRAME_R(17), r17
- /* r18 is saved earlier. */
- st.q SP, FRAME_R(19), r19
- st.q SP, FRAME_R(20), r20
- st.q SP, FRAME_R(21), r21
- st.q SP, FRAME_R(22), r22
- st.q SP, FRAME_R(23), r23
- st.q SP, FRAME_R(24), r24
- st.q SP, FRAME_R(25), r25
- st.q SP, FRAME_R(26), r26
- st.q SP, FRAME_R(27), r27
- st.q SP, FRAME_R(28), r28
- st.q SP, FRAME_R(29), r29
- st.q SP, FRAME_R(30), r30
- st.q SP, FRAME_R(31), r31
- st.q SP, FRAME_R(32), r32
- st.q SP, FRAME_R(33), r33
- st.q SP, FRAME_R(34), r34
- st.q SP, FRAME_R(35), r35
- st.q SP, FRAME_R(36), r36
- st.q SP, FRAME_R(37), r37
- st.q SP, FRAME_R(38), r38
- st.q SP, FRAME_R(39), r39
- st.q SP, FRAME_R(40), r40
- st.q SP, FRAME_R(41), r41
- st.q SP, FRAME_R(42), r42
- st.q SP, FRAME_R(43), r43
- st.q SP, FRAME_R(44), r44
- st.q SP, FRAME_R(45), r45
- st.q SP, FRAME_R(46), r46
- st.q SP, FRAME_R(47), r47
- st.q SP, FRAME_R(48), r48
- st.q SP, FRAME_R(49), r49
- st.q SP, FRAME_R(50), r50
- st.q SP, FRAME_R(51), r51
- st.q SP, FRAME_R(52), r52
- st.q SP, FRAME_R(53), r53
- st.q SP, FRAME_R(54), r54
- st.q SP, FRAME_R(55), r55
- st.q SP, FRAME_R(56), r56
- st.q SP, FRAME_R(57), r57
- st.q SP, FRAME_R(58), r58
- st.q SP, FRAME_R(59), r59
- st.q SP, FRAME_R(60), r60
- st.q SP, FRAME_R(61), r61
- st.q SP, FRAME_R(62), r62
-
- /*
- * Save the S* registers.
- */
- getcon SSR, r61
- st.q SP, FRAME_S(FSSR), r61
- getcon SPC, r62
- st.q SP, FRAME_S(FSPC), r62
- movi -1, r62 /* Reset syscall_nr */
- st.q SP, FRAME_S(FSYSCALL_ID), r62
-
- /* Save the rest of the target registers */
- gettr tr1, r6
- st.q SP, FRAME_T(1), r6
- gettr tr2, r6
- st.q SP, FRAME_T(2), r6
- gettr tr3, r6
- st.q SP, FRAME_T(3), r6
- gettr tr4, r6
- st.q SP, FRAME_T(4), r6
- gettr tr5, r6
- st.q SP, FRAME_T(5), r6
- gettr tr6, r6
- st.q SP, FRAME_T(6), r6
- gettr tr7, r6
- st.q SP, FRAME_T(7), r6
-
- ! setup FP so that unwinder can wind back through nested kernel mode
- ! exceptions
- add SP, ZERO, r14
-
- /* For syscall and debug race condition, get TRA now */
- getcon TRA, r5
-
- /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
- * Also set FD, to catch FPU usage in the kernel.
- *
- * benedict.gaster@superh.com 29/07/2002
- *
- * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
- * same time change BL from 1->0, as any pending interrupt of a level
- * higher than the previous value of IMASK will leak through and be
- * taken unexpectedly.
- *
- * To avoid this we raise the IMASK and then issue another PUTCON to
- * enable interrupts.
- */
- getcon SR, r6
- movi SR_IMASK | SR_FD, r7
- or r6, r7, r6
- putcon r6, SR
- movi SR_UNBLOCK_EXC, r7
- and r6, r7, r6
- putcon r6, SR
-
-
- /* Now call the appropriate 3rd level handler */
- or r3, ZERO, LINK
- movi trap_jtable, r3
- shlri r2, 3, r2
- ldx.l r2, r3, r3
- shlri r2, 2, r2
- ptabs r3, tr0
- or SP, ZERO, r3
- blink tr0, ZERO
-
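The tail of handle_exception above is a classic jump-table dispatch: event codes are spaced 0x20 apart and trap_jtable holds one 32-bit pointer per vector, so evt >> 3 gives the byte offset into the table and evt >> 5 the entry number handed to the third-level handler. The same logic in C, purely as a sketch; the types are assumptions:

extern unsigned long trap_jtable[];   /* array of .long handler addresses */

/* Sketch of the dispatch: index = evt >> 5, byte offset = index * 4. */
static void dispatch_third_level(unsigned int evt, struct pt_regs *regs)
{
        void (*handler)(unsigned int, struct pt_regs *);

        handler = (void (*)(unsigned int, struct pt_regs *))
                        trap_jtable[evt >> 5];
        handler(evt >> 5, regs);      /* entry number + frame pointer */
}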
-/*
- * Second level handler for VBR-based exceptions. Post-handlers.
- *
- * Post-handlers for interrupts (ret_from_irq), exceptions
- * (ret_from_exception) and common reentrance doors (restore_all
- * to get back to the original context, ret_from_syscall loop to
- * check kernel exiting).
- *
- * ret_with_reschedule and work_notifysig are inner labels of
- * the ret_from_syscall loop.
- *
- * In common to all stack-frame sensitive handlers.
- *
- * Inputs:
- * (SP) struct pt_regs *, original registers' frame pointer (basic)
- *
- */
- .global ret_from_irq
-ret_from_irq:
- ld.q SP, FRAME_S(FSSR), r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta resume_kernel, tr0
- bne r6, ZERO, tr0 /* no further checks */
- STI()
- pta ret_with_reschedule, tr0
- blink tr0, ZERO /* Do not check softirqs */
-
- .global ret_from_exception
-ret_from_exception:
- preempt_stop()
-
- ld.q SP, FRAME_S(FSSR), r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta resume_kernel, tr0
- bne r6, ZERO, tr0 /* no further checks */
-
- /* Check softirqs */
-
-#ifdef CONFIG_PREEMPTION
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-resume_kernel:
- CLI()
-
- pta restore_all, tr0
-
- getcon KCR0, r6
- ld.l r6, TI_PRE_COUNT, r7
- beq/u r7, ZERO, tr0
-
-need_resched:
- ld.l r6, TI_FLAGS, r7
- movi (1 << TIF_NEED_RESCHED), r8
- and r8, r7, r8
- bne r8, ZERO, tr0
-
- getcon SR, r7
- andi r7, 0xf0, r7
- bne r7, ZERO, tr0
-
- movi preempt_schedule_irq, r7
- ori r7, 1, r7
- ptabs r7, tr1
- blink tr1, LINK
-
- pta need_resched, tr1
- blink tr1, ZERO
-#endif
-
- .global ret_from_syscall
-ret_from_syscall:
-
-ret_with_reschedule:
- getcon KCR0, r6 ! r6 contains current_thread_info
- ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
-
- movi _TIF_NEED_RESCHED, r8
- and r8, r7, r8
- pta work_resched, tr0
- bne r8, ZERO, tr0
-
- pta restore_all, tr1
-
- movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
- and r8, r7, r8
- pta work_notifysig, tr0
- bne r8, ZERO, tr0
-
- blink tr1, ZERO
-
-work_resched:
- pta ret_from_syscall, tr0
- gettr tr0, LINK
- movi schedule, r6
- ptabs r6, tr0
- blink tr0, ZERO /* Call schedule(), return on top */
-
-work_notifysig:
- gettr tr1, LINK
-
- movi do_notify_resume, r6
- ptabs r6, tr0
- or SP, ZERO, r2
- or r7, ZERO, r3
- blink tr0, LINK /* Call do_notify_resume(regs, current_thread_info->flags), return here */
-
-restore_all:
- /* Do prefetches */
-
- ld.q SP, FRAME_T(0), r6
- ld.q SP, FRAME_T(1), r7
- ld.q SP, FRAME_T(2), r8
- ld.q SP, FRAME_T(3), r9
- ptabs r6, tr0
- ptabs r7, tr1
- ptabs r8, tr2
- ptabs r9, tr3
- ld.q SP, FRAME_T(4), r6
- ld.q SP, FRAME_T(5), r7
- ld.q SP, FRAME_T(6), r8
- ld.q SP, FRAME_T(7), r9
- ptabs r6, tr4
- ptabs r7, tr5
- ptabs r8, tr6
- ptabs r9, tr7
-
- ld.q SP, FRAME_R(0), r0
- ld.q SP, FRAME_R(1), r1
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ld.q SP, FRAME_R(4), r4
- ld.q SP, FRAME_R(5), r5
- ld.q SP, FRAME_R(6), r6
- ld.q SP, FRAME_R(7), r7
- ld.q SP, FRAME_R(8), r8
- ld.q SP, FRAME_R(9), r9
- ld.q SP, FRAME_R(10), r10
- ld.q SP, FRAME_R(11), r11
- ld.q SP, FRAME_R(12), r12
- ld.q SP, FRAME_R(13), r13
- ld.q SP, FRAME_R(14), r14
-
- ld.q SP, FRAME_R(16), r16
- ld.q SP, FRAME_R(17), r17
- ld.q SP, FRAME_R(18), r18
- ld.q SP, FRAME_R(19), r19
- ld.q SP, FRAME_R(20), r20
- ld.q SP, FRAME_R(21), r21
- ld.q SP, FRAME_R(22), r22
- ld.q SP, FRAME_R(23), r23
- ld.q SP, FRAME_R(24), r24
- ld.q SP, FRAME_R(25), r25
- ld.q SP, FRAME_R(26), r26
- ld.q SP, FRAME_R(27), r27
- ld.q SP, FRAME_R(28), r28
- ld.q SP, FRAME_R(29), r29
- ld.q SP, FRAME_R(30), r30
- ld.q SP, FRAME_R(31), r31
- ld.q SP, FRAME_R(32), r32
- ld.q SP, FRAME_R(33), r33
- ld.q SP, FRAME_R(34), r34
- ld.q SP, FRAME_R(35), r35
- ld.q SP, FRAME_R(36), r36
- ld.q SP, FRAME_R(37), r37
- ld.q SP, FRAME_R(38), r38
- ld.q SP, FRAME_R(39), r39
- ld.q SP, FRAME_R(40), r40
- ld.q SP, FRAME_R(41), r41
- ld.q SP, FRAME_R(42), r42
- ld.q SP, FRAME_R(43), r43
- ld.q SP, FRAME_R(44), r44
- ld.q SP, FRAME_R(45), r45
- ld.q SP, FRAME_R(46), r46
- ld.q SP, FRAME_R(47), r47
- ld.q SP, FRAME_R(48), r48
- ld.q SP, FRAME_R(49), r49
- ld.q SP, FRAME_R(50), r50
- ld.q SP, FRAME_R(51), r51
- ld.q SP, FRAME_R(52), r52
- ld.q SP, FRAME_R(53), r53
- ld.q SP, FRAME_R(54), r54
- ld.q SP, FRAME_R(55), r55
- ld.q SP, FRAME_R(56), r56
- ld.q SP, FRAME_R(57), r57
- ld.q SP, FRAME_R(58), r58
-
- getcon SR, r59
- movi SR_BLOCK_EXC, r60
- or r59, r60, r59
- putcon r59, SR /* SR.BL = 1, keep nesting out */
- ld.q SP, FRAME_S(FSSR), r61
- ld.q SP, FRAME_S(FSPC), r62
- movi SR_ASID_MASK, r60
- and r59, r60, r59
- andc r61, r60, r61 /* Clear out older ASID */
- or r59, r61, r61 /* Retain current ASID */
- putcon r61, SSR
- putcon r62, SPC
-
- /* Ignore FSYSCALL_ID */
-
- ld.q SP, FRAME_R(59), r59
- ld.q SP, FRAME_R(60), r60
- ld.q SP, FRAME_R(61), r61
- ld.q SP, FRAME_R(62), r62
-
- /* Last touch */
- ld.q SP, FRAME_R(15), SP
- rte
- nop
-
-/*
- * Third level handlers for VBR-based exceptions. Adapting args to
- * and/or deflecting to fourth level handlers.
- *
- * Fourth level handlers interface.
- * Most are C-coded handlers directly pointed by the trap_jtable.
- * (Third = Fourth level)
- * Inputs:
- * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
- * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
- * (r3) struct pt_regs *, original registers' frame pointer
- * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
- * (r5) TRA control register (for syscall/debug benefit only)
- * (LINK) return address
- * (SP) = r3
- *
- * Kernel TLB fault handlers will get a slightly different interface.
- * (r2) struct pt_regs *, original registers' frame pointer
- * (r3) page fault error code (see asm/thread_info.h)
- * (r4) Effective Address of fault
- * (LINK) return address
- * (SP) = r2
- *
- * fpu_error_or_IRQ? is a helper to deflect to the right cause.
- *
- */
-#ifdef CONFIG_MMU
-tlb_miss_load:
- or SP, ZERO, r2
- or ZERO, ZERO, r3 /* Read */
- getcon TEA, r4
- pta call_do_page_fault, tr0
- beq ZERO, ZERO, tr0
-
-tlb_miss_store:
- or SP, ZERO, r2
- movi FAULT_CODE_WRITE, r3 /* Write */
- getcon TEA, r4
- pta call_do_page_fault, tr0
- beq ZERO, ZERO, tr0
-
-itlb_miss_or_IRQ:
- pta its_IRQ, tr0
- beqi/u r4, EVENT_INTERRUPT, tr0
-
- /* ITLB miss */
- or SP, ZERO, r2
- movi FAULT_CODE_ITLB, r3
- getcon TEA, r4
- /* Fall through */
-
-call_do_page_fault:
- movi do_page_fault, r6
- ptabs r6, tr0
- blink tr0, ZERO
-#endif /* CONFIG_MMU */
-
-fpu_error_or_IRQA:
- pta its_IRQ, tr0
- beqi/l r4, EVENT_INTERRUPT, tr0
-#ifdef CONFIG_SH_FPU
- movi fpu_state_restore_trap_handler, r6
-#else
- movi do_exception_error, r6
-#endif
- ptabs r6, tr0
- blink tr0, ZERO
-
-fpu_error_or_IRQB:
- pta its_IRQ, tr0
- beqi/l r4, EVENT_INTERRUPT, tr0
-#ifdef CONFIG_SH_FPU
- movi fpu_state_restore_trap_handler, r6
-#else
- movi do_exception_error, r6
-#endif
- ptabs r6, tr0
- blink tr0, ZERO
-
-its_IRQ:
- movi do_IRQ, r6
- ptabs r6, tr0
- blink tr0, ZERO
-
-/*
- * system_call/unknown_trap third level handler:
- *
- * Inputs:
- * (r2) fault/interrupt code, entry number (TRAP = 11)
- * (r3) struct pt_regs *, original registers' frame pointer
- * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
- * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
- * (SP) = r3
- * (LINK) return address: ret_from_exception
- * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
- *
- * Outputs:
- * (*r3) Syscall reply (Saved r2)
- * (LINK) In case of syscall only it can be scrapped.
- * Common second level post handler will be ret_from_syscall.
- * Common (non-trace) exit point to that is syscall_ret (saving
- * result to r2). Common bad exit point is syscall_bad (returning
- * ENOSYS then saved to r2).
- *
- */
-
-unknown_trap:
- /* Unknown Trap or User Trace */
- movi do_unknown_trapa, r6
- ptabs r6, tr0
- ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
- andi r2, 0x1ff, r2 /* r2 = syscall # */
- blink tr0, LINK
-
- pta syscall_ret, tr0
- blink tr0, ZERO
-
-	/* New syscall implementation */
-system_call:
- pta unknown_trap, tr0
- or r5, ZERO, r4 /* TRA (=r5) -> r4 */
- shlri r4, 20, r4
- bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
-
- /* It's a system call */
- st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
- andi r5, 0x1ff, r5 /* syscall # -> r5 */
-
- STI()
-
- pta syscall_allowed, tr0
- movi NR_syscalls - 1, r4 /* Last valid */
- bgeu/l r4, r5, tr0
-
-syscall_bad:
- /* Return ENOSYS ! */
- movi -(ENOSYS), r2 /* Fall-through */
-
- .global syscall_ret
-syscall_ret:
- st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-
-/* A different return path for ret_from_fork, because we now need
- * to call schedule_tail with the later kernels. Because prev is
- * loaded into r2 by switch_to(), we can just call it straight away.
- */
-
-.global ret_from_fork
-ret_from_fork:
-
- movi schedule_tail,r5
- ori r5, 1, r5
- ptabs r5, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-.global ret_from_kernel_thread
-ret_from_kernel_thread:
-
- movi schedule_tail,r5
- ori r5, 1, r5
- ptabs r5, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ptabs r3, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-syscall_allowed:
- /* Use LINK to deflect the exit point, default is syscall_ret */
- pta syscall_ret, tr0
- gettr tr0, LINK
- pta syscall_notrace, tr0
-
- getcon KCR0, r2
- ld.l r2, TI_FLAGS, r4
- movi _TIF_WORK_SYSCALL_MASK, r6
- and r6, r4, r6
- beq/l r6, ZERO, tr0
-
- /* Trace it by calling syscall_trace before and after */
- movi do_syscall_trace_enter, r4
- or SP, ZERO, r2
- ptabs r4, tr0
- blink tr0, LINK
-
- /* Save the retval */
- st.q SP, FRAME_R(2), r2
-
- /* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
- ld.q SP, FRAME_S(FSYSCALL_ID), r5
- andi r5, 0x1ff, r5
-
- pta syscall_ret_trace, tr0
- gettr tr0, LINK
-
-syscall_notrace:
- /* Now point to the appropriate 4th level syscall handler */
- movi sys_call_table, r4
- shlli r5, 2, r5
- ldx.l r4, r5, r5
- ptabs r5, tr0
-
- /* Prepare original args */
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ld.q SP, FRAME_R(4), r4
- ld.q SP, FRAME_R(5), r5
- ld.q SP, FRAME_R(6), r6
- ld.q SP, FRAME_R(7), r7
-
- /* And now the trick for those syscalls requiring regs * ! */
- or SP, ZERO, r8
-
- /* Call it */
- blink tr0, ZERO /* LINK is already properly set */
-
-syscall_ret_trace:
- /* We get back here only if under trace */
- st.q SP, FRAME_R(9), r2 /* Save return value */
-
- movi do_syscall_trace_leave, LINK
- or SP, ZERO, r2
- ptabs LINK, tr0
- blink tr0, LINK
-
- /* This needs to be done after any syscall tracing */
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
-
- pta ret_from_syscall, tr0
- blink tr0, ZERO /* Resume normal return sequence */
-
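The syscall plumbing above summarises to a few lines of C: TRA arrives as 0x00xyzzzz, x must be 1 for a syscall, the low bits carry the number, out-of-range numbers fall to syscall_bad, and the handler comes from a flat function-pointer table. A sketch only; the function shape and typedef are assumptions:

typedef long (*syscall_fn)(long, long, long, long, long, long);
extern syscall_fn sys_call_table[];

static long dispatch_syscall(unsigned long tra, long a[6])
{
        unsigned int nr = tra & 0x1ff;          /* syscall number */

        if ((tra >> 20) != 1)                   /* not 0x1yzzzz */
                return -ENOSYS;                 /* really: unknown_trap path */
        if (nr > NR_syscalls - 1)
                return -ENOSYS;                 /* syscall_bad */

        return sys_call_table[nr](a[0], a[1], a[2], a[3], a[4], a[5]);
}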
-/*
- * --- Switch to running under a particular ASID and return the previous ASID value
- * --- The caller is assumed to have done a cli before calling this.
- *
- * Input r2 : new ASID
- * Output r2 : old ASID
- */
-
- .global switch_and_save_asid
-switch_and_save_asid:
- getcon sr, r0
- movi 255, r4
- shlli r4, 16, r4 /* r4 = mask to select ASID */
- and r0, r4, r3 /* r3 = shifted old ASID */
- andi r2, 255, r2 /* mask down new ASID */
- shlli r2, 16, r2 /* align new ASID against SR.ASID */
- andc r0, r4, r0 /* efface old ASID from SR */
- or r0, r2, r0 /* insert the new ASID */
- putcon r0, ssr
- movi 1f, r0
- putcon r0, spc
- rte
- nop
-1:
- ptabs LINK, tr0
- shlri r3, 16, r2 /* r2 = old ASID */
- blink tr0, r63
-
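switch_and_save_asid is, at heart, a read-modify-write of bits 16-23 of SR; the rte dance exists only because SR cannot be rewritten directly while executing under it. The field manipulation alone, as a C sketch with a hypothetical helper:

/* ASID occupies SR bits 16-23; return the old value, install the new. */
static unsigned int swap_asid(unsigned long *sr, unsigned int new_asid)
{
        unsigned int old_asid = (*sr >> 16) & 0xff;

        *sr &= ~(0xffUL << 16);                         /* efface old ASID */
        *sr |= (unsigned long)(new_asid & 0xff) << 16;  /* insert new one */
        return old_asid;
}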
- .global route_to_panic_handler
-route_to_panic_handler:
- /* Switch to real mode, goto panic_handler, don't return. Useful for
- last-chance debugging, e.g. if no output wants to go to the console.
- */
-
- movi panic_handler - CONFIG_PAGE_OFFSET, r1
- ptabs r1, tr0
- pta 1f, tr1
- gettr tr1, r0
- putcon r0, spc
- getcon sr, r0
- movi 1, r1
- shlli r1, 31, r1
- andc r0, r1, r0
- putcon r0, ssr
- rte
- nop
-1: /* Now in real mode */
- blink tr0, r63
- nop
-
- .global peek_real_address_q
-peek_real_address_q:
- /* Two args:
- r2 : real mode address to peek
- r2(out) : result quadword
-
- This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to ioremap the debug
- module, and to avoid the need to ioremap the watchpoint
- controller in a way that identity maps sufficient bits to avoid the
- SH5-101 cut2 silicon defect).
-
- This code is not performance critical
- */
-
- add.l r2, r63, r2 /* sign extend address */
- getcon sr, r0 /* r0 = saved original SR */
- movi 1, r1
- shlli r1, 28, r1
- or r0, r1, r1 /* r0 with block bit set */
- putcon r1, sr /* now in critical section */
- movi 1, r36
- shlli r36, 31, r36
- andc r1, r36, r1 /* turn sr.mmu off in real mode section */
-
- putcon r1, ssr
- movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
- movi 1f, r37 /* virtual mode return addr */
- putcon r36, spc
-
- synco
- rte
- nop
-
-.peek0: /* come here in real mode, don't touch caches!!
- still in critical section (sr.bl==1) */
- putcon r0, ssr
- putcon r37, spc
-	/* Here's the actual peek. If the address is bad, all bets are off
-	 * as to what will happen (handlers invoked in real mode = bad news) */
- ld.q r2, 0, r2
- synco
- rte /* Back to virtual mode */
- nop
-
-1:
- ptabs LINK, tr0
- blink tr0, r63
-
- .global poke_real_address_q
-poke_real_address_q:
- /* Two args:
- r2 : real mode address to poke
- r3 : quadword value to write.
-
- This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to ioremap the debug
- module, and to avoid the need to ioremap the watchpoint
- controller in a way that identity maps sufficient bits to avoid the
- SH5-101 cut2 silicon defect).
-
- This code is not performance critical
- */
-
- add.l r2, r63, r2 /* sign extend address */
- getcon sr, r0 /* r0 = saved original SR */
- movi 1, r1
- shlli r1, 28, r1
- or r0, r1, r1 /* r0 with block bit set */
- putcon r1, sr /* now in critical section */
- movi 1, r36
- shlli r36, 31, r36
- andc r1, r36, r1 /* turn sr.mmu off in real mode section */
-
- putcon r1, ssr
- movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
- movi 1f, r37 /* virtual mode return addr */
- putcon r36, spc
-
- synco
- rte
- nop
-
-.poke0: /* come here in real mode, don't touch caches!!
- still in critical section (sr.bl==1) */
- putcon r0, ssr
- putcon r37, spc
-	/* Here's the actual poke. If the address is bad, all bets are off
-	 * as to what will happen (handlers invoked in real mode = bad news) */
- st.q r2, 0, r3
- synco
- rte /* Back to virtual mode */
- nop
-
-1:
- ptabs LINK, tr0
- blink tr0, r63
-
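Semantically, peek_real_address_q and poke_real_address_q are just a quadword load and store at a physical address; everything else in them is the SR.BL critical section and the MMU-off round trip that C cannot express. A sketch of the contract only, assuming a u64 typedef:

/* What the two routines amount to, ignoring the real-mode dance:
 * a single 64-bit access at a caller-supplied physical address. */
static u64 peek_real_q(volatile u64 *phys)         { return *phys; }
static void poke_real_q(volatile u64 *phys, u64 v) { *phys = v; }

Both were debugging conveniences; a bad address faults in real mode, which the comments above rightly call bad news.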
-#ifdef CONFIG_MMU
-/*
- * --- User Access Handling Section
- */
-
-/*
- * User Access support. It has all moved to non-inlined assembler
- * functions in here.
- *
- * __kernel_size_t __copy_user(void *__to, const void *__from,
- * __kernel_size_t __n)
- *
- * Inputs:
- * (r2) target address
- * (r3) source address
- * (r4) size in bytes
- *
- * Outputs:
- * (*r2) target data
- * (r2) non-copied bytes
- *
- * If a fault occurs on the user pointer, bail out early and return the
- * number of bytes not copied in r2.
- * Strategy : for large blocks, call a real memcpy function which can
- * move >1 byte at a time using unaligned ld/st instructions, and can
- * manipulate the cache using prefetch + alloco to improve the speed
- * further. If a fault occurs in that function, just revert to the
- * byte-by-byte approach used for small blocks; this is rare so the
- * performance hit for that case does not matter.
- *
- * For small blocks it's not worth the overhead of setting up and calling
- * the memcpy routine; do the copy a byte at a time.
- *
- */
- .global __copy_user
-__copy_user:
- pta __copy_user_byte_by_byte, tr1
- movi 16, r0 ! this value is a best guess, should tune it by benchmarking
- bge/u r0, r4, tr1
- pta copy_user_memcpy, tr0
- addi SP, -32, SP
- /* Save arguments in case we have to fix-up unhandled page fault */
- st.q SP, 0, r2
- st.q SP, 8, r3
- st.q SP, 16, r4
- st.q SP, 24, r35 ! r35 is callee-save
- /* Save LINK in a register to reduce RTS time later (otherwise
- ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
- ori LINK, 0, r35
- blink tr0, LINK
-
- /* Copy completed normally if we get back here */
- ptabs r35, tr0
- ld.q SP, 24, r35
- /* don't restore r2-r4, pointless */
- /* set result=r2 to zero as the copy must have succeeded. */
- or r63, r63, r2
- addi SP, 32, SP
- blink tr0, r63 ! RTS
-
- .global __copy_user_fixup
-__copy_user_fixup:
- /* Restore stack frame */
- ori r35, 0, LINK
- ld.q SP, 24, r35
- ld.q SP, 16, r4
- ld.q SP, 8, r3
- ld.q SP, 0, r2
- addi SP, 32, SP
- /* Fall through to original code, in the 'same' state we entered with */
-
-/* The slow byte-by-byte method is used if the fast copy traps due to a bad
- user address. In that rare case, the speed drop can be tolerated. */
-__copy_user_byte_by_byte:
- pta ___copy_user_exit, tr1
- pta ___copy_user1, tr0
- beq/u r4, r63, tr1 /* early exit for zero length copy */
- sub r2, r3, r0
- addi r0, -1, r0
-
-___copy_user1:
- ld.b r3, 0, r5 /* Fault address 1 */
-
- /* Could rewrite this to use just 1 add, but the second comes 'free'
- due to load latency */
- addi r3, 1, r3
- addi r4, -1, r4 /* No real fixup required */
-___copy_user2:
- stx.b r3, r0, r5 /* Fault address 2 */
- bne r4, ZERO, tr0
-
-___copy_user_exit:
- or r4, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
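The strategy comment above maps onto roughly this C shape, as a sketch under stated assumptions: copy_user_memcpy actually signals a fault by transferring control to __copy_user_fixup, which is modelled here as a nonzero return; the threshold of 16 is the asm's own "best guess" cutover.

/* Sketch of __copy_user's cutover. Returns bytes NOT copied. */
static unsigned long copy_user_sketch(unsigned char *to,
                                      const unsigned char *from,
                                      unsigned long n)
{
        if (n > 16 && copy_user_memcpy(to, from, n) == 0)
                return 0;               /* fast path copied everything */

        /* Small copies, or retry after a fast-path fault: byte loop.
         * In the asm, a fault on either access exits via __ex_table
         * with the remaining count as the return value. */
        while (n > 0) {
                *to++ = *from++;
                n--;
        }
        return n;
}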
-/*
- * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
- *
- * Inputs:
- * (r2) target address
- * (r3) size in bytes
- *
- * Outputs:
- * (*r2) zero-ed target data
- * (r2) non-zero-ed bytes
- */
- .global __clear_user
-__clear_user:
- pta ___clear_user_exit, tr1
- pta ___clear_user1, tr0
- beq/u r3, r63, tr1
-
-___clear_user1:
- st.b r2, 0, ZERO /* Fault address */
- addi r2, 1, r2
- addi r3, -1, r3 /* No real fixup required */
- bne r3, ZERO, tr0
-
-___clear_user_exit:
- or r3, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-#endif /* CONFIG_MMU */
-
-/*
- * extern long __get_user_asm_?(void *val, long addr)
- *
- * Inputs:
- * (r2) dest address
- * (r3) source address (in User Space)
- *
- * Outputs:
- * (r2) -EFAULT (faulting)
- * 0 (not faulting)
- */
- .global __get_user_asm_b
-__get_user_asm_b:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_b1:
- ld.b r3, 0, r5 /* r5 = data */
- st.b r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_b_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_w
-__get_user_asm_w:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_w1:
- ld.w r3, 0, r5 /* r5 = data */
- st.w r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_w_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_l
-__get_user_asm_l:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_l1:
- ld.l r3, 0, r5 /* r5 = data */
- st.l r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_l_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_q
-__get_user_asm_q:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_q1:
- ld.q r3, 0, r5 /* r5 = data */
- st.q r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_q_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
- * extern long __put_user_asm_?(void *pval, long addr)
- *
- * Inputs:
- * (r2) kernel pointer to value
- * (r3) dest address (in User Space)
- *
- * Outputs:
- * (r2) -EFAULT (faulting)
- * 0 (not faulting)
- */
- .global __put_user_asm_b
-__put_user_asm_b:
- ld.b r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_b1:
- st.b r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_b_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_w
-__put_user_asm_w:
- ld.w r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_w1:
- st.w r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_w_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_l
-__put_user_asm_l:
- ld.l r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_l1:
- st.l r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_l_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_q
-__put_user_asm_q:
- ld.q r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_q1:
- st.q r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_q_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-panic_stash_regs:
-	/* The idea is: when we get an unhandled panic, we dump the registers
-	   to a known memory location, then just sit in a tight loop.
- This allows the human to look at the memory region through the GDB
- session (assuming the debug module's SHwy initiator isn't locked up
- or anything), to hopefully analyze the cause of the panic. */
-
- /* On entry, former r15 (SP) is in DCR
-	   former r0 is at resvec_save_area + 0
-	   former r1 is at resvec_save_area + 8
-	   former tr0 is at resvec_save_area + 32
- DCR is the only register whose value is lost altogether.
- */
-
- movi 0xffffffff80000000, r0 ! phy of dump area
- ld.q SP, 0x000, r1 ! former r0
- st.q r0, 0x000, r1
- ld.q SP, 0x008, r1 ! former r1
- st.q r0, 0x008, r1
- st.q r0, 0x010, r2
- st.q r0, 0x018, r3
- st.q r0, 0x020, r4
- st.q r0, 0x028, r5
- st.q r0, 0x030, r6
- st.q r0, 0x038, r7
- st.q r0, 0x040, r8
- st.q r0, 0x048, r9
- st.q r0, 0x050, r10
- st.q r0, 0x058, r11
- st.q r0, 0x060, r12
- st.q r0, 0x068, r13
- st.q r0, 0x070, r14
- getcon dcr, r14
- st.q r0, 0x078, r14
- st.q r0, 0x080, r16
- st.q r0, 0x088, r17
- st.q r0, 0x090, r18
- st.q r0, 0x098, r19
- st.q r0, 0x0a0, r20
- st.q r0, 0x0a8, r21
- st.q r0, 0x0b0, r22
- st.q r0, 0x0b8, r23
- st.q r0, 0x0c0, r24
- st.q r0, 0x0c8, r25
- st.q r0, 0x0d0, r26
- st.q r0, 0x0d8, r27
- st.q r0, 0x0e0, r28
- st.q r0, 0x0e8, r29
- st.q r0, 0x0f0, r30
- st.q r0, 0x0f8, r31
- st.q r0, 0x100, r32
- st.q r0, 0x108, r33
- st.q r0, 0x110, r34
- st.q r0, 0x118, r35
- st.q r0, 0x120, r36
- st.q r0, 0x128, r37
- st.q r0, 0x130, r38
- st.q r0, 0x138, r39
- st.q r0, 0x140, r40
- st.q r0, 0x148, r41
- st.q r0, 0x150, r42
- st.q r0, 0x158, r43
- st.q r0, 0x160, r44
- st.q r0, 0x168, r45
- st.q r0, 0x170, r46
- st.q r0, 0x178, r47
- st.q r0, 0x180, r48
- st.q r0, 0x188, r49
- st.q r0, 0x190, r50
- st.q r0, 0x198, r51
- st.q r0, 0x1a0, r52
- st.q r0, 0x1a8, r53
- st.q r0, 0x1b0, r54
- st.q r0, 0x1b8, r55
- st.q r0, 0x1c0, r56
- st.q r0, 0x1c8, r57
- st.q r0, 0x1d0, r58
- st.q r0, 0x1d8, r59
- st.q r0, 0x1e0, r60
- st.q r0, 0x1e8, r61
- st.q r0, 0x1f0, r62
- st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
-
- ld.q SP, 0x020, r1 ! former tr0
- st.q r0, 0x200, r1
- gettr tr1, r1
- st.q r0, 0x208, r1
- gettr tr2, r1
- st.q r0, 0x210, r1
- gettr tr3, r1
- st.q r0, 0x218, r1
- gettr tr4, r1
- st.q r0, 0x220, r1
- gettr tr5, r1
- st.q r0, 0x228, r1
- gettr tr6, r1
- st.q r0, 0x230, r1
- gettr tr7, r1
- st.q r0, 0x238, r1
-
- getcon sr, r1
- getcon ssr, r2
- getcon pssr, r3
- getcon spc, r4
- getcon pspc, r5
- getcon intevt, r6
- getcon expevt, r7
- getcon pexpevt, r8
- getcon tra, r9
- getcon tea, r10
- getcon kcr0, r11
- getcon kcr1, r12
- getcon vbr, r13
- getcon resvec, r14
-
- st.q r0, 0x240, r1
- st.q r0, 0x248, r2
- st.q r0, 0x250, r3
- st.q r0, 0x258, r4
- st.q r0, 0x260, r5
- st.q r0, 0x268, r6
- st.q r0, 0x270, r7
- st.q r0, 0x278, r8
- st.q r0, 0x280, r9
- st.q r0, 0x288, r10
- st.q r0, 0x290, r11
- st.q r0, 0x298, r12
- st.q r0, 0x2a0, r13
- st.q r0, 0x2a8, r14
-
- getcon SPC,r2
- getcon SSR,r3
- getcon EXPEVT,r4
- /* Prepare to jump to C - physical address */
- movi panic_handler-CONFIG_PAGE_OFFSET, r1
- ori r1, 1, r1
- ptabs r1, tr0
- getcon DCR, SP
- blink tr0, ZERO
- nop
- nop
- nop
- nop
-
-
-
-
-/*
- * --- Signal Handling Section
- */
-
-/*
- * extern long long _sa_default_rt_restorer
- * extern long long _sa_default_restorer
- *
- * or, better,
- *
- * extern void _sa_default_rt_restorer(void)
- * extern void _sa_default_restorer(void)
- *
- * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
- * from user space. Copied into user space by signal management.
- * Both must be quad aligned and 2 quad long (4 instructions).
- *
- */
- .balign 8
- .global sa_default_rt_restorer
-sa_default_rt_restorer:
- movi 0x10, r9
- shori __NR_rt_sigreturn, r9
- trapa r9
- nop
-
- .balign 8
- .global sa_default_restorer
-sa_default_restorer:
- movi 0x10, r9
- shori __NR_sigreturn, r9
- trapa r9
- nop
-
-/*
- * --- __ex_table Section
- */
-
-/*
- * User Access Exception Table.
- */
- .section __ex_table, "a"
-
- .global asm_uaccess_start /* Just a marker */
-asm_uaccess_start:
-
-#ifdef CONFIG_MMU
- .long ___copy_user1, ___copy_user_exit
- .long ___copy_user2, ___copy_user_exit
- .long ___clear_user1, ___clear_user_exit
-#endif
- .long ___get_user_asm_b1, ___get_user_asm_b_exit
- .long ___get_user_asm_w1, ___get_user_asm_w_exit
- .long ___get_user_asm_l1, ___get_user_asm_l_exit
- .long ___get_user_asm_q1, ___get_user_asm_q_exit
- .long ___put_user_asm_b1, ___put_user_asm_b_exit
- .long ___put_user_asm_w1, ___put_user_asm_w_exit
- .long ___put_user_asm_l1, ___put_user_asm_l_exit
- .long ___put_user_asm_q1, ___put_user_asm_q_exit
-
- .global asm_uaccess_end /* Just a marker */
-asm_uaccess_end:
-
-
-
-
-/*
- * --- .init.text Section
- */
-
- __INIT
-
-/*
- * void trap_init (void)
- *
- */
- .global trap_init
-trap_init:
- addi SP, -24, SP /* Room to save r28/r29/r30 */
- st.q SP, 0, r28
- st.q SP, 8, r29
- st.q SP, 16, r30
-
- /* Set VBR and RESVEC */
- movi LVBR_block, r19
- andi r19, -4, r19 /* reset MMUOFF + reserved */
- /* For RESVEC exceptions we force the MMU off, which means we need the
- physical address. */
- movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
- andi r20, -4, r20 /* reset reserved */
- ori r20, 1, r20 /* set MMUOFF */
- putcon r19, VBR
- putcon r20, RESVEC
-
- /* Sanity check */
- movi LVBR_block_end, r21
- andi r21, -4, r21
- movi BLOCK_SIZE, r29 /* r29 = expected size */
- or r19, ZERO, r30
- add r19, r29, r19
-
- /*
- * Ugly, but better to loop forever now than to crash afterwards.
- * We should print a message, but if we touch LVBR or
- * LRESVEC blocks we should not be surprised if we get stuck
- * in trap_init().
- */
- pta trap_init_loop, tr1
- gettr tr1, r28 /* r28 = trap_init_loop */
- sub r21, r30, r30 /* r30 = actual size */
-
- /*
- * VBR/RESVEC handlers overlap by being bigger than
- * allowed. Very bad. Just loop forever.
- * (r28) panic/loop address
- * (r29) expected size
- * (r30) actual size
- */
-trap_init_loop:
- bne r19, r21, tr1
-
- /* Now that exception vectors are set up reset SR.BL */
- getcon SR, r22
- movi SR_UNBLOCK_EXC, r23
- and r22, r23, r22
- putcon r22, SR
-
- addi SP, 24, SP
- ptabs LINK, tr0
- blink tr0, ZERO
-
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
deleted file mode 100644
index 3966b5ee8e93..000000000000
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/fpu.c
- *
- * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
- * Copyright (C) 2002 STMicroelectronics Limited
- * Author : Stuart Menefy
- *
- * Started from SH4 version:
- * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- */
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <asm/processor.h>
-
-void save_fpu(struct task_struct *tsk)
-{
- asm volatile("fst.p %0, (0*8), fp0\n\t"
- "fst.p %0, (1*8), fp2\n\t"
- "fst.p %0, (2*8), fp4\n\t"
- "fst.p %0, (3*8), fp6\n\t"
- "fst.p %0, (4*8), fp8\n\t"
- "fst.p %0, (5*8), fp10\n\t"
- "fst.p %0, (6*8), fp12\n\t"
- "fst.p %0, (7*8), fp14\n\t"
- "fst.p %0, (8*8), fp16\n\t"
- "fst.p %0, (9*8), fp18\n\t"
- "fst.p %0, (10*8), fp20\n\t"
- "fst.p %0, (11*8), fp22\n\t"
- "fst.p %0, (12*8), fp24\n\t"
- "fst.p %0, (13*8), fp26\n\t"
- "fst.p %0, (14*8), fp28\n\t"
- "fst.p %0, (15*8), fp30\n\t"
- "fst.p %0, (16*8), fp32\n\t"
- "fst.p %0, (17*8), fp34\n\t"
- "fst.p %0, (18*8), fp36\n\t"
- "fst.p %0, (19*8), fp38\n\t"
- "fst.p %0, (20*8), fp40\n\t"
- "fst.p %0, (21*8), fp42\n\t"
- "fst.p %0, (22*8), fp44\n\t"
- "fst.p %0, (23*8), fp46\n\t"
- "fst.p %0, (24*8), fp48\n\t"
- "fst.p %0, (25*8), fp50\n\t"
- "fst.p %0, (26*8), fp52\n\t"
- "fst.p %0, (27*8), fp54\n\t"
- "fst.p %0, (28*8), fp56\n\t"
- "fst.p %0, (29*8), fp58\n\t"
- "fst.p %0, (30*8), fp60\n\t"
- "fst.p %0, (31*8), fp62\n\t"
-
- "fgetscr fr63\n\t"
- "fst.s %0, (32*8), fr63\n\t"
- : /* no output */
- : "r" (&tsk->thread.xstate->hardfpu)
- : "memory");
-}
-
-void restore_fpu(struct task_struct *tsk)
-{
- asm volatile("fld.p %0, (0*8), fp0\n\t"
- "fld.p %0, (1*8), fp2\n\t"
- "fld.p %0, (2*8), fp4\n\t"
- "fld.p %0, (3*8), fp6\n\t"
- "fld.p %0, (4*8), fp8\n\t"
- "fld.p %0, (5*8), fp10\n\t"
- "fld.p %0, (6*8), fp12\n\t"
- "fld.p %0, (7*8), fp14\n\t"
- "fld.p %0, (8*8), fp16\n\t"
- "fld.p %0, (9*8), fp18\n\t"
- "fld.p %0, (10*8), fp20\n\t"
- "fld.p %0, (11*8), fp22\n\t"
- "fld.p %0, (12*8), fp24\n\t"
- "fld.p %0, (13*8), fp26\n\t"
- "fld.p %0, (14*8), fp28\n\t"
- "fld.p %0, (15*8), fp30\n\t"
- "fld.p %0, (16*8), fp32\n\t"
- "fld.p %0, (17*8), fp34\n\t"
- "fld.p %0, (18*8), fp36\n\t"
- "fld.p %0, (19*8), fp38\n\t"
- "fld.p %0, (20*8), fp40\n\t"
- "fld.p %0, (21*8), fp42\n\t"
- "fld.p %0, (22*8), fp44\n\t"
- "fld.p %0, (23*8), fp46\n\t"
- "fld.p %0, (24*8), fp48\n\t"
- "fld.p %0, (25*8), fp50\n\t"
- "fld.p %0, (26*8), fp52\n\t"
- "fld.p %0, (27*8), fp54\n\t"
- "fld.p %0, (28*8), fp56\n\t"
- "fld.p %0, (29*8), fp58\n\t"
- "fld.p %0, (30*8), fp60\n\t"
-
- "fld.s %0, (32*8), fr63\n\t"
- "fputscr fr63\n\t"
-
- "fld.p %0, (31*8), fp62\n\t"
- : /* no output */
- : "r" (&tsk->thread.xstate->hardfpu)
- : "memory");
-}
-
-asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
-{
- regs->pc += 4;
-
- force_sig(SIGFPE);
-}
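
The save/restore pair above is driven by the lazy-FPU ownership convention visible elsewhere in this patch (last_task_used_math in process_64.c and ptrace_64.c). A hedged userspace sketch of that convention, with stubbed types; fpu_handoff is an illustrative name, not a kernel symbol, and the real FPUDIS trap path differs in detail:

    #include <stddef.h>

    struct task;                            /* stand-in for task_struct */
    static struct task *last_task_used_math;

    static void save_fpu(struct task *t)    { (void)t; /* fst.p ..., as above */ }
    static void restore_fpu(struct task *t) { (void)t; /* fld.p ..., as above */ }

    /* On first FPU use by a new task, spill the previous owner's state
     * and load the new owner's; see exit_thread() in process_64.c for
     * why a stale owner pointer must be nulled. */
    static void fpu_handoff(struct task *next)
    {
        if (last_task_used_math == next)
            return;                         /* state already live in the FPU */
        if (last_task_used_math)
            save_fpu(last_task_used_math);
        restore_fpu(next);
        last_task_used_math = next;
    }

    int main(void) { fpu_handoff(NULL); return 0; }
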
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
deleted file mode 100644
index 947250188065..000000000000
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/probe.c
- *
- * CPU Subtype Probing for SH-5.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- */
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/string.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/tlb.h>
-
-void cpu_probe(void)
-{
- unsigned long long cir;
-
- /*
- * Do peeks in real mode to avoid having to set up a mapping for
- * the WPC registers. On SH5-101 cut2, such a mapping would be
- * exposed to an address translation erratum which would make it
- * hard to set up correctly.
- */
- cir = peek_real_address_q(0x0d000008);
- if ((cir & 0xffff) == 0x5103)
- boot_cpu_data.type = CPU_SH5_103;
- else if (((cir >> 32) & 0xffff) == 0x51e2)
- /* CPU.VCR aliased at CIR address on SH5-101 */
- boot_cpu_data.type = CPU_SH5_101;
-
- boot_cpu_data.family = CPU_FAMILY_SH5;
-
- /*
- * First, setup some sane values for the I-cache.
- */
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.icache.sets = 256;
- boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
- boot_cpu_data.icache.way_incr = (1 << 13);
- boot_cpu_data.icache.entry_shift = 5;
- boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
- boot_cpu_data.icache.linesz;
- boot_cpu_data.icache.entry_mask = 0x1fe0;
- boot_cpu_data.icache.flags = 0;
-
- /*
- * Next, setup some sane values for the D-cache.
- *
- * On the SH5, these are pretty consistent with the I-cache settings,
- * so we just copy over the existing definitions.. these can be fixed
- * up later, especially if we add runtime CPU probing.
- *
- * Though in the meantime it saves us from having to duplicate all of
- * the above definitions..
- */
- boot_cpu_data.dcache = boot_cpu_data.icache;
-
- /*
- * Setup any cache-related flags here
- */
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
-#elif defined(CONFIG_CACHE_WRITEBACK)
- set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
-#endif
-
- /* Setup some I/D TLB defaults */
- sh64_tlb_init();
-}
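
The CIR decode above is self-contained enough to check in isolation. A minimal sketch assuming the same bit layout (subtype in bits 15:0, the CPU.VCR alias in bits 47:32):

    #include <stdio.h>
    #include <stdint.h>

    static const char *sh5_subtype(uint64_t cir)
    {
        if ((cir & 0xffff) == 0x5103)
            return "SH5-103";
        if (((cir >> 32) & 0xffff) == 0x51e2)
            return "SH5-101";               /* CPU.VCR aliased at CIR */
        return "unknown";
    }

    int main(void)
    {
        printf("%s\n", sh5_subtype(0x5103));             /* SH5-103 */
        printf("%s\n", sh5_subtype(0x51e200000000ULL));  /* SH5-101 */
        return 0;
    }
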
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
deleted file mode 100644
index dc8476d67244..000000000000
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SH5-101/SH5-103 CPU Setup
- *
- * Copyright (C) 2009 Paul Mundt
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial.h>
-#include <linux/serial_sci.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/sh_timer.h>
-#include <asm/addrspace.h>
-#include <asm/platform_early.h>
-
-static struct plat_sci_port scif0_platform_data = {
- .flags = UPF_IOREMAP,
- .scscr = SCSCR_REIE,
- .type = PORT_SCIF,
-};
-
-static struct resource scif0_resources[] = {
- DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100),
- DEFINE_RES_IRQ(39),
- DEFINE_RES_IRQ(40),
- DEFINE_RES_IRQ(42),
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .resource = scif0_resources,
- .num_resources = ARRAY_SIZE(scif0_resources),
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct resource rtc_resources[] = {
- [0] = {
- .start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
- .end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
- .flags = IORESOURCE_IO,
- },
- [1] = {
- /* Period IRQ */
- .start = IRQ_PRI,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = IRQ_CUI,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = IRQ_ATI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device rtc_device = {
- .name = "sh-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(rtc_resources),
- .resource = rtc_resources,
-};
-
-#define TMU_BLOCK_OFF 0x01020000
-#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
-
-static struct sh_timer_config tmu0_platform_data = {
- .channels_mask = 7,
-};
-
-static struct resource tmu0_resources[] = {
- DEFINE_RES_MEM(TMU_BASE, 0x30),
- DEFINE_RES_IRQ(IRQ_TUNI0),
- DEFINE_RES_IRQ(IRQ_TUNI1),
- DEFINE_RES_IRQ(IRQ_TUNI2),
-};
-
-static struct platform_device tmu0_device = {
- .name = "sh-tmu",
- .id = 0,
- .dev = {
- .platform_data = &tmu0_platform_data,
- },
- .resource = tmu0_resources,
- .num_resources = ARRAY_SIZE(tmu0_resources),
-};
-
-static struct platform_device *sh5_early_devices[] __initdata = {
- &scif0_device,
- &tmu0_device,
-};
-
-static struct platform_device *sh5_devices[] __initdata = {
- &rtc_device,
-};
-
-static int __init sh5_devices_setup(void)
-{
- int ret;
-
- ret = platform_add_devices(sh5_early_devices,
- ARRAY_SIZE(sh5_early_devices));
- if (unlikely(ret != 0))
- return ret;
-
- return platform_add_devices(sh5_devices,
- ARRAY_SIZE(sh5_devices));
-}
-arch_initcall(sh5_devices_setup);
-
-void __init plat_early_device_setup(void)
-{
- sh_early_platform_add_devices(sh5_early_devices,
- ARRAY_SIZE(sh5_early_devices));
-}
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
deleted file mode 100644
index d1beff755632..000000000000
--- a/arch/sh/kernel/cpu/sh5/switchto.S
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/cpu/sh5/switchto.S
- *
- * sh64 context switch
- *
- * Copyright (C) 2004 Richard Curnow
-*/
-
- .section .text..SHmedia32,"ax"
- .little
-
- .balign 32
-
- .type sh64_switch_to,@function
- .global sh64_switch_to
- .global __sh64_switch_to_end
-sh64_switch_to:
-
-/* Incoming args
- r2 - prev
- r3 - &prev->thread
- r4 - next
- r5 - &next->thread
-
- Outgoing results
- r2 - last (=prev) : this just stays in r2 throughout
-
- Want to create a full (struct pt_regs) on the stack to allow backtracing
- functions to work. However, we only need to populate the callee-save
- register slots in this structure; since we're a function, our ancestors must
- themselves have preserved all caller-saved state on the stack. This saves
- some wasted effort since we won't need to look at the values.
-
- In particular, all caller-save registers are immediately available for
- scratch use.
-
-*/
-
-#define FRAME_SIZE (76*8 + 8)
-
- movi FRAME_SIZE, r0
- sub.l r15, r0, r15
- ! Do normal-style register save to support backtrace
-
- st.l r15, 0, r18 ! save link reg
- st.l r15, 4, r14 ! save fp
- add.l r15, r63, r14 ! setup frame pointer
-
- ! hopefully this looks normal to the backtrace now.
-
- addi.l r15, 8, r1 ! base of pt_regs
- addi.l r1, 24, r0 ! base of pt_regs.regs
- addi.l r0, (63*8), r8 ! base of pt_regs.trregs
-
- /* Note : to be fixed?
- struct pt_regs is really designed for holding the state on entry
- to an exception, i.e. pc,sr,regs etc. However, for the context
- switch state, some of this is not required. But the unwinder takes
- struct pt_regs * as an arg so we have to build this structure
- to allow unwinding switched tasks in show_state() */
-
- st.q r0, ( 9*8), r9
- st.q r0, (10*8), r10
- st.q r0, (11*8), r11
- st.q r0, (12*8), r12
- st.q r0, (13*8), r13
- st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
- ! the point where the process is left in suspended animation, i.e. current
- ! fp here, not the saved one.
- st.q r0, (16*8), r16
-
- st.q r0, (24*8), r24
- st.q r0, (25*8), r25
- st.q r0, (26*8), r26
- st.q r0, (27*8), r27
- st.q r0, (28*8), r28
- st.q r0, (29*8), r29
- st.q r0, (30*8), r30
- st.q r0, (31*8), r31
- st.q r0, (32*8), r32
- st.q r0, (33*8), r33
- st.q r0, (34*8), r34
- st.q r0, (35*8), r35
-
- st.q r0, (44*8), r44
- st.q r0, (45*8), r45
- st.q r0, (46*8), r46
- st.q r0, (47*8), r47
- st.q r0, (48*8), r48
- st.q r0, (49*8), r49
- st.q r0, (50*8), r50
- st.q r0, (51*8), r51
- st.q r0, (52*8), r52
- st.q r0, (53*8), r53
- st.q r0, (54*8), r54
- st.q r0, (55*8), r55
- st.q r0, (56*8), r56
- st.q r0, (57*8), r57
- st.q r0, (58*8), r58
- st.q r0, (59*8), r59
-
- ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
- ! Use a local label to avoid creating a symbol that will confuse the
- ! backtrace
- pta .Lsave_pc, tr0
-
- gettr tr5, r45
- gettr tr6, r46
- gettr tr7, r47
- st.q r8, (5*8), r45
- st.q r8, (6*8), r46
- st.q r8, (7*8), r47
-
- ! Now switch context
- gettr tr0, r9
- st.l r3, 0, r15 ! prev->thread.sp
- st.l r3, 8, r1 ! prev->thread.kregs
- st.l r3, 4, r9 ! prev->thread.pc
- st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
-
- ! Load PC for next task (init value or save_pc later)
- ld.l r5, 4, r18 ! next->thread.pc
- ! Switch stacks
- ld.l r5, 0, r15 ! next->thread.sp
- ptabs r18, tr0
-
- ! Update current
- ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
- putcon r9, kcr0 ! current = next->thread_info
-
- ! go to save_pc for a reschedule, or the initial thread.pc for a new process
- blink tr0, r63
-
- ! Restore (when we come back to a previously saved task)
-.Lsave_pc:
- addi.l r15, 32, r0 ! r0 = next's regs
- addi.l r0, (63*8), r8 ! r8 = next's tr_regs
-
- ld.q r8, (5*8), r45
- ld.q r8, (6*8), r46
- ld.q r8, (7*8), r47
- ptabs r45, tr5
- ptabs r46, tr6
- ptabs r47, tr7
-
- ld.q r0, ( 9*8), r9
- ld.q r0, (10*8), r10
- ld.q r0, (11*8), r11
- ld.q r0, (12*8), r12
- ld.q r0, (13*8), r13
- ld.q r0, (14*8), r14
- ld.q r0, (16*8), r16
-
- ld.q r0, (24*8), r24
- ld.q r0, (25*8), r25
- ld.q r0, (26*8), r26
- ld.q r0, (27*8), r27
- ld.q r0, (28*8), r28
- ld.q r0, (29*8), r29
- ld.q r0, (30*8), r30
- ld.q r0, (31*8), r31
- ld.q r0, (32*8), r32
- ld.q r0, (33*8), r33
- ld.q r0, (34*8), r34
- ld.q r0, (35*8), r35
-
- ld.q r0, (44*8), r44
- ld.q r0, (45*8), r45
- ld.q r0, (46*8), r46
- ld.q r0, (47*8), r47
- ld.q r0, (48*8), r48
- ld.q r0, (49*8), r49
- ld.q r0, (50*8), r50
- ld.q r0, (51*8), r51
- ld.q r0, (52*8), r52
- ld.q r0, (53*8), r53
- ld.q r0, (54*8), r54
- ld.q r0, (55*8), r55
- ld.q r0, (56*8), r56
- ld.q r0, (57*8), r57
- ld.q r0, (58*8), r58
- ld.q r0, (59*8), r59
-
- ! epilogue
- ld.l r15, 0, r18
- ld.l r15, 4, r14
- ptabs r18, tr0
- movi FRAME_SIZE, r0
- add r15, r0, r15
- blink tr0, r63
-__sh64_switch_to_end:
-.LFE1:
- .size sh64_switch_to,.LFE1-sh64_switch_to
-
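
The frame arithmetic at the top of sh64_switch_to can be restated in C; a sketch with the offsets taken from the addi.l sequence above (link and fp in the first two slots, pt_regs at SP+8, its regs[] array 24 bytes further on, trregs 63*8 beyond that):

    #include <stdio.h>

    #define FRAME_SIZE (76 * 8 + 8)    /* as defined above */

    int main(void)
    {
        unsigned long sp = 0x10000;              /* example post-prologue SP */
        unsigned long pt_regs = sp + 8;          /* addi.l r15, 8, r1       */
        unsigned long regs    = pt_regs + 24;    /* addi.l r1, 24, r0       */
        unsigned long trregs  = regs + 63 * 8;   /* addi.l r0, (63*8), r8   */

        printf("frame %d bytes; regs[] at +%lu, trregs at +%lu\n",
               FRAME_SIZE, regs - sp, trregs - sp);
        return 0;
    }
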
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
deleted file mode 100644
index 3cb0cd9cea29..000000000000
--- a/arch/sh/kernel/cpu/sh5/unwind.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/unwind.c
- *
- * Copyright (C) 2004 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#include <linux/kallsyms.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/unwinder.h>
-#include <asm/stacktrace.h>
-
-static u8 regcache[63];
-
-/*
- * Finding the previous stack frame isn't as straightforward as it is
- * on some other platforms. In the sh64 case, we don't have "linked" stack
- * frames, so we need to do a bit of work to determine the previous frame,
- * and in turn, the previous r14/r18 pair.
- *
- * There are generally a few cases which determine where we can find out
- * the r14/r18 values. In the general case, this can be determined by poking
- * around the prologue of the symbol PC is in (note that we absolutely must
- * have frame pointer support as well as the kernel symbol table mapped,
- * otherwise we can't even get this far).
- *
- * In other cases, such as the interrupt/exception path, we can poke around
- * the sp/fp.
- *
- * Notably, this entire approach is somewhat error prone, and in the event
- * that the previous frame cannot be determined, that's all we can do.
- * Either way, this still leaves us with a more correct backtrace than what
- * we would be able to come up with by walking the stack (which is garbage
- * for anything beyond the first frame).
- * -- PFM.
- */
-static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
- unsigned long *pprev_fp, unsigned long *pprev_pc,
- struct pt_regs *regs)
-{
- const char *sym;
- char namebuf[128];
- unsigned long offset;
- unsigned long prologue = 0;
- unsigned long fp_displacement = 0;
- unsigned long fp_prev = 0;
- unsigned long offset_r14 = 0, offset_r18 = 0;
- int i, found_prologue_end = 0;
-
- sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
- if (!sym)
- return -EINVAL;
-
- prologue = pc - offset;
- if (!prologue)
- return -EINVAL;
-
- /* Validate fp, to avoid risk of dereferencing a bad pointer later.
- Assume 128Mb since that's the amount of RAM on a Cayman. Modify
- when there is an SH-5 board with more. */
- if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
- (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
- ((fp & 7) != 0)) {
- return -EINVAL;
- }
-
- /*
- * Depth to walk, depth is completely arbitrary.
- */
- for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
- unsigned long op;
- u8 major, minor;
- u8 src, dest, disp;
-
- op = *(unsigned long *)prologue;
-
- major = (op >> 26) & 0x3f;
- src = (op >> 20) & 0x3f;
- minor = (op >> 16) & 0xf;
- disp = (op >> 10) & 0x3f;
- dest = (op >> 4) & 0x3f;
-
- /*
- * Stack frame creation happens in a number of ways.. in the
- * general case when the stack frame is less than 511 bytes,
- * it's generally created by an addi or addi.l:
- *
- * addi/addi.l r15, -FRAME_SIZE, r15
- *
- * in the event that the frame size is bigger than this, it's
- * typically created using a movi/sub pair as follows:
- *
- * movi FRAME_SIZE, rX
- * sub r15, rX, r15
- */
-
- switch (major) {
- case (0x00 >> 2):
- switch (minor) {
- case 0x8: /* add.l */
- case 0x9: /* add */
- /* Look for r15, r63, r14 */
- if (src == 15 && disp == 63 && dest == 14)
- found_prologue_end = 1;
-
- break;
- case 0xa: /* sub.l */
- case 0xb: /* sub */
- if (src != 15 || dest != 15)
- continue;
-
- fp_displacement -= regcache[disp];
- fp_prev = fp - fp_displacement;
- break;
- }
- break;
- case (0xa8 >> 2): /* st.l */
- if (src != 15)
- continue;
-
- switch (dest) {
- case 14:
- if (offset_r14 || fp_displacement == 0)
- continue;
-
- offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- offset_r14 *= sizeof(unsigned long);
- offset_r14 += fp_displacement;
- break;
- case 18:
- if (offset_r18 || fp_displacement == 0)
- continue;
-
- offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- offset_r18 *= sizeof(unsigned long);
- offset_r18 += fp_displacement;
- break;
- }
-
- break;
- case (0xcc >> 2): /* movi */
- if (dest >= 63) {
- printk(KERN_NOTICE "%s: Invalid dest reg %d "
- "specified in movi handler. Failed "
- "opcode was 0x%lx: ", __func__,
- dest, op);
-
- continue;
- }
-
- /* Sign extend */
- regcache[dest] =
- sign_extend64((((u64)op >> 10) & 0xffff), 9);
- break;
- case (0xd0 >> 2): /* addi */
- case (0xd4 >> 2): /* addi.l */
- /* Look for r15, -FRAME_SIZE, r15 */
- if (src != 15 || dest != 15)
- continue;
-
- /* Sign extended frame size.. */
- fp_displacement +=
- (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- fp_prev = fp - fp_displacement;
- break;
- }
-
- if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
- break;
- }
-
- if (offset_r14 == 0 || fp_prev == 0) {
- if (!offset_r14)
- pr_debug("Unable to find r14 offset\n");
- if (!fp_prev)
- pr_debug("Unable to find previous fp\n");
-
- return -EINVAL;
- }
-
- /* For the innermost leaf function, there might not be an offset_r18 */
- if (!*pprev_pc && (offset_r18 == 0))
- return -EINVAL;
-
- *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
-
- if (offset_r18)
- *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
-
- *pprev_pc &= ~1;
-
- return 0;
-}
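
The displacement extraction repeated throughout lookup_prev_stack_frame(), the expression (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54), is just a sign extension of the 10-bit immediate at bits 19:10 of the opcode. A standalone check, assuming the same encoding:

    #include <stdio.h>
    #include <stdint.h>

    /* Sign-extend the 10-bit immediate at bits 19:10; equivalent to
     * the shift pair used above, written without the extra casts. */
    static int64_t simm10(uint64_t op)
    {
        uint64_t imm = (op >> 10) & 0x3ff;
        return (int64_t)(imm << 54) >> 54;
    }

    int main(void)
    {
        uint64_t op = 0x3f8ULL << 10;             /* immediate 0x3f8 == -8 */
        printf("%lld\n", (long long)simm10(op));  /* prints -8 */
        return 0;
    }
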
-
-/*
- * Don't put this on the stack since we'll want to call in to
- * sh64_unwinder_dump() when we're close to underflowing the stack
- * anyway.
- */
-static struct pt_regs here_regs;
-
-extern const char syscall_ret;
-extern const char ret_from_syscall;
-extern const char ret_from_exception;
-extern const char ret_from_irq;
-
-static void sh64_unwind_inner(const struct stacktrace_ops *ops,
- void *data, struct pt_regs *regs);
-
-static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
- unsigned long pc, unsigned long fp)
-{
- if ((fp >= __MEMORY_START) &&
- ((fp & 7) == 0))
- sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
-}
-
-static void sh64_unwind_inner(const struct stacktrace_ops *ops,
- void *data, struct pt_regs *regs)
-{
- unsigned long pc, fp;
- int ofs = 0;
- int first_pass;
-
- pc = regs->pc & ~1;
- fp = regs->regs[14];
-
- first_pass = 1;
- for (;;) {
- int cond;
- unsigned long next_fp, next_pc;
-
- if (pc == ((unsigned long)&syscall_ret & ~1)) {
- printk("SYSCALL\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
- printk("SYSCALL (PREEMPTED)\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- /* In this case, the PC is discovered by lookup_prev_stack_frame but
- it has 4 taken off it to look like the 'caller' */
- if (pc == ((unsigned long)&ret_from_exception & ~1)) {
- printk("EXCEPTION\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- if (pc == ((unsigned long)&ret_from_irq & ~1)) {
- printk("IRQ\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
- ((pc & 3) == 0) && ((fp & 7) == 0));
-
- pc -= ofs;
-
- ops->address(data, pc, 1);
-
- if (first_pass) {
- /* If the innermost frame is a leaf function, it's
- * possible that r18 is never saved out to the stack.
- */
- next_pc = regs->regs[18];
- } else {
- next_pc = 0;
- }
-
- if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
- ofs = sizeof(unsigned long);
- pc = next_pc & ~1;
- fp = next_fp;
- } else {
- printk("Unable to lookup previous stack frame\n");
- break;
- }
- first_pass = 0;
- }
-
- printk("\n");
-}
-
-static void sh64_unwinder_dump(struct task_struct *task,
- struct pt_regs *regs,
- unsigned long *sp,
- const struct stacktrace_ops *ops,
- void *data)
-{
- if (!regs) {
- /*
- * Fetch current regs if we have no other saved state to back
- * trace from.
- */
- regs = &here_regs;
-
- __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
- __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
- __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
-
- __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
- __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
- __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
- __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
- __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
- __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
- __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
- __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
-
- __asm__ __volatile__ (
- "pta 0f, tr0\n\t"
- "blink tr0, %0\n\t"
- "0: nop"
- : "=r" (regs->pc)
- );
- }
-
- sh64_unwind_inner(ops, data, regs);
-}
-
-static struct unwinder sh64_unwinder = {
- .name = "sh64-unwinder",
- .dump = sh64_unwinder_dump,
- .rating = 150,
-};
-
-static int __init sh64_unwinder_init(void)
-{
- return unwinder_register(&sh64_unwinder);
-}
-early_initcall(sh64_unwinder_init);
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 9f1c9c11d62d..a13c045804ed 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -16,36 +16,37 @@
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
-void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+void dump_mem(const char *str, const char *loglvl,
+ unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
- printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+ printk("%s%s(0x%08lx to 0x%08lx)\n", loglvl, str, bottom, top);
for (p = bottom & ~31; p < top; ) {
- printk("%04lx: ", p & 0xffff);
+ printk("%s%04lx: ", loglvl, p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
- printk(" ");
+ printk("%s ", loglvl);
else {
if (__get_user(val, (unsigned int __user *)p)) {
- printk("\n");
+ printk("%s\n", loglvl);
return;
}
- printk("%08x ", val);
+ printk("%s%08x ", loglvl, val);
}
}
- printk("\n");
+ printk("%s\n", loglvl);
}
}
-void printk_address(unsigned long address, int reliable)
+void printk_address(unsigned long address, int reliable, const char *loglvl)
{
- printk(" [<%p>] %s%pS\n", (void *) address,
+ printk("%s [<%p>] %s%pS\n", loglvl, (void *) address,
reliable ? "" : "? ", (void *) address);
}
@@ -117,8 +118,7 @@ static int print_trace_stack(void *data, char *name)
*/
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
- printk("%s", (char *)data);
- printk_address(addr, reliable);
+ printk_address(addr, reliable, (char *)data);
}
static const struct stacktrace_ops print_trace_ops = {
@@ -127,16 +127,16 @@ static const struct stacktrace_ops print_trace_ops = {
};
void show_trace(struct task_struct *tsk, unsigned long *sp,
- struct pt_regs *regs)
+ struct pt_regs *regs, const char *loglvl)
{
if (regs && user_mode(regs))
return;
- printk("\nCall trace:\n");
+ printk("%s\nCall trace:\n", loglvl);
- unwind_stack(tsk, regs, sp, &print_trace_ops, "");
+ unwind_stack(tsk, regs, sp, &print_trace_ops, (void *)loglvl);
- printk("\n");
+ printk("%s\n", loglvl);
if (!tsk)
tsk = current;
@@ -144,7 +144,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
debug_show_held_locks(tsk);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
unsigned long stack;
@@ -156,7 +156,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
sp = (unsigned long *)tsk->thread.sp;
stack = (unsigned long)sp;
- dump_mem("Stack: ", stack, THREAD_SIZE +
+ dump_mem("Stack: ", loglvl, stack, THREAD_SIZE +
(unsigned long)task_stack_page(tsk));
- show_trace(tsk, sp, NULL);
+ show_trace(tsk, sp, NULL, loglvl);
}
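
The point of threading loglvl through these helpers: a multi-line dump is emitted as many printk() calls, and each call must repeat the caller-supplied level prefix or the remaining lines fall back to the default severity. A userspace sketch of the same pattern (the "<6>" prefix is illustrative; the kernel encodes levels with a SOH byte):

    #include <stdio.h>

    #define KERN_INFO "<6>"

    /* mirrors printk("%s%04lx: %08x ", loglvl, ...) in the hunk above */
    static void dump_line(const char *loglvl, unsigned long p, unsigned int val)
    {
        printf("%s%04lx: %08x\n", loglvl, p & 0xffff, val);
    }

    int main(void)
    {
        dump_line(KERN_INFO, 0x8c001234UL, 0xdeadbeefU);
        return 0;
    }
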
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
deleted file mode 100644
index 67685e1f00e1..000000000000
--- a/arch/sh/kernel/head_64.S
+++ /dev/null
@@ -1,346 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/head_64.S
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- */
-
-#include <linux/init.h>
-
-#include <asm/page.h>
-#include <asm/cache.h>
-#include <asm/tlb.h>
-#include <cpu/registers.h>
-#include <cpu/mmu_context.h>
-#include <asm/thread_info.h>
-
-/*
- * MMU defines: TLB boundaries.
- */
-
-#define MMUIR_FIRST ITLB_FIXED
-#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-#define MMUIR_STEP TLB_STEP
-
-#define MMUDR_FIRST DTLB_FIXED
-#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
-#define MMUDR_STEP TLB_STEP
-
-/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
-#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
-#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
-#endif
-
-/*
- * MMU defines: Fixed TLBs.
- */
-/* Deal safely with the case where the base of RAM is not 512Mb aligned */
-
-#define ALIGN_512M_MASK (0xffffffffe0000000)
-#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
-#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
-
-#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
- /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-
-#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
- /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
-
-#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE
- /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
-#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL
- /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
-
-#ifdef CONFIG_CACHE_OFF
-#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
-#else
-#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
-#endif
-#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
-
-#if defined (CONFIG_CACHE_OFF)
-#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
-#elif defined (CONFIG_CACHE_WRITETHROUGH)
-#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
- /* WT, invalidate */
-#elif defined (CONFIG_CACHE_WRITEBACK)
-#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
- /* WB, invalidate */
-#else
-#error preprocessor flag CONFIG_CACHE_... not recognized!
-#endif
-
-#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
-
- .section .empty_zero_page, "aw"
- .global empty_zero_page
-
-empty_zero_page:
- .long 1 /* MOUNT_ROOT_RDONLY */
- .long 0 /* RAMDISK_FLAGS */
- .long 0x0200 /* ORIG_ROOT_DEV */
- .long 1 /* LOADER_TYPE */
- .long 0x00800000 /* INITRD_START */
- .long 0x00800000 /* INITRD_SIZE */
- .long 0
-
- .text
- .balign 4096,0,4096
-
- .section .data, "aw"
- .balign PAGE_SIZE
-
- .section .data, "aw"
- .balign PAGE_SIZE
-
- .global mmu_pdtp_cache
-mmu_pdtp_cache:
- .space PAGE_SIZE, 0
-
- .global fpu_in_use
-fpu_in_use: .quad 0
-
-
- __HEAD
- .balign L1_CACHE_BYTES
-/*
- * Condition at the entry of __stext:
- * . Reset state:
- * . SR.FD = 1 (FPU disabled)
- * . SR.BL = 1 (Exceptions disabled)
- * . SR.MD = 1 (Privileged Mode)
- * . SR.MMU = 0 (MMU Disabled)
- * . SR.CD = 0 (CTC User Visible)
- * . SR.IMASK = Undefined (Interrupt Mask)
- *
- * Operations supposed to be performed by __stext:
- * . prevent speculative fetch onto device memory while MMU is off
- * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
- * . first, save CPU state and set it to something harmless
- * . any CPU detection and/or endianness settings (?)
- * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
- * . set initial TLB entries for cached and uncached regions
- * (no fine granularity paging)
- * . set initial cache state
- * . enable MMU and caches
- * . set CPU to a consistent state
- * . registers (including stack pointer and current/KCR0)
- * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
- * at this stage. This is all to later Linux initialization steps.
- * . initialize FPU
- * . clear BSS
- * . jump into start_kernel()
- * . be prepared for the hopeless case of start_kernel() returning.
- *
- */
- .global _stext
-_stext:
- /*
- * Prevent speculative fetch on device memory due to
- * uninitialized target registers.
- */
- ptabs/u ZERO, tr0
- ptabs/u ZERO, tr1
- ptabs/u ZERO, tr2
- ptabs/u ZERO, tr3
- ptabs/u ZERO, tr4
- ptabs/u ZERO, tr5
- ptabs/u ZERO, tr6
- ptabs/u ZERO, tr7
- synci
-
- /*
- * Read/Set CPU state. After this block:
- * r29 = Initial SR
- */
- getcon SR, r29
- movi SR_HARMLESS, r20
- putcon r20, SR
-
- /*
- * Initialize EMI/LMI. To Be Done.
- */
-
- /*
- * CPU detection and/or endianness settings (?). To Be Done.
- * Pure PIC code here, please ! Just save state into r30.
- * After this block:
- * r30 = CPU type/Platform Endianness
- */
-
- /*
- * Set initial TLB entries for cached and uncached regions.
- * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
- */
- /* Clear ITLBs */
- pta clear_ITLB, tr1
- movi MMUIR_FIRST, r21
- movi MMUIR_END, r22
-clear_ITLB:
- putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
- addi r21, MMUIR_STEP, r21
- bne r21, r22, tr1
-
- /* Clear DTLBs */
- pta clear_DTLB, tr1
- movi MMUDR_FIRST, r21
- movi MMUDR_END, r22
-clear_DTLB:
- putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
- addi r21, MMUDR_STEP, r21
- bne r21, r22, tr1
-
- /* Map one big (512Mb) page for ITLB */
- movi MMUIR_FIRST, r21
- movi MMUIR_TEXT_L, r22 /* PTEL first */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
- movi MMUIR_TEXT_H, r22 /* PTEH last */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
-
- /* Map one big CACHED (512Mb) page for DTLB */
- movi MMUDR_FIRST, r21
- movi MMUDR_CACHED_L, r22 /* PTEL first */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
- movi MMUDR_CACHED_H, r22 /* PTEH last */
- add.l r22, r63, r22 /* Sign extend */
- putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
-
- /*
- * Setup a DTLB translation for SCIF phys.
- */
- addi r21, MMUDR_STEP, r21
- movi 0x0a03, r22 /* SCIF phys */
- shori 0x0148, r22
- putcfg r21, 1, r22 /* PTEL first */
- movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
- shori 0x0003, r22
- putcfg r21, 0, r22 /* PTEH last */
-
- /*
- * Set cache behaviours.
- */
- /* ICache */
- movi ICCR_BASE, r21
- movi ICCR0_INIT_VAL, r22
- movi ICCR1_INIT_VAL, r23
- putcfg r21, ICCR_REG0, r22
- putcfg r21, ICCR_REG1, r23
-
- /* OCache */
- movi OCCR_BASE, r21
- movi OCCR0_INIT_VAL, r22
- movi OCCR1_INIT_VAL, r23
- putcfg r21, OCCR_REG0, r22
- putcfg r21, OCCR_REG1, r23
-
-
- /*
- * Enable Caches and MMU. Do the first non-PIC jump.
- * Now head.S global variables, constants and externs
- * can be used.
- */
- getcon SR, r21
- movi SR_ENABLE_MMU, r22
- or r21, r22, r21
- putcon r21, SSR
- movi hyperspace, r22
- ori r22, 1, r22 /* Make it SHmedia, not required but..*/
- putcon r22, SPC
- synco
- rte /* And now go into the hyperspace ... */
-hyperspace: /* ... that's the next instruction ! */
-
- /*
- * Set CPU to a consistent state.
- * r31 = FPU support flag
- * tr0/tr7 in use. Others give a chance to loop somewhere safe
- */
- movi start_kernel, r32
- ori r32, 1, r32
-
- ptabs r32, tr0 /* r32 = _start_kernel address */
- pta/u hopeless, tr1
- pta/u hopeless, tr2
- pta/u hopeless, tr3
- pta/u hopeless, tr4
- pta/u hopeless, tr5
- pta/u hopeless, tr6
- pta/u hopeless, tr7
- gettr tr1, r28 /* r28 = hopeless address */
-
- /* Set initial stack pointer */
- movi init_thread_union, SP
- putcon SP, KCR0 /* Set current to init_task */
- movi THREAD_SIZE, r22 /* Point to the end */
- add SP, r22, SP
-
- /*
- * Initialize FPU.
- * Keep FPU flag in r31. After this block:
- * r31 = FPU flag
- */
- movi fpu_in_use, r31 /* Temporary */
-
-#ifdef CONFIG_SH_FPU
- getcon SR, r21
- movi SR_ENABLE_FPU, r22
- and r21, r22, r22
- putcon r22, SR /* Try to enable */
- getcon SR, r22
- xor r21, r22, r21
- shlri r21, 15, r21 /* Supposedly 0/1 */
- st.q r31, 0 , r21 /* Set fpu_in_use */
-#else
- movi 0, r21
- st.q r31, 0 , r21 /* Set fpu_in_use */
-#endif
- or r21, ZERO, r31 /* Set FPU flag at last */
-
-#ifndef CONFIG_SH_NO_BSS_INIT
-/* Don't clear BSS if running on slow platforms such as an RTL simulation,
- remote memory via SHdebug link, etc. For these the memory can be guaranteed
- to be all zero on boot anyway. */
- /*
- * Clear bss
- */
- pta clear_quad, tr1
- movi __bss_start, r22
- movi _end, r23
-clear_quad:
- st.q r22, 0, ZERO
- addi r22, 8, r22
- bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
-#endif
- pta/u hopeless, tr1
-
- /* Say bye to head.S but be prepared to wrongly get back ... */
- blink tr0, LINK
-
- /* If we ever get back here through LINK/tr1-tr7 */
- pta/u hopeless, tr7
-
-hopeless:
- /*
- * Something's badly wrong here. Loop endlessly,
- * there's nothing more we can do about it.
- *
- * Note on hopeless: it can be jumped into either
- * before or after jumping into hyperspace. The only
- * requirement is that it be reached PIC (PTA) before,
- * and either way (PTA/PTABS) after. Given the virtual-
- * to-physical mapping, a simulator/emulator can easily
- * tell where we came from just by looking at the
- * hopeless (PC) address.
- *
- * For debugging purposes:
- * (r28) hopeless/loop address
- * (r29) Original SR
- * (r30) CPU type/Platform endianness
- * (r31) FPU Support
- * (r32) _start_kernel address
- */
- blink tr7, ZERO
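
The 512 MiB alignment arithmetic from ALIGN_512M_MASK above can be checked standalone; both the effective and physical bases are masked down to a 512 MiB boundary before being merged into the PTEH/PTEL values (the two CONFIG_ values below are examples, not taken from a real defconfig):

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_512M_MASK 0xffffffffe0000000ULL

    int main(void)
    {
        uint64_t page_offset  = 0x80000000ULL;  /* example CONFIG_PAGE_OFFSET  */
        uint64_t memory_start = 0x0c000000ULL;  /* example CONFIG_MEMORY_START */

        printf("effective base: %#llx\n",
               (unsigned long long)((page_offset + memory_start) & ALIGN_512M_MASK));
        printf("physical base:  %#llx\n",
               (unsigned long long)(memory_start & ALIGN_512M_MASK));
        return 0;
    }
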
diff --git a/arch/sh/kernel/irq_64.c b/arch/sh/kernel/irq_64.c
deleted file mode 100644
index 7a1f50435e33..000000000000
--- a/arch/sh/kernel/irq_64.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SHmedia irqflags support
- *
- * Copyright (C) 2006 - 2009 Paul Mundt
- */
-#include <linux/irqflags.h>
-#include <linux/module.h>
-#include <cpu/registers.h>
-
-void notrace arch_local_irq_restore(unsigned long flags)
-{
- unsigned long long __dummy;
-
- if (flags == ARCH_IRQ_DISABLED) {
- __asm__ __volatile__ (
- "getcon " __SR ", %0\n\t"
- "or %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (ARCH_IRQ_DISABLED)
- );
- } else {
- __asm__ __volatile__ (
- "getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy)
- : "r" (~ARCH_IRQ_DISABLED)
- );
- }
-}
-EXPORT_SYMBOL(arch_local_irq_restore);
-
-unsigned long notrace arch_local_save_flags(void)
-{
- unsigned long flags;
-
- __asm__ __volatile__ (
- "getcon " __SR ", %0\n\t"
- "and %0, %1, %0"
- : "=&r" (flags)
- : "r" (ARCH_IRQ_DISABLED)
- );
-
- return flags;
-}
-EXPORT_SYMBOL(arch_local_save_flags);
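
Both paths above only ever set or clear the interrupt-mask bits of SR. A userspace model of the same logic; the ARCH_IRQ_DISABLED value here is illustrative, not the real SR encoding:

    #include <stdio.h>

    #define ARCH_IRQ_DISABLED 0xf0UL   /* illustrative mask */

    static unsigned long sr;           /* stands in for the SR control register */

    static void irq_restore(unsigned long flags)
    {
        if (flags == ARCH_IRQ_DISABLED)
            sr |= ARCH_IRQ_DISABLED;   /* the getcon/or/putcon path */
        else
            sr &= ~ARCH_IRQ_DISABLED;  /* the getcon/and/putcon path */
    }

    static unsigned long save_flags(void)
    {
        return sr & ARCH_IRQ_DISABLED;
    }

    int main(void)
    {
        irq_restore(ARCH_IRQ_DISABLED);
        printf("%#lx\n", save_flags());  /* 0xf0: disabled */
        irq_restore(0);
        printf("%#lx\n", save_flags());  /* 0: enabled */
        return 0;
    }
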
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 63d63a36f6f2..4a98980b8a07 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -14,7 +14,6 @@
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c
index bbc78d1d618e..b9cee98a754e 100644
--- a/arch/sh/kernel/module.c
+++ b/arch/sh/kernel/module.c
@@ -46,15 +46,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ ELF32_R_SYM(rel[i].r_info);
relocation = sym->st_value + rel[i].r_addend;
-#ifdef CONFIG_SUPERH64
- /* For text addresses, bit2 of the st_other field indicates
- * whether the symbol is SHmedia (1) or SHcompact (0). If
- * SHmedia, the LSB of the symbol needs to be asserted
- * for the CPU to be in SHmedia mode when it starts executing
- * the branch target. */
- relocation |= !!(sym->st_other & 4);
-#endif
-
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_NONE:
break;
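
For the record, the semantics of the dropped SUPERH64 block: bit 2 of st_other marked an SHmedia symbol, and branches into SHmedia code need bit 0 of the target address set so the CPU leaves SHcompact mode. Restated as a standalone sketch:

    #include <stdio.h>

    /* Mirror of the removed fixup: assert the LSB of the relocation
     * when ELF st_other bit 2 flags the symbol as SHmedia. */
    static unsigned long shmedia_fixup(unsigned long reloc, unsigned char st_other)
    {
        return reloc | !!(st_other & 4);
    }

    int main(void)
    {
        printf("%#lx\n", shmedia_fixup(0x1000, 4));  /* 0x1001 */
        printf("%#lx\n", shmedia_fixup(0x1000, 0));  /* 0x1000 */
        return 0;
    }
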
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 4d1bfc848dd3..169832fcf21b 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -23,9 +23,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
-#ifdef CONFIG_SUPERH32
unlazy_fpu(src, task_pt_regs(src));
-#endif
*dst = *src;
if (src->thread.xstate) {
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index a094633874c3..456cc8d171f7 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -59,7 +59,7 @@ void show_regs(struct pt_regs * regs)
printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
regs->mach, regs->macl, regs->gbr, regs->pr);
- show_trace(NULL, (unsigned long *)regs->regs[15], regs);
+ show_trace(NULL, (unsigned long *)regs->regs[15], regs, KERN_DEFAULT);
show_code(regs);
}
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
deleted file mode 100644
index c2844a2e18cd..000000000000
--- a/arch/sh/kernel/process_64.c
+++ /dev/null
@@ -1,461 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/process_64.c
- *
- * This file handles the architecture-dependent parts of process handling..
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- *
- * Started from SH3/4 version:
- * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
- *
- * In turn started from i386 version:
- * Copyright (C) 1995 Linus Torvalds
- */
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/ptrace.h>
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/sched/debug.h>
-#include <linux/sched/task.h>
-#include <linux/sched/task_stack.h>
-#include <asm/syscalls.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/mmu_context.h>
-#include <asm/fpu.h>
-#include <asm/switch_to.h>
-
-struct task_struct *last_task_used_math = NULL;
-struct pt_regs fake_swapper_regs = { 0, };
-
-void show_regs(struct pt_regs *regs)
-{
- unsigned long long ah, al, bh, bl, ch, cl;
-
- printk("\n");
- show_regs_print_info(KERN_DEFAULT);
-
- ah = (regs->pc) >> 32;
- al = (regs->pc) & 0xffffffff;
- bh = (regs->regs[18]) >> 32;
- bl = (regs->regs[18]) & 0xffffffff;
- ch = (regs->regs[15]) >> 32;
- cl = (regs->regs[15]) & 0xffffffff;
- printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->sr) >> 32;
- al = (regs->sr) & 0xffffffff;
- asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
- asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
- bh = (bh) >> 32;
- bl = (bl) & 0xffffffff;
- asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
- asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
- ch = (ch) >> 32;
- cl = (cl) & 0xffffffff;
- printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[0]) >> 32;
- al = (regs->regs[0]) & 0xffffffff;
- bh = (regs->regs[1]) >> 32;
- bl = (regs->regs[1]) & 0xffffffff;
- ch = (regs->regs[2]) >> 32;
- cl = (regs->regs[2]) & 0xffffffff;
- printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[3]) >> 32;
- al = (regs->regs[3]) & 0xffffffff;
- bh = (regs->regs[4]) >> 32;
- bl = (regs->regs[4]) & 0xffffffff;
- ch = (regs->regs[5]) >> 32;
- cl = (regs->regs[5]) & 0xffffffff;
- printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[6]) >> 32;
- al = (regs->regs[6]) & 0xffffffff;
- bh = (regs->regs[7]) >> 32;
- bl = (regs->regs[7]) & 0xffffffff;
- ch = (regs->regs[8]) >> 32;
- cl = (regs->regs[8]) & 0xffffffff;
- printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[9]) >> 32;
- al = (regs->regs[9]) & 0xffffffff;
- bh = (regs->regs[10]) >> 32;
- bl = (regs->regs[10]) & 0xffffffff;
- ch = (regs->regs[11]) >> 32;
- cl = (regs->regs[11]) & 0xffffffff;
- printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[12]) >> 32;
- al = (regs->regs[12]) & 0xffffffff;
- bh = (regs->regs[13]) >> 32;
- bl = (regs->regs[13]) & 0xffffffff;
- ch = (regs->regs[14]) >> 32;
- cl = (regs->regs[14]) & 0xffffffff;
- printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[16]) >> 32;
- al = (regs->regs[16]) & 0xffffffff;
- bh = (regs->regs[17]) >> 32;
- bl = (regs->regs[17]) & 0xffffffff;
- ch = (regs->regs[19]) >> 32;
- cl = (regs->regs[19]) & 0xffffffff;
- printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[20]) >> 32;
- al = (regs->regs[20]) & 0xffffffff;
- bh = (regs->regs[21]) >> 32;
- bl = (regs->regs[21]) & 0xffffffff;
- ch = (regs->regs[22]) >> 32;
- cl = (regs->regs[22]) & 0xffffffff;
- printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[23]) >> 32;
- al = (regs->regs[23]) & 0xffffffff;
- bh = (regs->regs[24]) >> 32;
- bl = (regs->regs[24]) & 0xffffffff;
- ch = (regs->regs[25]) >> 32;
- cl = (regs->regs[25]) & 0xffffffff;
- printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[26]) >> 32;
- al = (regs->regs[26]) & 0xffffffff;
- bh = (regs->regs[27]) >> 32;
- bl = (regs->regs[27]) & 0xffffffff;
- ch = (regs->regs[28]) >> 32;
- cl = (regs->regs[28]) & 0xffffffff;
- printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[29]) >> 32;
- al = (regs->regs[29]) & 0xffffffff;
- bh = (regs->regs[30]) >> 32;
- bl = (regs->regs[30]) & 0xffffffff;
- ch = (regs->regs[31]) >> 32;
- cl = (regs->regs[31]) & 0xffffffff;
- printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[32]) >> 32;
- al = (regs->regs[32]) & 0xffffffff;
- bh = (regs->regs[33]) >> 32;
- bl = (regs->regs[33]) & 0xffffffff;
- ch = (regs->regs[34]) >> 32;
- cl = (regs->regs[34]) & 0xffffffff;
- printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[35]) >> 32;
- al = (regs->regs[35]) & 0xffffffff;
- bh = (regs->regs[36]) >> 32;
- bl = (regs->regs[36]) & 0xffffffff;
- ch = (regs->regs[37]) >> 32;
- cl = (regs->regs[37]) & 0xffffffff;
- printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[38]) >> 32;
- al = (regs->regs[38]) & 0xffffffff;
- bh = (regs->regs[39]) >> 32;
- bl = (regs->regs[39]) & 0xffffffff;
- ch = (regs->regs[40]) >> 32;
- cl = (regs->regs[40]) & 0xffffffff;
- printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[41]) >> 32;
- al = (regs->regs[41]) & 0xffffffff;
- bh = (regs->regs[42]) >> 32;
- bl = (regs->regs[42]) & 0xffffffff;
- ch = (regs->regs[43]) >> 32;
- cl = (regs->regs[43]) & 0xffffffff;
- printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[44]) >> 32;
- al = (regs->regs[44]) & 0xffffffff;
- bh = (regs->regs[45]) >> 32;
- bl = (regs->regs[45]) & 0xffffffff;
- ch = (regs->regs[46]) >> 32;
- cl = (regs->regs[46]) & 0xffffffff;
- printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[47]) >> 32;
- al = (regs->regs[47]) & 0xffffffff;
- bh = (regs->regs[48]) >> 32;
- bl = (regs->regs[48]) & 0xffffffff;
- ch = (regs->regs[49]) >> 32;
- cl = (regs->regs[49]) & 0xffffffff;
- printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[50]) >> 32;
- al = (regs->regs[50]) & 0xffffffff;
- bh = (regs->regs[51]) >> 32;
- bl = (regs->regs[51]) & 0xffffffff;
- ch = (regs->regs[52]) >> 32;
- cl = (regs->regs[52]) & 0xffffffff;
- printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[53]) >> 32;
- al = (regs->regs[53]) & 0xffffffff;
- bh = (regs->regs[54]) >> 32;
- bl = (regs->regs[54]) & 0xffffffff;
- ch = (regs->regs[55]) >> 32;
- cl = (regs->regs[55]) & 0xffffffff;
- printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[56]) >> 32;
- al = (regs->regs[56]) & 0xffffffff;
- bh = (regs->regs[57]) >> 32;
- bl = (regs->regs[57]) & 0xffffffff;
- ch = (regs->regs[58]) >> 32;
- cl = (regs->regs[58]) & 0xffffffff;
- printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[59]) >> 32;
- al = (regs->regs[59]) & 0xffffffff;
- bh = (regs->regs[60]) >> 32;
- bl = (regs->regs[60]) & 0xffffffff;
- ch = (regs->regs[61]) >> 32;
- cl = (regs->regs[61]) & 0xffffffff;
- printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[62]) >> 32;
- al = (regs->regs[62]) & 0xffffffff;
- bh = (regs->tregs[0]) >> 32;
- bl = (regs->tregs[0]) & 0xffffffff;
- ch = (regs->tregs[1]) >> 32;
- cl = (regs->tregs[1]) & 0xffffffff;
- printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->tregs[2]) >> 32;
- al = (regs->tregs[2]) & 0xffffffff;
- bh = (regs->tregs[3]) >> 32;
- bl = (regs->tregs[3]) & 0xffffffff;
- ch = (regs->tregs[4]) >> 32;
- cl = (regs->tregs[4]) & 0xffffffff;
- printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->tregs[5]) >> 32;
- al = (regs->tregs[5]) & 0xffffffff;
- bh = (regs->tregs[6]) >> 32;
- bl = (regs->tregs[6]) & 0xffffffff;
- ch = (regs->tregs[7]) >> 32;
- cl = (regs->tregs[7]) & 0xffffffff;
- printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- /*
- * If we're in kernel mode, dump the stack too..
- */
- if (!user_mode(regs)) {
- void show_stack(struct task_struct *tsk, unsigned long *sp);
- unsigned long sp = regs->regs[15] & 0xffffffff;
- struct task_struct *tsk = get_current();
-
- tsk->thread.kregs = regs;
-
- show_stack(tsk, (unsigned long *)sp);
- }
-}
-
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(struct task_struct *tsk)
-{
- /*
- * See arch/sparc/kernel/process.c for the precedent for doing
- * this -- RPC.
- *
- * The SH-5 FPU save/restore approach relies on
- * last_task_used_math pointing to a live task_struct. When
- * another task tries to use the FPU for the 1st time, the FPUDIS
- * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
- * existing FPU state to the FP regs field within
- * last_task_used_math before re-loading the new task's FPU state
- * (or initialising it if the FPU has been used before). So if
- * last_task_used_math is stale, and its page has already been
- * re-allocated for another use, the consequences are rather
- * grim. Unless we null it here, there is no other path through
- * which it would get safely nulled.
- */
-#ifdef CONFIG_SH_FPU
- if (last_task_used_math == tsk)
- last_task_used_math = NULL;
-#endif
-}
-
-void flush_thread(void)
-{
-
- /* Called by fs/exec.c (setup_new_exec) to remove traces of a
- * previously running executable. */
-#ifdef CONFIG_SH_FPU
- if (last_task_used_math == current) {
- last_task_used_math = NULL;
- }
- /* Force FPU state to be reinitialised after exec */
- clear_used_math();
-#endif
-
- /* If we are a kernel thread about to change to a user thread,
- * update kregs.
- */
- if(current->thread.kregs==&fake_swapper_regs) {
- current->thread.kregs =
- ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
- current->thread.uregs = current->thread.kregs;
- }
-}
-
-void release_thread(struct task_struct *dead_task)
-{
- /* do nothing */
-}
-
-/* Fill in the fpu structure for a core dump.. */
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-#ifdef CONFIG_SH_FPU
- int fpvalid;
- struct task_struct *tsk = current;
-
- fpvalid = !!tsk_used_math(tsk);
- if (fpvalid) {
- if (current == last_task_used_math) {
- enable_fpu();
- save_fpu(tsk);
- disable_fpu();
- last_task_used_math = 0;
- regs->sr |= SR_FD;
- }
-
- memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
- }
-
- return fpvalid;
-#else
- return 0; /* Task didn't use the fpu at all. */
-#endif
-}
-EXPORT_SYMBOL(dump_fpu);
-
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
-
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
-{
- struct pt_regs *childregs;
-
-#ifdef CONFIG_SH_FPU
- /* can't happen for a kernel thread */
- if (last_task_used_math == current) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- current_pt_regs()->sr |= SR_FD;
- }
-#endif
- /* Copy from sh version */
- childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
- p->thread.sp = (unsigned long) childregs;
-
- if (unlikely(p->flags & PF_KTHREAD)) {
- memset(childregs, 0, sizeof(struct pt_regs));
- childregs->regs[2] = (unsigned long)arg;
- childregs->regs[3] = (unsigned long)usp;
- childregs->sr = (1 << 30); /* not user_mode */
- childregs->sr |= SR_FD; /* Invalidate FPU flag */
- p->thread.pc = (unsigned long) ret_from_kernel_thread;
- return 0;
- }
- *childregs = *current_pt_regs();
-
- /*
- * Sign extend the edited stack.
- * Note that thread.pc and thread.pc will stay
- * 32-bit wide and context switch must take care
- * of NEFF sign extension.
- */
- if (usp)
- childregs->regs[15] = neff_sign_extend(usp);
- p->thread.uregs = childregs;
-
- childregs->regs[9] = 0; /* Set return value for child */
- childregs->sr |= SR_FD; /* Invalidate FPU flag */
-
- p->thread.pc = (unsigned long) ret_from_fork;
-
- return 0;
-}
-
-#ifdef CONFIG_FRAME_POINTER
-static int in_sh64_switch_to(unsigned long pc)
-{
- extern char __sh64_switch_to_end;
- /* For a sleeping task, the PC is somewhere in the middle of the function,
- so we don't have to worry about masking the LSB off */
- return (pc >= (unsigned long) sh64_switch_to) &&
- (pc < (unsigned long) &__sh64_switch_to_end);
-}
-#endif
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long pc;
-
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
- /*
- * The same comment as on the Alpha applies here, too ...
- */
- pc = thread_saved_pc(p);
-
-#ifdef CONFIG_FRAME_POINTER
- if (in_sh64_switch_to(pc)) {
- unsigned long schedule_fp;
- unsigned long sh64_switch_to_fp;
- unsigned long schedule_caller_pc;
-
- sh64_switch_to_fp = (long) p->thread.sp;
- /* r14 is saved at offset 4 in the sh64_switch_to frame */
- schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
-
- /* and the caller of 'schedule' is (currently!) saved at offset 24
- in the frame of schedule (from disasm) */
- schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
- return schedule_caller_pc;
- }
-#endif
- return pc;
-}
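
The two-hop frame-pointer walk in get_wchan() above, modelled on a fake stack so it runs in userspace (offsets as in the comments: saved r14 at +4 in the sh64_switch_to frame, schedule()'s caller PC at the disassembly-derived +24):

    #include <stdio.h>
    #include <string.h>

    static unsigned char stack[64];    /* stand-in for kernel stack memory */

    static unsigned long load(unsigned long off)
    {
        unsigned long v;
        memcpy(&v, stack + off, sizeof(v));
        return v;
    }

    static void store(unsigned long off, unsigned long v)
    {
        memcpy(stack + off, &v, sizeof(v));
    }

    int main(void)
    {
        store(0 + 4, 32);        /* switch_to frame: saved r14 -> schedule's fp */
        store(32 + 24, 0xbeef);  /* schedule frame: caller PC                   */

        printf("wchan = %#lx\n", load(load(0 + 4) + 24));  /* 0xbeef */
        return 0;
    }
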
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index d5052c30a0e9..64bfb714943e 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -25,7 +25,6 @@
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
deleted file mode 100644
index 11085e48eaa6..000000000000
--- a/arch/sh/kernel/ptrace_64.c
+++ /dev/null
@@ -1,576 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/ptrace_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2008 Paul Mundt
- *
- * Started from SH3/4 version:
- * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- *
- * Original x86 implementation:
- * By Ross Biro 1/23/92
- * edited by Linus Torvalds
- */
-#include <linux/kernel.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/signal.h>
-#include <linux/syscalls.h>
-#include <linux/audit.h>
-#include <linux/seccomp.h>
-#include <linux/tracehook.h>
-#include <linux/elf.h>
-#include <linux/regset.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/mmu_context.h>
-#include <asm/syscalls.h>
-#include <asm/fpu.h>
-#include <asm/traps.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
-/* This mask defines the bits of the SR which the user is not allowed to
- change, which are everything except S, Q, M, PR, SZ, FR. */
-#define SR_MASK (0xffff8cfd)
-
-/*
- * Does not yet catch signals sent when the child dies
- * in exit.c or in signal.c.
- */
-
-/*
- * This routine will get a word from the user area in the process kernel stack.
- */
-static inline int get_stack_long(struct task_struct *task, int offset)
-{
- unsigned char *stack;
-
- stack = (unsigned char *)(task->thread.uregs);
- stack += offset;
- return (*((int *)stack));
-}
-
-static inline unsigned long
-get_fpu_long(struct task_struct *task, unsigned long addr)
-{
- unsigned long tmp;
- struct pt_regs *regs;
- regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
-
- if (!tsk_used_math(task)) {
- if (addr == offsetof(struct user_fpu_struct, fpscr)) {
- tmp = FPSCR_INIT;
- } else {
- tmp = 0xffffffffUL; /* matches initial value in fpu.c */
- }
- return tmp;
- }
-
- if (last_task_used_math == task) {
- enable_fpu();
- save_fpu(task);
- disable_fpu();
- last_task_used_math = 0;
- regs->sr |= SR_FD;
- }
-
- tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
- return tmp;
-}
-
-/*
- * This routine will put a word into the user area in the process kernel stack.
- */
-static inline int put_stack_long(struct task_struct *task, int offset,
- unsigned long data)
-{
- unsigned char *stack;
-
- stack = (unsigned char *)(task->thread.uregs);
- stack += offset;
- *(unsigned long *) stack = data;
- return 0;
-}
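
Editor's note: get_stack_long()/put_stack_long() above treat the saved register block as a byte array and read or write a word at a byte offset (note the original reads a 32-bit int but writes an unsigned long). A hedged user-space sketch of the same offset-based peek/poke, using a hypothetical register struct:

	/* Hedged sketch: byte-offset peek/poke; register layout is hypothetical. */
	#include <stddef.h>
	#include <string.h>

	struct regblock { unsigned long pc, sr, r[4]; };

	static unsigned long peek(const struct regblock *rb, size_t off)
	{
		unsigned long v;

		memcpy(&v, (const char *)rb + off, sizeof(v)); /* like get_stack_long() */
		return v;
	}

	static void poke(struct regblock *rb, size_t off, unsigned long v)
	{
		memcpy((char *)rb + off, &v, sizeof(v));       /* like put_stack_long() */
	}
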
-
-static inline int
-put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
-{
- struct pt_regs *regs;
-
- regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
-
- if (!tsk_used_math(task)) {
- init_fpu(task);
- } else if (last_task_used_math == task) {
- enable_fpu();
- save_fpu(task);
- disable_fpu();
- last_task_used_math = 0;
- regs->sr |= SR_FD;
- }
-
- ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
- return 0;
-}
-
-void user_enable_single_step(struct task_struct *child)
-{
- struct pt_regs *regs = child->thread.uregs;
-
- regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
-
- set_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-void user_disable_single_step(struct task_struct *child)
-{
- struct pt_regs *regs = child->thread.uregs;
-
- regs->sr &= ~SR_SSTEP;
-
- clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-}
-
-static int genregs_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- const struct pt_regs *regs = task_pt_regs(target);
- int ret;
-
- /* PC, SR, SYSCALL */
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &regs->pc,
- 0, 3 * sizeof(unsigned long long));
-
- /* R1 -> R63 */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- regs->regs,
- offsetof(struct pt_regs, regs[0]),
- 63 * sizeof(unsigned long long));
- /* TR0 -> TR7 */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- regs->tregs,
- offsetof(struct pt_regs, tregs[0]),
- 8 * sizeof(unsigned long long));
-
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct pt_regs), -1);
-
- return ret;
-}
-
-static int genregs_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- struct pt_regs *regs = task_pt_regs(target);
- int ret;
-
- /* PC, SR, SYSCALL */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &regs->pc,
- 0, 3 * sizeof(unsigned long long));
-
- /* R1 -> R63 */
- if (!ret && count > 0)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- regs->regs,
- offsetof(struct pt_regs, regs[0]),
- 63 * sizeof(unsigned long long));
-
- /* TR0 -> TR7 */
- if (!ret && count > 0)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- regs->tregs,
- offsetof(struct pt_regs, tregs[0]),
- 8 * sizeof(unsigned long long));
-
- if (!ret)
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- sizeof(struct pt_regs), -1);
-
- return ret;
-}
-
-#ifdef CONFIG_SH_FPU
-int fpregs_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- ret = init_fpu(target);
- if (ret)
- return ret;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->hardfpu, 0, -1);
-}
-
-static int fpregs_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- ret = init_fpu(target);
- if (ret)
- return ret;
-
- set_stopped_child_used_math(target);
-
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.xstate->hardfpu, 0, -1);
-}
-
-static int fpregs_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- return tsk_used_math(target) ? regset->n : 0;
-}
-#endif
-
-const struct pt_regs_offset regoffset_table[] = {
- REG_OFFSET_NAME(pc),
- REG_OFFSET_NAME(sr),
- REG_OFFSET_NAME(syscall_nr),
- REGS_OFFSET_NAME(0),
- REGS_OFFSET_NAME(1),
- REGS_OFFSET_NAME(2),
- REGS_OFFSET_NAME(3),
- REGS_OFFSET_NAME(4),
- REGS_OFFSET_NAME(5),
- REGS_OFFSET_NAME(6),
- REGS_OFFSET_NAME(7),
- REGS_OFFSET_NAME(8),
- REGS_OFFSET_NAME(9),
- REGS_OFFSET_NAME(10),
- REGS_OFFSET_NAME(11),
- REGS_OFFSET_NAME(12),
- REGS_OFFSET_NAME(13),
- REGS_OFFSET_NAME(14),
- REGS_OFFSET_NAME(15),
- REGS_OFFSET_NAME(16),
- REGS_OFFSET_NAME(17),
- REGS_OFFSET_NAME(18),
- REGS_OFFSET_NAME(19),
- REGS_OFFSET_NAME(20),
- REGS_OFFSET_NAME(21),
- REGS_OFFSET_NAME(22),
- REGS_OFFSET_NAME(23),
- REGS_OFFSET_NAME(24),
- REGS_OFFSET_NAME(25),
- REGS_OFFSET_NAME(26),
- REGS_OFFSET_NAME(27),
- REGS_OFFSET_NAME(28),
- REGS_OFFSET_NAME(29),
- REGS_OFFSET_NAME(30),
- REGS_OFFSET_NAME(31),
- REGS_OFFSET_NAME(32),
- REGS_OFFSET_NAME(33),
- REGS_OFFSET_NAME(34),
- REGS_OFFSET_NAME(35),
- REGS_OFFSET_NAME(36),
- REGS_OFFSET_NAME(37),
- REGS_OFFSET_NAME(38),
- REGS_OFFSET_NAME(39),
- REGS_OFFSET_NAME(40),
- REGS_OFFSET_NAME(41),
- REGS_OFFSET_NAME(42),
- REGS_OFFSET_NAME(43),
- REGS_OFFSET_NAME(44),
- REGS_OFFSET_NAME(45),
- REGS_OFFSET_NAME(46),
- REGS_OFFSET_NAME(47),
- REGS_OFFSET_NAME(48),
- REGS_OFFSET_NAME(49),
- REGS_OFFSET_NAME(50),
- REGS_OFFSET_NAME(51),
- REGS_OFFSET_NAME(52),
- REGS_OFFSET_NAME(53),
- REGS_OFFSET_NAME(54),
- REGS_OFFSET_NAME(55),
- REGS_OFFSET_NAME(56),
- REGS_OFFSET_NAME(57),
- REGS_OFFSET_NAME(58),
- REGS_OFFSET_NAME(59),
- REGS_OFFSET_NAME(60),
- REGS_OFFSET_NAME(61),
- REGS_OFFSET_NAME(62),
- REGS_OFFSET_NAME(63),
- TREGS_OFFSET_NAME(0),
- TREGS_OFFSET_NAME(1),
- TREGS_OFFSET_NAME(2),
- TREGS_OFFSET_NAME(3),
- TREGS_OFFSET_NAME(4),
- TREGS_OFFSET_NAME(5),
- TREGS_OFFSET_NAME(6),
- TREGS_OFFSET_NAME(7),
- REG_OFFSET_END,
-};
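
Editor's note: regoffset_table[] above maps register names to their byte offsets inside struct pt_regs so generic ptrace code can look registers up by name. A small stand-alone sketch of the same pattern, under a hypothetical register struct:

	/* Hedged sketch: name -> struct-offset table; layout is hypothetical. */
	#include <stddef.h>

	struct pt { unsigned long pc, sr, r[4]; };

	struct reg_off { const char *name; size_t off; };

	#define REG(f) { #f, offsetof(struct pt, f) }   /* name/offset pair */

	static const struct reg_off offs[] = {
		REG(pc), REG(sr),
		{ "r0", offsetof(struct pt, r[0]) },
		{ NULL, 0 },                            /* REG_OFFSET_END analogue */
	};
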
-
-/*
- * These are our native regset flavours.
- */
-enum sh_regset {
- REGSET_GENERAL,
-#ifdef CONFIG_SH_FPU
- REGSET_FPU,
-#endif
-};
-
-static const struct user_regset sh_regsets[] = {
- /*
- * Format is:
- * PC, SR, SYSCALL,
- * R1 --> R63,
- * TR0 --> TR7,
- */
- [REGSET_GENERAL] = {
- .core_note_type = NT_PRSTATUS,
- .n = ELF_NGREG,
- .size = sizeof(long long),
- .align = sizeof(long long),
- .get = genregs_get,
- .set = genregs_set,
- },
-
-#ifdef CONFIG_SH_FPU
- [REGSET_FPU] = {
- .core_note_type = NT_PRFPREG,
- .n = sizeof(struct user_fpu_struct) /
- sizeof(long long),
- .size = sizeof(long long),
- .align = sizeof(long long),
- .get = fpregs_get,
- .set = fpregs_set,
- .active = fpregs_active,
- },
-#endif
-};
-
-static const struct user_regset_view user_sh64_native_view = {
- .name = "sh64",
- .e_machine = EM_SH,
- .regsets = sh_regsets,
- .n = ARRAY_SIZE(sh_regsets),
-};
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
- return &user_sh64_native_view;
-}
-
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- int ret;
- unsigned long __user *datap = (unsigned long __user *) data;
-
- switch (request) {
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR: {
- unsigned long tmp;
-
- ret = -EIO;
- if ((addr & 3) || addr < 0)
- break;
-
- if (addr < sizeof(struct pt_regs))
- tmp = get_stack_long(child, addr);
- else if ((addr >= offsetof(struct user, fpu)) &&
- (addr < offsetof(struct user, u_fpvalid))) {
- unsigned long index;
- ret = init_fpu(child);
- if (ret)
- break;
- index = addr - offsetof(struct user, fpu);
- tmp = get_fpu_long(child, index);
- } else if (addr == offsetof(struct user, u_fpvalid)) {
- tmp = !!tsk_used_math(child);
- } else {
- break;
- }
- ret = put_user(tmp, datap);
- break;
- }
-
- case PTRACE_POKEUSR:
- /* write the word at location addr in the USER area. We must
- disallow any changes to certain SR bits or u_fpvalid, since
- this could crash the kernel or result in a security
- loophole. */
- ret = -EIO;
- if ((addr & 3) || addr < 0)
- break;
-
- if (addr < sizeof(struct pt_regs)) {
- /* Ignore change of top 32 bits of SR */
-		/* Ignore change of top 32 bits of SR */
-		if (addr == offsetof(struct pt_regs, sr) + 4) {
-			ret = 0;
-			break;
-		}
-		/* If lower 32 bits of SR, ignore non-user bits */
-		if (addr == offsetof(struct pt_regs, sr)) {
-			long cursr = get_stack_long(child, addr);
-			data &= ~(SR_MASK);
-			data |= (cursr & SR_MASK);
-		}
- ret = put_stack_long(child, addr, data);
- }
- else if ((addr >= offsetof(struct user, fpu)) &&
- (addr < offsetof(struct user, u_fpvalid))) {
- unsigned long index;
- ret = init_fpu(child);
- if (ret)
- break;
- index = addr - offsetof(struct user, fpu);
- ret = put_fpu_long(child, index, data);
- }
- break;
-
- case PTRACE_GETREGS:
- return copy_regset_to_user(child, &user_sh64_native_view,
- REGSET_GENERAL,
- 0, sizeof(struct pt_regs),
- datap);
- case PTRACE_SETREGS:
- return copy_regset_from_user(child, &user_sh64_native_view,
- REGSET_GENERAL,
- 0, sizeof(struct pt_regs),
- datap);
-#ifdef CONFIG_SH_FPU
- case PTRACE_GETFPREGS:
- return copy_regset_to_user(child, &user_sh64_native_view,
- REGSET_FPU,
- 0, sizeof(struct user_fpu_struct),
- datap);
- case PTRACE_SETFPREGS:
- return copy_regset_from_user(child, &user_sh64_native_view,
- REGSET_FPU,
- 0, sizeof(struct user_fpu_struct),
- datap);
-#endif
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
-
- return ret;
-}
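
Editor's note: the PTRACE_POKEUSR path above keeps the privileged SR bits by merging the tracer's value under SR_MASK: privileged bits come from the current SR, the rest from the tracer. The same computation in isolation (the mask value is copied from the code above):

	/* Hedged sketch: merging user-supplied bits under a privilege mask. */
	#include <stdio.h>

	#define SR_MASK 0xffff8cfdUL   /* bits the tracer must not change */

	static unsigned long merge_sr(unsigned long cur, unsigned long user)
	{
		return (cur & SR_MASK) | (user & ~SR_MASK);
	}

	int main(void)
	{
		/* all-ones from the tracer only flips the unprivileged bits */
		printf("%#lx\n", merge_sr(0x40008000UL, 0xffffffffUL));
		return 0;
	}
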
-
-asmlinkage int sh64_ptrace(long request, long pid,
- unsigned long addr, unsigned long data)
-{
-#define WPC_DBRMODE 0x0d104008
- static unsigned long first_call;
-
- if (!test_and_set_bit(0, &first_call)) {
- /* Set WPC.DBRMODE to 0. This makes all debug events get
- * delivered through RESVEC, i.e. into the handlers in entry.S.
- * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
- * would normally be left set to 1, which makes debug events get
- * delivered through DBRVEC, i.e. into the remote gdb's
- * handlers. This prevents ptrace getting them, and confuses
- * the remote gdb.) */
- printk("DBRMODE set to 0 to permit native debugging\n");
- poke_real_address_q(WPC_DBRMODE, 0);
- }
-
- return sys_ptrace(request, pid, addr, data);
-}
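
Editor's note: sh64_ptrace() above uses test_and_set_bit() as a run-once latch so the WPC.DBRMODE poke happens only on the first call. The same idiom in portable C11 atomics, as a hedged analogue rather than the kernel's bit API:

	/* Hedged sketch: run-once latch, analogous to test_and_set_bit(). */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag first_call = ATOMIC_FLAG_INIT;

	static void maybe_one_time_setup(void)
	{
		/* only the first caller sees the flag clear */
		if (!atomic_flag_test_and_set(&first_call))
			puts("one-time debug-mode setup runs here");
	}
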
-
-asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
-{
- long long ret = 0;
-
- secure_computing_strict(regs->regs[9]);
-
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
- * Tracing decided this syscall should not happen.
- * We'll return a bogus call number to get an ENOSYS
-		 * error, but leave the original number in regs->regs[9].
- */
- ret = -1LL;
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->regs[9]);
-
- audit_syscall_entry(regs->regs[1], regs->regs[2], regs->regs[3],
- regs->regs[4], regs->regs[5]);
-
- return ret ?: regs->regs[9];
-}
-
-asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
-{
- int step;
-
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->regs[9]);
-
- step = test_thread_flag(TIF_SINGLESTEP);
- if (step || test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, step);
-}
-
-/* Called with interrupts disabled */
-asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
-{
-	/* This is called after a single-step exception (DEBUGSS).
-	   There is no need to change the PC: this is a post-execution
-	   exception, and entry.S does not touch the PC for DEBUGSS.
-	   We must clear the single-step bit in SR to avoid stepping
-	   continually. */
- local_irq_enable();
- regs->sr &= ~SR_SSTEP;
- force_sig(SIGTRAP);
-}
-
-/* Called with interrupts disabled */
-BUILD_TRAP_HANDLER(breakpoint)
-{
- TRAP_HANDLER_DECL;
-
- /* We need to forward step the PC, to counteract the backstep done
- in signal.c. */
- local_irq_enable();
- force_sig(SIGTRAP);
- regs->pc += 4;
-}
-
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
- user_disable_single_step(child);
-}
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
index 11001a8a5fe0..5c33f036418b 100644
--- a/arch/sh/kernel/reboot.c
+++ b/arch/sh/kernel/reboot.c
@@ -4,9 +4,7 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
-#ifdef CONFIG_SUPERH32
#include <asm/watchdog.h>
-#endif
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/tlbflush.h>
@@ -15,13 +13,11 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
-#ifdef CONFIG_SUPERH32
static void watchdog_trigger_immediate(void)
{
sh_wdt_write_cnt(0xFF);
sh_wdt_write_csr(0xC2);
}
-#endif
static void native_machine_restart(char * __unused)
{
@@ -33,10 +29,8 @@ static void native_machine_restart(char * __unused)
/* Address error with SR.BL=1 first. */
trigger_address_error();
-#ifdef CONFIG_SUPERH32
/* If that fails or is unsupported, go for the watchdog next. */
watchdog_trigger_immediate();
-#endif
/*
* Give up and sleep.
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 282774472603..5858936cb431 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -38,6 +38,13 @@ DECLARE_EXPORT(__ashlsi3);
DECLARE_EXPORT(__lshrsi3_r0);
DECLARE_EXPORT(__ashrsi3_r0);
DECLARE_EXPORT(__ashlsi3_r0);
+
+DECLARE_EXPORT(__ashiftrt_r4_0);
+DECLARE_EXPORT(__ashiftrt_r4_1);
+DECLARE_EXPORT(__ashiftrt_r4_2);
+DECLARE_EXPORT(__ashiftrt_r4_3);
+DECLARE_EXPORT(__ashiftrt_r4_4);
+DECLARE_EXPORT(__ashiftrt_r4_5);
DECLARE_EXPORT(__ashiftrt_r4_6);
DECLARE_EXPORT(__ashiftrt_r4_7);
DECLARE_EXPORT(__ashiftrt_r4_8);
@@ -48,13 +55,23 @@ DECLARE_EXPORT(__ashiftrt_r4_12);
DECLARE_EXPORT(__ashiftrt_r4_13);
DECLARE_EXPORT(__ashiftrt_r4_14);
DECLARE_EXPORT(__ashiftrt_r4_15);
+DECLARE_EXPORT(__ashiftrt_r4_16);
+DECLARE_EXPORT(__ashiftrt_r4_17);
+DECLARE_EXPORT(__ashiftrt_r4_18);
+DECLARE_EXPORT(__ashiftrt_r4_19);
DECLARE_EXPORT(__ashiftrt_r4_20);
DECLARE_EXPORT(__ashiftrt_r4_21);
DECLARE_EXPORT(__ashiftrt_r4_22);
DECLARE_EXPORT(__ashiftrt_r4_23);
DECLARE_EXPORT(__ashiftrt_r4_24);
+DECLARE_EXPORT(__ashiftrt_r4_25);
+DECLARE_EXPORT(__ashiftrt_r4_26);
DECLARE_EXPORT(__ashiftrt_r4_27);
+DECLARE_EXPORT(__ashiftrt_r4_28);
+DECLARE_EXPORT(__ashiftrt_r4_29);
DECLARE_EXPORT(__ashiftrt_r4_30);
+DECLARE_EXPORT(__ashiftrt_r4_31);
+DECLARE_EXPORT(__ashiftrt_r4_32);
DECLARE_EXPORT(__movstr);
DECLARE_EXPORT(__movstrSI8);
DECLARE_EXPORT(__movstrSI12);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
deleted file mode 100644
index 9de17065afb4..000000000000
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/sh_ksyms_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- */
-#include <linux/rwsem.h>
-#include <linux/module.h>
-#include <linux/smp.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-#include <linux/screen_info.h>
-#include <asm/cacheflush.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/irq.h>
-
-EXPORT_SYMBOL(__put_user_asm_b);
-EXPORT_SYMBOL(__put_user_asm_w);
-EXPORT_SYMBOL(__put_user_asm_l);
-EXPORT_SYMBOL(__put_user_asm_q);
-EXPORT_SYMBOL(__get_user_asm_b);
-EXPORT_SYMBOL(__get_user_asm_w);
-EXPORT_SYMBOL(__get_user_asm_l);
-EXPORT_SYMBOL(__get_user_asm_q);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(__const_udelay);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcpy);
-
-/* Ugh. These come in from libgcc.a at link time. */
-#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
-
-DECLARE_EXPORT(__sdivsi3);
-DECLARE_EXPORT(__sdivsi3_1);
-DECLARE_EXPORT(__sdivsi3_2);
-DECLARE_EXPORT(__udivsi3);
-DECLARE_EXPORT(__div_table);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index 24473fa6c3b6..a0fbb8427b39 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -28,7 +28,6 @@
#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
deleted file mode 100644
index b9aaa9266b34..000000000000
--- a/arch/sh/kernel/signal_64.c
+++ /dev/null
@@ -1,567 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/signal_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2008 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/personality.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/tracehook.h>
-#include <asm/ucontext.h>
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/fpu.h>
-
-#define REG_RET 9
-#define REG_ARG1 2
-#define REG_ARG2 3
-#define REG_ARG3 4
-#define REG_SP 15
-#define REG_PR 18
-#define REF_REG_RET regs->regs[REG_RET]
-#define REF_REG_SP regs->regs[REG_SP]
-#define DEREF_REG_PR regs->regs[REG_PR]
-
-#define DEBUG_SIG 0
-
-static void
-handle_signal(struct ksignal *ksig, struct pt_regs *regs);
-
-static inline void
-handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
-{
- /* If we're not from a syscall, bail out */
- if (regs->syscall_nr < 0)
- return;
-
-	/* Check for system call restart. */
- switch (regs->regs[REG_RET]) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- no_system_call_restart:
- regs->regs[REG_RET] = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(sa->sa_flags & SA_RESTART))
- goto no_system_call_restart;
- /* fallthrough */
- case -ERESTARTNOINTR:
- /* Decode syscall # */
- regs->regs[REG_RET] = regs->syscall_nr;
- regs->pc -= 4;
- break;
- }
-}
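
Editor's note: handle_syscall_restart() above implements the usual restart contract: -ERESTART* return codes either become -EINTR or are undone by re-loading the syscall number into the return register and backing the PC up one 4-byte instruction so the trap re-executes. A condensed sketch of that decision (ERESTARTSYS's value is the conventional kernel-internal one):

	/* Hedged sketch: syscall auto-restart, mirroring the switch above. */
	#include <errno.h>

	#define ERESTARTSYS 512            /* kernel-internal, never seen by userspace */

	struct regs_sketch { long long ret, pc, syscall_nr; };

	static void maybe_restart(struct regs_sketch *r, int sa_restart)
	{
		if (r->ret != -ERESTARTSYS)
			return;
		if (!sa_restart) {
			r->ret = -EINTR;   /* handler lacks SA_RESTART */
			return;
		}
		r->ret = r->syscall_nr;    /* re-load the syscall number */
		r->pc -= 4;                /* re-execute the trap instruction */
	}
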
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- *
- * Note that we go through the signals twice: once to check the signals that
- * the kernel can handle, and then we build all the user-level signal handling
- * stack-frames in one go after that.
- */
-static void do_signal(struct pt_regs *regs)
-{
- struct ksignal ksig;
-
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- if (get_signal(&ksig)) {
- handle_syscall_restart(regs, &ksig.ka.sa);
-
- /* Whee! Actually deliver the signal. */
- handle_signal(&ksig, regs);
- return;
- }
-
- /* Did we come from a system call? */
- if (regs->syscall_nr >= 0) {
- /* Restart the system call - no handlers present */
- switch (regs->regs[REG_RET]) {
- case -ERESTARTNOHAND:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- /* Decode Syscall # */
- regs->regs[REG_RET] = regs->syscall_nr;
- regs->pc -= 4;
- break;
-
- case -ERESTART_RESTARTBLOCK:
- regs->regs[REG_RET] = __NR_restart_syscall;
- regs->pc -= 4;
- break;
- }
- }
-
- /* No signal to deliver -- put the saved sigmask back */
- restore_saved_sigmask();
-}
-
-/*
- * Do a signal return; undo the signal stack.
- */
-struct sigframe {
- struct sigcontext sc;
- unsigned long extramask[_NSIG_WORDS-1];
- long long retcode[2];
-};
-
-struct rt_sigframe {
- struct siginfo __user *pinfo;
- void *puc;
- struct siginfo info;
- struct ucontext uc;
- long long retcode[2];
-};
-
-#ifdef CONFIG_SH_FPU
-static inline int
-restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- int err = 0;
- int fpvalid;
-
- err |= __get_user (fpvalid, &sc->sc_fpvalid);
- conditional_used_math(fpvalid);
- if (! fpvalid)
- return err;
-
- if (current == last_task_used_math) {
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
- (sizeof(long long) * 32) + (sizeof(int) * 1));
-
- return err;
-}
-
-static inline int
-setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- int err = 0;
- int fpvalid;
-
- fpvalid = !!used_math();
- err |= __put_user(fpvalid, &sc->sc_fpvalid);
- if (! fpvalid)
- return err;
-
- if (current == last_task_used_math) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
- (sizeof(long long) * 32) + (sizeof(int) * 1));
- clear_used_math();
-
- return err;
-}
-#else
-static inline int
-restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- return 0;
-}
-static inline int
-setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- return 0;
-}
-#endif
-
-static int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
-{
- unsigned int err = 0;
- unsigned long long current_sr, new_sr;
-#define SR_MASK 0xffff8cfd
-
-#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
-
- COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
- COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
- COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
- COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
- COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
- COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
- COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
- COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
- COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
- COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
- COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
- COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
- COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
- COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
- COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
- COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
- COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
- COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
-
-	/* Prevent the signal handler from manipulating SR in a way that can
-	   crash the kernel, i.e. only allow S, Q, M, PR, SZ, FR to be
-	   modified. */
- current_sr = regs->sr;
- err |= __get_user(new_sr, &sc->sc_sr);
- regs->sr &= SR_MASK;
- regs->sr |= (new_sr & ~SR_MASK);
-
- COPY(pc);
-
-#undef COPY
-
- /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
- * has been restored above.) */
- err |= restore_sigcontext_fpu(regs, sc);
-
- regs->syscall_nr = -1; /* disable syscall checks */
- err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
- return err;
-}
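
Editor's note: the COPY() macro above relies on token pasting: COPY(regs[3]) expands to err |= __get_user(regs->regs[3], &sc->sc_regs[3]), mirroring a field between the live registers and the sigcontext without spelling each name twice. A tiny stand-alone illustration of the pattern, with hypothetical struct names:

	/* Hedged sketch: field mirroring via token pasting. */
	struct live  { long pc, sr, r[2]; };
	struct saved { long sc_pc, sc_sr, sc_r[2]; };

	#define COPY(f) dst->f = src->sc_##f    /* paste the field name onto sc_ */

	static void restore(struct live *dst, const struct saved *src)
	{
		COPY(pc); COPY(sr); COPY(r[0]); COPY(r[1]);
	}
	#undef COPY
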
-
-asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
- unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs * regs)
-{
- struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
- sigset_t set;
- long long ret;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- if (!access_ok(frame, sizeof(*frame)))
- goto badframe;
-
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_NSIG_WORDS > 1
- && __copy_from_user(&set.sig[1], &frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->sc, &ret))
- goto badframe;
- regs->pc -= 4;
-
- return (int) ret;
-
-badframe:
- force_sig(SIGSEGV);
- return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
- unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7,
- struct pt_regs * regs)
-{
- struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
- sigset_t set;
- long long ret;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- if (!access_ok(frame, sizeof(*frame)))
- goto badframe;
-
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
- goto badframe;
-
- set_current_blocked(&set);
-
- if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
- goto badframe;
- regs->pc -= 4;
-
- if (restore_altstack(&frame->uc.uc_stack))
- goto badframe;
-
- return (int) ret;
-
-badframe:
- force_sig(SIGSEGV);
- return 0;
-}
-
-/*
- * Set up a signal frame.
- */
-static int
-setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
- unsigned long mask)
-{
- int err = 0;
-
-	/* Do this first, otherwise if this sets sr->fd, that value isn't preserved. */
- err |= setup_sigcontext_fpu(regs, sc);
-
-#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
-
- COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
- COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
- COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
- COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
- COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
- COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
- COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
- COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
- COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
- COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
- COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
- COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
- COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
- COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
- COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
- COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
- COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
- COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
- COPY(sr); COPY(pc);
-
-#undef COPY
-
- err |= __put_user(mask, &sc->oldmask);
-
- return err;
-}
-
-/*
- * Determine which stack to use..
- */
-static inline void __user *
-get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
-{
- if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
- sp = current->sas_ss_sp + current->sas_ss_size;
-
- return (void __user *)((sp - frame_size) & -8ul);
-}
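
Editor's note: get_sigframe() above carves the frame out of the chosen stack by subtracting its size and rounding down with & -8ul (two's-complement -8 is ...11111000, so this clears the low three bits, giving 8-byte alignment). The arithmetic in isolation:

	/* Hedged sketch: carving an 8-byte-aligned frame below the stack pointer. */
	#include <stdint.h>

	static uintptr_t frame_below(uintptr_t sp, uintptr_t size)
	{
		return (sp - size) & ~(uintptr_t)7;   /* same bits as & -8ul */
	}
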
-
-void sa_default_restorer(void); /* See comments below */
-void sa_default_rt_restorer(void); /* See comments below */
-
-static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
-{
- struct sigframe __user *frame;
- int err = 0, sig = ksig->sig;
- int signal;
-
- frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
-
- if (!access_ok(frame, sizeof(*frame)))
- return -EFAULT;
-
- err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
-
-	/* Give up early, as i386 does, in case of error */
- if (err)
- return -EFAULT;
-
-	if (_NSIG_WORDS > 1) {
-		err |= __copy_to_user(frame->extramask, &set->sig[1],
-				      sizeof(frame->extramask));
-	}
-
-	/* Give up early, as i386 does, in case of error */
- if (err)
- return -EFAULT;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- /*
- * On SH5 all edited pointers are subject to NEFF
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
-			ksig->ka.sa.sa_restorer | 0x1);
- } else {
- /*
- * Different approach on SH5.
- * . Endianness independent asm code gets placed in entry.S .
- * This is limited to four ASM instructions corresponding
- * to two long longs in size.
- * . err checking is done on the else branch only
- * . flush_icache_range() is called upon __put_user() only
- * . all edited pointers are subject to NEFF
- * . being code, linker turns ShMedia bit on, always
- * dereference index -1.
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
- frame->retcode | 0x01);
-
- if (__copy_to_user(frame->retcode,
- (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
- return -EFAULT;
-
- /* Cohere the trampoline with the I-cache. */
- flush_cache_sigtramp(DEREF_REG_PR-1);
- }
-
- /*
- * Set up registers for signal handler.
- * All edited pointers are subject to NEFF.
- */
- regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
- regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
-
- /* FIXME:
- The glibc profiling support for SH-5 needs to be passed a sigcontext
- so it can retrieve the PC. At some point during 2003 the glibc
- support was changed to receive the sigcontext through the 2nd
- argument, but there are still versions of libc.so in use that use
- the 3rd argument. Until libc.so is stabilised, pass the sigcontext
- through both 2nd and 3rd arguments.
- */
-
- regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
- regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
-
- regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
-
- /* Broken %016Lx */
- pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- sig, current->comm, current->pid, frame,
- regs->pc >> 32, regs->pc & 0xffffffff,
- DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
-
- return 0;
-}
-
-static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
- struct pt_regs *regs)
-{
- struct rt_sigframe __user *frame;
- int err = 0, sig = ksig->sig;
-
- frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
-
- if (!access_ok(frame, sizeof(*frame)))
- return -EFAULT;
-
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-
-	/* Give up early, as i386 does, in case of error */
- if (err)
- return -EFAULT;
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->regs[REG_SP]);
- err |= setup_sigcontext(&frame->uc.uc_mcontext,
- regs, set->sig[0]);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
-	/* Give up early, as i386 does, in case of error */
- if (err)
- return -EFAULT;
-
- /* Set up to return from userspace. If provided, use a stub
- already in userspace. */
- if (ksig->ka.sa.sa_flags & SA_RESTORER) {
- /*
- * On SH5 all edited pointers are subject to NEFF
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
- ksig->ka.sa.sa_restorer | 0x1);
- } else {
- /*
- * Different approach on SH5.
- * . Endianness independent asm code gets placed in entry.S .
- * This is limited to four ASM instructions corresponding
- * to two long longs in size.
- * . err checking is done on the else branch only
- * . flush_icache_range() is called upon __put_user() only
- * . all edited pointers are subject to NEFF
- * . being code, linker turns ShMedia bit on, always
- * dereference index -1.
- */
- DEREF_REG_PR = neff_sign_extend((unsigned long)
- frame->retcode | 0x01);
-
- if (__copy_to_user(frame->retcode,
- (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
- return -EFAULT;
-
- /* Cohere the trampoline with the I-cache. */
- flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
- }
-
- /*
- * Set up registers for signal handler.
- * All edited pointers are subject to NEFF.
- */
- regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
- regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
- regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
- regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
- regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
-
- pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
- sig, current->comm, current->pid, frame,
- regs->pc >> 32, regs->pc & 0xffffffff,
- DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
-
- return 0;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(struct ksignal *ksig, struct pt_regs *regs)
-{
- sigset_t *oldset = sigmask_to_save();
- int ret;
-
- /* Set up the stack frame */
- if (ksig->ka.sa.sa_flags & SA_SIGINFO)
- ret = setup_rt_frame(ksig, oldset, regs);
- else
- ret = setup_frame(ksig, oldset, regs);
-
- signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
-}
-
-asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
-{
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(regs);
-
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
-}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index f8afc014e084..a5a7b33ed81a 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -69,10 +69,10 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
if (addr + len < addr)
return -EFAULT;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma (current->mm, addr);
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
@@ -91,6 +91,6 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
if (op & CACHEFLUSH_I)
flush_icache_range(addr, addr+len);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return 0;
}
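
Editor's note: this hunk converts sys_cacheflush() from open-coded rwsem calls on mmap_sem to the mmap locking API (introduced around v5.8); the shape of the critical section is unchanged. A user-space analogue of the read-side pattern, with pthread rwlocks standing in for the mmap lock:

	/* Hedged sketch: reader-side lock around a lookup, as in the hunk above. */
	#include <pthread.h>

	static pthread_rwlock_t maps_lock = PTHREAD_RWLOCK_INITIALIZER;

	static int lookup_under_read_lock(int (*lookup)(void))
	{
		int ret;

		pthread_rwlock_rdlock(&maps_lock);   /* mmap_read_lock() analogue */
		ret = lookup();                      /* e.g. the find_vma() step */
		pthread_rwlock_unlock(&maps_lock);   /* mmap_read_unlock() analogue */
		return ret;
	}
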
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
deleted file mode 100644
index 1bcb86f0b728..000000000000
--- a/arch/sh/kernel/syscalls_64.S
+++ /dev/null
@@ -1,419 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/syscalls_64.S
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 - 2007 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-
-#include <linux/sys.h>
-
- .section .data, "aw"
- .balign 32
-
-/*
- * System calls jump table
- */
- .globl sys_call_table
-sys_call_table:
- .long sys_restart_syscall /* 0 - old "setup()" system call */
- .long sys_exit
- .long sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_waitpid
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_lchown16
- .long sys_ni_syscall /* old break syscall holder */
- .long sys_stat
- .long sys_lseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_oldumount
- .long sys_setuid16
- .long sys_getuid16
- .long sys_stime /* 25 */
- .long sh64_ptrace
- .long sys_alarm
- .long sys_fstat
- .long sys_pause
- .long sys_utime /* 30 */
- .long sys_ni_syscall /* old stty syscall holder */
- .long sys_ni_syscall /* old gtty syscall holder */
- .long sys_access
- .long sys_nice
- .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
- .long sys_sync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_pipe
- .long sys_times
- .long sys_ni_syscall /* old prof syscall holder */
- .long sys_brk /* 45 */
- .long sys_setgid16
- .long sys_getgid16
- .long sys_signal
- .long sys_geteuid16
- .long sys_getegid16 /* 50 */
- .long sys_acct
-	.long sys_umount		/* recycled never used phys() */
- .long sys_ni_syscall /* old lock syscall holder */
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_ni_syscall /* old mpx syscall holder */
- .long sys_setpgid
- .long sys_ni_syscall /* old ulimit syscall holder */
- .long sys_ni_syscall /* sys_olduname */
- .long sys_umask /* 60 */
- .long sys_chroot
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_sigaction
- .long sys_sgetmask
- .long sys_ssetmask
- .long sys_setreuid16 /* 70 */
- .long sys_setregid16
- .long sys_sigsuspend
- .long sys_sigpending
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_old_getrlimit
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups16 /* 80 */
- .long sys_setgroups16
- .long sys_ni_syscall /* sys_oldselect */
- .long sys_symlink
- .long sys_lstat
- .long sys_readlink /* 85 */
- .long sys_uselib
- .long sys_swapon
- .long sys_reboot
- .long sys_old_readdir
- .long old_mmap /* 90 */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown16 /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_ni_syscall /* old profil syscall holder */
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_ni_syscall /* ioperm */
- .long sys_socketcall /* Obsolete implementation of socket syscall */
- .long sys_syslog
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_newstat
- .long sys_newlstat
- .long sys_newfstat
- .long sys_uname
- .long sys_ni_syscall /* 110 */ /* iopl */
- .long sys_vhangup
- .long sys_ni_syscall /* idle */
- .long sys_ni_syscall /* vm86old */
- .long sys_wait4
- .long sys_swapoff /* 115 */
- .long sys_sysinfo
- .long sys_ipc /* Obsolete ipc syscall implementation */
- .long sys_fsync
- .long sys_sigreturn
- .long sys_clone /* 120 */
- .long sys_setdomainname
- .long sys_newuname
- .long sys_cacheflush /* x86: sys_modify_ldt */
- .long sys_adjtimex
- .long sys_mprotect /* 125 */
- .long sys_sigprocmask
- .long sys_ni_syscall /* old "create_module" */
- .long sys_init_module
- .long sys_delete_module
- .long sys_ni_syscall /* 130: old "get_kernel_syms" */
- .long sys_quotactl
- .long sys_getpgid
- .long sys_fchdir
- .long sys_bdflush
- .long sys_sysfs /* 135 */
- .long sys_personality
- .long sys_ni_syscall /* for afs_syscall */
- .long sys_setfsuid16
- .long sys_setfsgid16
- .long sys_llseek /* 140 */
- .long sys_getdents
- .long sys_select
- .long sys_flock
- .long sys_msync
- .long sys_readv /* 145 */
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl
- .long sys_mlock /* 150 */
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max
- .long sys_sched_get_priority_min /* 160 */
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_mremap
- .long sys_setresuid16
- .long sys_getresuid16 /* 165 */
- .long sys_ni_syscall /* vm86 */
- .long sys_ni_syscall /* old "query_module" */
- .long sys_poll
- .long sys_ni_syscall /* was nfsservctl */
- .long sys_setresgid16 /* 170 */
- .long sys_getresgid16
- .long sys_prctl
- .long sys_rt_sigreturn
- .long sys_rt_sigaction
- .long sys_rt_sigprocmask /* 175 */
- .long sys_rt_sigpending
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend
- .long sys_pread64 /* 180 */
- .long sys_pwrite64
- .long sys_chown16
- .long sys_getcwd
- .long sys_capget
- .long sys_capset /* 185 */
- .long sys_sigaltstack
- .long sys_sendfile
- .long sys_ni_syscall /* getpmsg */
- .long sys_ni_syscall /* putpmsg */
- .long sys_vfork /* 190 */
- .long sys_getrlimit
- .long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64 /* 195 */
- .long sys_lstat64
- .long sys_fstat64
- .long sys_lchown
- .long sys_getuid
- .long sys_getgid /* 200 */
- .long sys_geteuid
- .long sys_getegid
- .long sys_setreuid
- .long sys_setregid
- .long sys_getgroups /* 205 */
- .long sys_setgroups
- .long sys_fchown
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setresgid /* 210 */
- .long sys_getresgid
- .long sys_chown
- .long sys_setuid
- .long sys_setgid
- .long sys_setfsuid /* 215 */
- .long sys_setfsgid
- .long sys_pivot_root
- .long sys_mincore
- .long sys_madvise
- /* Broken-out socket family (maintain backwards compatibility in syscall
- numbering with 2.4) */
- .long sys_socket /* 220 */
- .long sys_bind
- .long sys_connect
- .long sys_listen
- .long sys_accept
- .long sys_getsockname /* 225 */
- .long sys_getpeername
- .long sys_socketpair
- .long sys_send
- .long sys_sendto
- .long sys_recv /* 230*/
- .long sys_recvfrom
- .long sys_shutdown
- .long sys_setsockopt
- .long sys_getsockopt
- .long sys_sendmsg /* 235 */
- .long sys_recvmsg
- /* Broken-out IPC family (maintain backwards compatibility in syscall
- numbering with 2.4) */
- .long sys_semop
- .long sys_semget
- .long sys_semctl
- .long sys_msgsnd /* 240 */
- .long sys_msgrcv
- .long sys_msgget
- .long sys_msgctl
- .long sys_shmat
- .long sys_shmdt /* 245 */
- .long sys_shmget
- .long sys_shmctl
- /* Rest of syscalls listed in 2.4 i386 unistd.h */
- .long sys_getdents64
- .long sys_fcntl64
- .long sys_ni_syscall /* 250 reserved for TUX */
- .long sys_ni_syscall /* Reserved for Security */
- .long sys_gettid
- .long sys_readahead
- .long sys_setxattr
- .long sys_lsetxattr /* 255 */
- .long sys_fsetxattr
- .long sys_getxattr
- .long sys_lgetxattr
- .long sys_fgetxattr
- .long sys_listxattr /* 260 */
- .long sys_llistxattr
- .long sys_flistxattr
- .long sys_removexattr
- .long sys_lremovexattr
- .long sys_fremovexattr /* 265 */
- .long sys_tkill
- .long sys_sendfile64
- .long sys_futex
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity /* 270 */
- .long sys_ni_syscall /* reserved for set_thread_area */
- .long sys_ni_syscall /* reserved for get_thread_area */
- .long sys_io_setup
- .long sys_io_destroy
- .long sys_io_getevents /* 275 */
- .long sys_io_submit
- .long sys_io_cancel
- .long sys_fadvise64
- .long sys_ni_syscall
- .long sys_exit_group /* 280 */
- /* Rest of new 2.6 syscalls */
- .long sys_lookup_dcookie
- .long sys_epoll_create
- .long sys_epoll_ctl
- .long sys_epoll_wait
- .long sys_remap_file_pages /* 285 */
- .long sys_set_tid_address
- .long sys_timer_create
- .long sys_timer_settime
- .long sys_timer_gettime
- .long sys_timer_getoverrun /* 290 */
- .long sys_timer_delete
- .long sys_clock_settime
- .long sys_clock_gettime
- .long sys_clock_getres
- .long sys_clock_nanosleep /* 295 */
- .long sys_statfs64
- .long sys_fstatfs64
- .long sys_tgkill
- .long sys_utimes
- .long sys_fadvise64_64 /* 300 */
- .long sys_ni_syscall /* Reserved for vserver */
- .long sys_ni_syscall /* Reserved for mbind */
- .long sys_ni_syscall /* get_mempolicy */
- .long sys_ni_syscall /* set_mempolicy */
- .long sys_mq_open /* 305 */
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive
- .long sys_mq_notify
- .long sys_mq_getsetattr /* 310 */
- .long sys_ni_syscall /* Reserved for kexec */
- .long sys_waitid
- .long sys_add_key
- .long sys_request_key
- .long sys_keyctl /* 315 */
- .long sys_ioprio_set
- .long sys_ioprio_get
- .long sys_inotify_init
- .long sys_inotify_add_watch
- .long sys_inotify_rm_watch /* 320 */
- .long sys_ni_syscall
- .long sys_migrate_pages
- .long sys_openat
- .long sys_mkdirat
- .long sys_mknodat /* 325 */
- .long sys_fchownat
- .long sys_futimesat
- .long sys_fstatat64
- .long sys_unlinkat
- .long sys_renameat /* 330 */
- .long sys_linkat
- .long sys_symlinkat
- .long sys_readlinkat
- .long sys_fchmodat
- .long sys_faccessat /* 335 */
- .long sys_pselect6
- .long sys_ppoll
- .long sys_unshare
- .long sys_set_robust_list
- .long sys_get_robust_list /* 340 */
- .long sys_splice
- .long sys_sync_file_range
- .long sys_tee
- .long sys_vmsplice
- .long sys_move_pages /* 345 */
- .long sys_getcpu
- .long sys_epoll_pwait
- .long sys_utimensat
- .long sys_signalfd
- .long sys_timerfd_create /* 350 */
- .long sys_eventfd
- .long sys_fallocate
- .long sys_timerfd_settime
- .long sys_timerfd_gettime
- .long sys_signalfd4 /* 355 */
- .long sys_eventfd2
- .long sys_epoll_create1
- .long sys_dup3
- .long sys_pipe2
- .long sys_inotify_init1 /* 360 */
- .long sys_preadv
- .long sys_pwritev
- .long sys_rt_tgsigqueueinfo
- .long sys_perf_event_open
- .long sys_recvmmsg /* 365 */
- .long sys_accept4
- .long sys_fanotify_init
- .long sys_fanotify_mark
- .long sys_prlimit64
- .long sys_name_to_handle_at /* 370 */
- .long sys_open_by_handle_at
- .long sys_clock_adjtime
- .long sys_syncfs
- .long sys_sendmmsg
- .long sys_setns /* 375 */
- .long sys_process_vm_readv
- .long sys_process_vm_writev
- .long sys_kcmp
- .long sys_finit_module
- .long sys_sched_getattr /* 380 */
- .long sys_sched_setattr
- .long sys_renameat2
- .long sys_seccomp
- .long sys_getrandom
- .long sys_memfd_create /* 385 */
- .long sys_bpf
- .long sys_execveat
- .long sys_userfaultfd
- .long sys_membarrier
- .long sys_mlock2 /* 390 */
- .long sys_copy_file_range
- .long sys_preadv2
- .long sys_pwritev2
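
Editor's note: the deleted table above is the classic flat syscall jump table: the trap path indexes it by syscall number and calls through, with sys_ni_syscall filling retired or reserved slots. A minimal sketch of that dispatch shape (ENOSYS is 38 on Linux):

	/* Hedged sketch: dispatch through a function-pointer table. */
	typedef long (*syscall_fn)(long, long, long);

	static long sys_ni(long a, long b, long c)       { return -38; } /* -ENOSYS */
	static long sys_identity(long a, long b, long c) { return a; }

	static const syscall_fn table[] = { sys_ni, sys_identity };

	static long dispatch(unsigned int nr, long a, long b, long c)
	{
		if (nr >= sizeof(table) / sizeof(table[0]))
			return -38;              /* out of range: -ENOSYS */
		return table[nr](a, b, c);
	}
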
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 2130381c9d57..a33025451fcd 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -38,8 +38,8 @@ void die(const char *str, struct pt_regs *regs, long err)
task_pid_nr(current), task_stack_page(current) + 1);
if (!user_mode(regs) || in_interrupt())
- dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
- (unsigned long)task_stack_page(current));
+ dump_mem("Stack: ", KERN_DEFAULT, regs->regs[15],
+ THREAD_SIZE + (unsigned long)task_stack_page(current));
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
deleted file mode 100644
index 37046f3a26d3..000000000000
--- a/arch/sh/kernel/traps_64.c
+++ /dev/null
@@ -1,814 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/traps_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/kallsyms.h>
-#include <linux/interrupt.h>
-#include <linux/sysctl.h>
-#include <linux/module.h>
-#include <linux/perf_event.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/alignment.h>
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/fpu.h>
-
-static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
-{
- int get_user_error;
- unsigned long aligned_pc;
- insn_size_t opcode;
-
- if ((pc & 3) == 1) {
- /* SHmedia */
- aligned_pc = pc & ~3;
- if (from_user_mode) {
- if (!access_ok(aligned_pc, sizeof(insn_size_t))) {
- get_user_error = -EFAULT;
- } else {
- get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
- *result_opcode = opcode;
- }
- return get_user_error;
- } else {
- /* If the fault was in the kernel, we can either read
- * this directly, or if not, we fault.
- */
- *result_opcode = *(insn_size_t *)aligned_pc;
- return 0;
- }
- } else if ((pc & 1) == 0) {
- /* SHcompact */
- /* TODO : provide handling for this. We don't really support
- user-mode SHcompact yet, and for a kernel fault, this would
- have to come from a module built for SHcompact. */
- return -EFAULT;
- } else {
- /* misaligned */
- return -EFAULT;
- }
-}
-
-static int address_is_sign_extended(__u64 a)
-{
- __u64 b;
-#if (NEFF == 32)
- b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
- return (b == a) ? 1 : 0;
-#else
-#error "Sign extend check only works for NEFF==32"
-#endif
-}
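
Editor's note: address_is_sign_extended() above verifies that a 64-bit effective address is a valid sign extension of its low 32 bits (the NEFF == 32 case): truncate, sign-extend back, and compare. Stand-alone:

	/* Hedged sketch: NEFF-style sign-extension check for a 64-bit address. */
	#include <stdint.h>

	static int is_sign_extended_32(uint64_t a)
	{
		/* truncate to 32 bits, sign-extend back, compare with the original */
		return (uint64_t)(int64_t)(int32_t)(uint32_t)a == a;
	}
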
-
-/* return -1 for fault, 0 for OK */
-static int generate_and_check_address(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- __u64 *address)
-{
- __u64 base_address, addr;
- int basereg;
-
- switch (1 << width_shift) {
- case 1: inc_unaligned_byte_access(); break;
- case 2: inc_unaligned_word_access(); break;
- case 4: inc_unaligned_dword_access(); break;
- case 8: inc_unaligned_multi_access(); break;
- }
-
- basereg = (opcode >> 20) & 0x3f;
- base_address = regs->regs[basereg];
- if (displacement_not_indexed) {
- __s64 displacement;
- displacement = (opcode >> 10) & 0x3ff;
- displacement = sign_extend64(displacement, 9);
- addr = (__u64)((__s64)base_address + (displacement << width_shift));
- } else {
- __u64 offset;
- int offsetreg;
- offsetreg = (opcode >> 10) & 0x3f;
- offset = regs->regs[offsetreg];
- addr = base_address + offset;
- }
-
- /* Check sign extended */
- if (!address_is_sign_extended(addr))
- return -1;
-
- /* Check accessible. For misaligned access in the kernel, assume the
- address is always accessible (and if not, just fault when the
- load/store gets done.) */
- if (user_mode(regs)) {
- inc_unaligned_user_access();
-
- if (addr >= TASK_SIZE)
- return -1;
- } else
- inc_unaligned_kernel_access();
-
- *address = addr;
-
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
- unaligned_fixups_notify(current, opcode, regs);
-
- return 0;
-}
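
Editor's note: generate_and_check_address() above decodes the 10-bit displacement from bits 10..19 of the opcode, sign-extends it from bit 9, and scales it by the access width. The field arithmetic in isolation (the xor/subtract trick stands in for sign_extend64(), and multiplication avoids shifting a negative value):

	/* Hedged sketch: decoding a 10-bit signed, width-scaled displacement. */
	#include <stdint.h>

	static int64_t decode_disp(uint32_t opcode, int width_shift)
	{
		int64_t d = (opcode >> 10) & 0x3ff;     /* 10-bit displacement field */

		d = (d ^ 0x200) - 0x200;                /* sign-extend from bit 9 */
		return d * ((int64_t)1 << width_shift); /* scale by access width */
	}
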
-
-static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
-{
- unsigned short x;
- unsigned char *p, *q;
- p = (unsigned char *) (int) address;
- q = (unsigned char *) &x;
- q[0] = p[0];
- q[1] = p[1];
-
- if (do_sign_extend) {
- *result = (__u64)(__s64) *(short *) &x;
- } else {
- *result = (__u64) x;
- }
-}
-
-static void misaligned_kernel_word_store(__u64 address, __u64 value)
-{
- unsigned short x;
- unsigned char *p, *q;
- p = (unsigned char *) (int) address;
- q = (unsigned char *) &x;
-
- x = (__u16) value;
- p[0] = q[0];
- p[1] = q[1];
-}
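
Editor's note: misaligned_kernel_word_load()/store() above assemble a 16-bit value one byte at a time so no aligned access is ever issued. The portable modern equivalent is a memcpy(), which compilers lower to the same byte moves where the target requires it:

	/* Hedged sketch: unaligned 16-bit access via byte copies. */
	#include <stdint.h>
	#include <string.h>

	static uint16_t load16_unaligned(const void *p)
	{
		uint16_t x;

		memcpy(&x, p, sizeof(x));   /* byte copies: no alignment assumed */
		return x;
	}

	static void store16_unaligned(void *p, uint16_t x)
	{
		memcpy(p, &x, sizeof(x));
	}
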
-
-static int misaligned_load(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- int do_sign_extend)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int destreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- destreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- switch (width_shift) {
- case 1:
- if (do_sign_extend) {
- regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
- } else {
- regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
- }
- break;
- case 2:
- regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
- break;
- case 3:
- regs->regs[destreg] = buffer;
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- } else {
- /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
- __u64 lo, hi;
-
- switch (width_shift) {
- case 1:
- misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
- break;
- case 2:
- asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
- asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
- regs->regs[destreg] = lo | hi;
- break;
- case 3:
- asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
- asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
- regs->regs[destreg] = lo | hi;
- break;
-
- default:
- printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- }
-
- return 0;
-}
-
-static int misaligned_store(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int srcreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- srcreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- switch (width_shift) {
- case 1:
- *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
- break;
- case 2:
- *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
- break;
- case 3:
- buffer = regs->regs[srcreg];
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
-
- if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- } else {
- /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
- __u64 val = regs->regs[srcreg];
-
- switch (width_shift) {
- case 1:
- misaligned_kernel_word_store(address, val);
- break;
- case 2:
- asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
- asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
- break;
- case 3:
- asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
- asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
- break;
-
- default:
- printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- }
-
- return 0;
-}
-
-/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
- error. */
-static int misaligned_fpu_load(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- int do_paired_load)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int destreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- destreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
- __u32 buflo, bufhi;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- /* 'current' may be the current owner of the FPU state, so
- context switch the registers into memory so they can be
- indexed by register number. */
- if (last_task_used_math == current) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- buflo = *(__u32*) &buffer;
- bufhi = *(1 + (__u32*) &buffer);
-
- switch (width_shift) {
- case 2:
- current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
- break;
- case 3:
- if (do_paired_load) {
- current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
- current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
- } else {
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
- current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
-#else
- current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
- current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
-#endif
- }
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
- return 0;
- } else {
- die ("Misaligned FPU load inside kernel", regs, 0);
- return -1;
- }
-}
-
-static int misaligned_fpu_store(struct pt_regs *regs,
- insn_size_t opcode,
- int displacement_not_indexed,
- int width_shift,
- int do_paired_load)
-{
- /* Return -1 for a fault, 0 for OK */
- int error;
- int srcreg;
- __u64 address;
-
- error = generate_and_check_address(regs, opcode,
- displacement_not_indexed, width_shift, &address);
- if (error < 0)
- return error;
-
- srcreg = (opcode >> 4) & 0x3f;
- if (user_mode(regs)) {
- __u64 buffer;
- /* Initialise these to NaNs. */
- __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
-
- if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
- return -1;
- }
-
- /* 'current' may be the current owner of the FPU state, so
- context switch the registers into memory so they can be
- indexed by register number. */
- if (last_task_used_math == current) {
- enable_fpu();
- save_fpu(current);
- disable_fpu();
- last_task_used_math = NULL;
- regs->sr |= SR_FD;
- }
-
- switch (width_shift) {
- case 2:
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
- break;
- case 3:
- if (do_paired_load) {
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
- bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
- } else {
-#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
-#else
- buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
- bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
-#endif
- }
- break;
- default:
- printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
- width_shift, (unsigned long) regs->pc);
- break;
- }
-
- *(__u32*) &buffer = buflo;
- *(1 + (__u32*) &buffer) = bufhi;
- if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
- return -1; /* fault */
- }
- return 0;
- } else {
-		die ("Misaligned FPU store inside kernel", regs, 0);
- return -1;
- }
-}
-
-static int misaligned_fixup(struct pt_regs *regs)
-{
- insn_size_t opcode;
- int error;
- int major, minor;
- unsigned int user_action;
-
- user_action = unaligned_user_action();
- if (!(user_action & UM_FIXUP))
- return -1;
-
- error = read_opcode(regs->pc, &opcode, user_mode(regs));
- if (error < 0) {
- return error;
- }
- major = (opcode >> 26) & 0x3f;
- minor = (opcode >> 16) & 0xf;
-
- switch (major) {
- case (0x84>>2): /* LD.W */
- error = misaligned_load(regs, opcode, 1, 1, 1);
- break;
- case (0xb0>>2): /* LD.UW */
- error = misaligned_load(regs, opcode, 1, 1, 0);
- break;
- case (0x88>>2): /* LD.L */
- error = misaligned_load(regs, opcode, 1, 2, 1);
- break;
- case (0x8c>>2): /* LD.Q */
- error = misaligned_load(regs, opcode, 1, 3, 0);
- break;
-
- case (0xa4>>2): /* ST.W */
- error = misaligned_store(regs, opcode, 1, 1);
- break;
- case (0xa8>>2): /* ST.L */
- error = misaligned_store(regs, opcode, 1, 2);
- break;
- case (0xac>>2): /* ST.Q */
- error = misaligned_store(regs, opcode, 1, 3);
- break;
-
- case (0x40>>2): /* indexed loads */
- switch (minor) {
- case 0x1: /* LDX.W */
- error = misaligned_load(regs, opcode, 0, 1, 1);
- break;
- case 0x5: /* LDX.UW */
- error = misaligned_load(regs, opcode, 0, 1, 0);
- break;
- case 0x2: /* LDX.L */
- error = misaligned_load(regs, opcode, 0, 2, 1);
- break;
- case 0x3: /* LDX.Q */
- error = misaligned_load(regs, opcode, 0, 3, 0);
- break;
- default:
- error = -1;
- break;
- }
- break;
-
- case (0x60>>2): /* indexed stores */
- switch (minor) {
- case 0x1: /* STX.W */
- error = misaligned_store(regs, opcode, 0, 1);
- break;
- case 0x2: /* STX.L */
- error = misaligned_store(regs, opcode, 0, 2);
- break;
- case 0x3: /* STX.Q */
- error = misaligned_store(regs, opcode, 0, 3);
- break;
- default:
- error = -1;
- break;
- }
- break;
-
- case (0x94>>2): /* FLD.S */
- error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
- break;
- case (0x98>>2): /* FLD.P */
- error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
- break;
- case (0x9c>>2): /* FLD.D */
- error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
- break;
- case (0x1c>>2): /* floating indexed loads */
- switch (minor) {
- case 0x8: /* FLDX.S */
- error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
- break;
- case 0xd: /* FLDX.P */
- error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
- break;
- case 0x9: /* FLDX.D */
- error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
- break;
- default:
- error = -1;
- break;
- }
- break;
-	case (0xb4>>2): /* FST.S */
-		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
-		break;
-	case (0xb8>>2): /* FST.P */
-		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
-		break;
-	case (0xbc>>2): /* FST.D */
-		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
- break;
- case (0x3c>>2): /* floating indexed stores */
- switch (minor) {
- case 0x8: /* FSTX.S */
- error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
- break;
- case 0xd: /* FSTX.P */
- error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
- break;
- case 0x9: /* FSTX.D */
- error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
- break;
- default:
- error = -1;
- break;
- }
- break;
-
- default:
- /* Fault */
- error = -1;
- break;
- }
-
- if (error < 0) {
- return error;
- } else {
- regs->pc += 4; /* Skip the instruction that's just been emulated */
- return 0;
- }
-}
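
The dispatch above turns on two bit-fields of the 32-bit SHmedia opcode: a 6-bit major opcode and a 4-bit minor opcode. A minimal standalone C sketch of that decode (the opcode value and the program itself are illustrative, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Synthetic encoding: major field 0x21 (== 0x84 >> 2, the
             * LD.W case above), minor field 0x3. */
            uint32_t opcode = (0x21u << 26) | (0x3u << 16);
            unsigned int major = (opcode >> 26) & 0x3f;
            unsigned int minor = (opcode >> 16) & 0xf;

            printf("major=0x%x minor=0x%x\n", major, minor);
            return 0;
    }
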
-
-static void do_unhandled_exception(int signr, char *str, unsigned long error,
- struct pt_regs *regs)
-{
- if (user_mode(regs))
- force_sig(signr);
-
- die_if_no_fixup(str, regs, error);
-}
-
-#define DO_ERROR(signr, str, name) \
-asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
-{ \
- do_unhandled_exception(signr, str, error_code, regs); \
-}
-
-DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
-DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
-
-#if defined(CONFIG_SH64_ID2815_WORKAROUND)
-
-#define OPCODE_INVALID 0
-#define OPCODE_USER_VALID 1
-#define OPCODE_PRIV_VALID 2
-
-/* getcon/putcon - requires checking which control register is referenced. */
-#define OPCODE_CTRL_REG 3
-
-/* Table of valid opcodes for SHmedia mode.
- Form a 10-bit value by concatenating the major/minor opcodes i.e.
- opcode[31:26,20:16]. The 6 MSBs of this value index into the following
- array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
- LSBs==4'b0000 etc). */
-static unsigned long shmedia_opcode_table[64] = {
- 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
- 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
- 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
-};
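
Read together with the comment above, the whole classification boils down to a few shifts and masks. A hedged standalone sketch ("table" stands in for shmedia_opcode_table; the function name is illustrative):

    #include <stdint.h>

    #define OPCODE_INVALID    0
    #define OPCODE_USER_VALID 1
    #define OPCODE_PRIV_VALID 2
    #define OPCODE_CTRL_REG   3

    /* The 6-bit major opcode selects one 32-bit entry; the 4-bit minor
     * opcode selects one of its sixteen 2-bit state fields. */
    static int opcode_state(const unsigned long table[64], uint32_t opcode)
    {
            unsigned long major = (opcode >> 26) & 0x3f;
            unsigned long minor = (opcode >> 16) & 0xf;

            return (table[major] >> (minor << 1)) & 0x3;
    }
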
-
-/* Workaround SH5-101 cut2 silicon defect #2815 :
- in some situations, inter-mode branches from SHcompact -> SHmedia
- which should take ITLBMISS or EXECPROT exceptions at the target
- falsely take RESINST at the target instead. */
-void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
-{
- insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
- unsigned long pc, aligned_pc;
- unsigned long index, shift;
- unsigned long major, minor, combined;
- unsigned long reserved_field;
- int opcode_state;
- int get_user_error;
- int signr = SIGILL;
- char *exception_name = "reserved_instruction";
-
- pc = regs->pc;
-
- /* SHcompact is not handled */
- if (unlikely((pc & 3) == 0))
- goto out;
-
- /* SHmedia : check for defect. This requires executable vmas
- to be readable too. */
- aligned_pc = pc & ~3;
- if (!access_ok(aligned_pc, sizeof(insn_size_t)))
- get_user_error = -EFAULT;
- else
- get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
-
- if (get_user_error < 0) {
- /*
- * Error trying to read opcode. This typically means a
- * real fault, not a RESINST any more. So change the
- * codes.
- */
- exception_name = "address error (exec)";
- signr = SIGSEGV;
- goto out;
- }
-
- /* These bits are currently reserved as zero in all valid opcodes */
- reserved_field = opcode & 0xf;
- if (unlikely(reserved_field))
- goto out; /* invalid opcode */
-
- major = (opcode >> 26) & 0x3f;
- minor = (opcode >> 16) & 0xf;
- combined = (major << 4) | minor;
- index = major;
- shift = minor << 1;
- opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
- switch (opcode_state) {
- case OPCODE_INVALID:
- /* Trap. */
- break;
- case OPCODE_USER_VALID:
- /*
- * Restart the instruction: the branch to the instruction
- * will now be from an RTE not from SHcompact so the
- * silicon defect won't be triggered.
- */
- return;
- case OPCODE_PRIV_VALID:
- if (!user_mode(regs)) {
- /*
- * Should only ever get here if a module has
- * SHcompact code inside it. If so, the same fix
- * up is needed.
- */
- return; /* same reason */
- }
-
- /*
- * Otherwise, user mode trying to execute a privileged
- * instruction - fall through to trap.
- */
- break;
- case OPCODE_CTRL_REG:
- /* If in privileged mode, return as above. */
- if (!user_mode(regs))
- return;
-
- /* In user mode ... */
- if (combined == 0x9f) { /* GETCON */
- unsigned long regno = (opcode >> 20) & 0x3f;
-
- if (regno >= 62)
- return;
-
- /* reserved/privileged control register => trap */
- } else if (combined == 0x1bf) { /* PUTCON */
- unsigned long regno = (opcode >> 4) & 0x3f;
-
- if (regno >= 62)
- return;
-
- /* reserved/privileged control register => trap */
- }
-
- break;
- default:
- /* Fall through to trap. */
- break;
- }
-
-out:
- do_unhandled_exception(signr, exception_name, error_code, regs);
-}
-
-#else /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* If the workaround isn't needed, this is just a straightforward reserved
- instruction */
-DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
-
-#endif /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* Called with interrupts disabled */
-asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
-{
- die_if_kernel("exception", regs, ex);
-}
-
-asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
-{
- /* Syscall debug */
- printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
-
- die_if_kernel("unknown trapa", regs, scId);
-
- return -ENOSYS;
-}
-
-/* Implement misaligned load/store handling for kernel (and optionally for user
-   mode too). Limitation: only SHmedia mode code is handled - there is no
- handling at all for misaligned accesses occurring in SHcompact code yet. */
-
-asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0)
- do_unhandled_exception(SIGSEGV, "address error(load)",
- error_code, regs);
-}
-
-asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0)
- do_unhandled_exception(SIGSEGV, "address error(store)",
- error_code, regs);
-}
-
-asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
-{
- u64 peek_real_address_q(u64 addr);
- u64 poke_real_address_q(u64 addr, u64 val);
- unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
- unsigned long long exp_cause;
- /* It's not worth ioremapping the debug module registers for the amount
- of access we make to them - just go direct to their physical
- addresses. */
- exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
- if (exp_cause & ~4)
- printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
- (unsigned long)(exp_cause & 0xffffffff));
- show_state();
- /* Clear all DEBUGINT causes */
- poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
-}
-
-void per_cpu_trap_init(void)
-{
- /* Nothing to do for now, VBR initialization later. */
-}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index c60b19958c35..bde7a6c01aaf 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,14 +3,7 @@
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka and Paul Mundt
*/
-#ifdef CONFIG_SUPERH64
-#define LOAD_OFFSET PAGE_OFFSET
-OUTPUT_ARCH(sh:sh5)
-#else
-#define LOAD_OFFSET 0
OUTPUT_ARCH(sh)
-#endif
-
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
@@ -28,14 +21,13 @@ SECTIONS
_text = .; /* Text and read-only data */
- .empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
+ .empty_zero_page : AT(ADDR(.empty_zero_page)) {
*(.empty_zero_page)
} = 0
- .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ .text : AT(ADDR(.text)) {
HEAD_TEXT
TEXT_TEXT
- EXTRA_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@@ -62,7 +54,7 @@ SECTIONS
INIT_DATA_SECTION(16)
. = ALIGN(4);
- .machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
+ .machvec.init : AT(ADDR(.machvec.init)) {
__machvec_start = .;
*(.machvec.init)
__machvec_end = .;
@@ -74,8 +66,8 @@ SECTIONS
* .exit.text is discarded at runtime, not link time, to deal with
* references from __bug_table
*/
- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
- .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
+ .exit.text : AT(ADDR(.exit.text)) { EXIT_TEXT }
+ .exit.data : AT(ADDR(.exit.data)) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
__init_end = .;
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 98494480f048..1bd85a6949c4 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -61,7 +61,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
unsigned long addr;
int ret;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
@@ -80,7 +80,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
current->mm->context.vdso = (void *)addr;
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
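
For reference, the pattern this hunk converts to: mmap_write_lock_killable() and mmap_write_unlock() are the current wrappers around mm->mmap_lock, replacing direct down_write_killable()/up_write() on the old mmap_sem. A hedged kernel-context sketch (the function name is illustrative):

    #include <linux/mm_types.h>
    #include <linux/mmap_lock.h>

    static int modify_address_space(struct mm_struct *mm)
    {
            if (mmap_write_lock_killable(mm))
                    return -EINTR;  /* interrupted by a fatal signal */

            /* ... install or tear down mappings under the write lock ... */

            mmap_write_unlock(mm);
            return 0;
    }
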
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index dad8e6a54906..540e670dbafc 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -29,6 +29,7 @@ void __delay(unsigned long loops)
: "0" (loops)
: "t");
}
+EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
deleted file mode 100644
index 69779ff741df..000000000000
--- a/arch/sh/lib64/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# Makefile for the SH-5 specific library files..
-#
-# Copyright (C) 2000, 2001 Paolo Alberelli
-# Copyright (C) 2003 - 2008 Paul Mundt
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-
-# Panic should really be compiled as PIC
-lib-y := udelay.o panic.o memcpy.o memset.o \
- copy_user_memcpy.o copy_page.o strcpy.o strlen.o
-
-# Extracted from libgcc
-lib-y += udivsi3.o udivdi3.o sdivsi3.o
diff --git a/arch/sh/lib64/copy_page.S b/arch/sh/lib64/copy_page.S
deleted file mode 100644
index 0ec6fca63b56..000000000000
--- a/arch/sh/lib64/copy_page.S
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
-
- This file is subject to the terms and conditions of the GNU General Public
- License. See the file "COPYING" in the main directory of this archive
- for more details.
-
-   Tight version of memcpy for the case of just copying a page.
- Prefetch strategy empirically optimised against RTL simulations
- of SH5-101 cut2 eval chip with Cayman board DDR memory.
-
- Parameters:
- r2 : destination effective address (start of page)
- r3 : source effective address (start of page)
-
- Always copies 4096 bytes.
-
- Points to review.
- * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
-     It seems like the prefetch needs to be at least 4 lines ahead to get
- the data into the cache in time, and the allocos contend with outstanding
- prefetches for the same cache set, so it's better to have the numbers
- different.
- */
-
- .section .text..SHmedia32,"ax"
- .little
-
- .balign 8
- .global copy_page
-copy_page:
-
- /* Copy 4096 bytes worth of data from r3 to r2.
- Do prefetches 4 lines ahead.
- Do alloco 2 lines ahead */
-
- pta 1f, tr1
- pta 2f, tr2
- pta 3f, tr3
- ptabs r18, tr0
-
-#if 0
- /* TAKum03020 */
- ld.q r3, 0x00, r63
- ld.q r3, 0x20, r63
- ld.q r3, 0x40, r63
- ld.q r3, 0x60, r63
-#endif
- alloco r2, 0x00
- synco ! TAKum03020
- alloco r2, 0x20
- synco ! TAKum03020
-
- movi 3968, r6
- add r2, r6, r6
- addi r6, 64, r7
- addi r7, 64, r8
- sub r3, r2, r60
- addi r60, 8, r61
- addi r61, 8, r62
- addi r62, 8, r23
- addi r60, 0x80, r22
-
-/* Minimal code size. The extra branches inside the loop don't cost much
- because they overlap with the time spent waiting for prefetches to
- complete. */
-1:
-#if 0
- /* TAKum03020 */
- bge/u r2, r6, tr2 ! skip prefetch for last 4 lines
- ldx.q r2, r22, r63 ! prefetch 4 lines hence
-#endif
-2:
- bge/u r2, r7, tr3 ! skip alloco for last 2 lines
- alloco r2, 0x40 ! alloc destination line 2 lines ahead
- synco ! TAKum03020
-3:
- ldx.q r2, r60, r36
- ldx.q r2, r61, r37
- ldx.q r2, r62, r38
- ldx.q r2, r23, r39
- st.q r2, 0, r36
- st.q r2, 8, r37
- st.q r2, 16, r38
- st.q r2, 24, r39
- addi r2, 32, r2
- bgt/l r8, r2, tr1
-
- blink tr0, r63 ! return
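
A hedged C rendering of the loop shape being deleted: four quadword loads and stores per iteration across the 4096-byte page. The alloco/synco cache hints and the tr0-tr3 branch targets have no portable equivalent and are omitted; the function name is illustrative:

    #include <stdint.h>

    static void copy_page_sketch(void *dst, const void *src)
    {
            uint64_t *d = dst;
            const uint64_t *s = src;
            int i;

            /* 4096 bytes, 32 bytes per iteration, as in the loop at 3: above. */
            for (i = 0; i < 4096 / 32; i++) {
                    d[0] = s[0];
                    d[1] = s[1];
                    d[2] = s[2];
                    d[3] = s[3];
                    d += 4;
                    s += 4;
            }
    }
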
diff --git a/arch/sh/lib64/copy_user_memcpy.S b/arch/sh/lib64/copy_user_memcpy.S
deleted file mode 100644
index 515f81b00202..000000000000
--- a/arch/sh/lib64/copy_user_memcpy.S
+++ /dev/null
@@ -1,218 +0,0 @@
-! SPDX-License-Identifier: GPL-2.0
-!
-! Fast SH memcpy
-!
-! by Toshiyasu Morita (tm@netcom.com)
-! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
-! SH5 code Copyright 2002 SuperH Ltd.
-!
-! Entry: ARG0: destination pointer
-! ARG1: source pointer
-! ARG2: byte count
-!
-! Exit: RESULT: destination pointer
-! any other registers in the range r0-r7: trashed
-!
-! Notes: Usually one wants to do small reads and write a longword, but
-! unfortunately it is difficult in some cases to concatenate bytes
-! into a longword on the SH, so this does a longword read and small
-! writes.
-!
-! This implementation makes two assumptions about how it is called:
-!
-! 1.: If the byte count is nonzero, the address of the last byte to be
-! copied is unsigned greater than the address of the first byte to
-! be copied. This could be easily swapped for a signed comparison,
-! but the algorithm used needs some comparison.
-!
-! 2.: When there are two or three bytes in the last word of an 11-or-more
-! bytes memory chunk to be copied, the rest of the word can be read
-! without side effects.
-! This could be easily changed by increasing the minimum size of
-! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
-! however, this would cost a few extra cycles on average.
-! For SHmedia, the assumption is that any quadword can be read in its
-! entirety if at least one byte is included in the copy.
-
-/* Imported into Linux kernel by Richard Curnow. This is used to implement the
- __copy_user function in the general case, so it has to be a distinct
- function from intra-kernel memcpy to allow for exception fix-ups in the
- event that the user pointer is bad somewhere in the copy (e.g. due to
- running off the end of the vma).
-
- Note, this algorithm will be slightly wasteful in the case where the source
- and destination pointers are equally aligned, because the stlo/sthi pairs
- could then be merged back into single stores. If there are a lot of cache
- misses, this is probably offset by the stall lengths on the preloads.
-
-*/
-
-/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
- * erratum. The first two prefetches are nop-ed out to avoid upsetting the
- * instruction counts used in the jump address calculation.
- * */
-
- .section .text..SHmedia32,"ax"
- .little
- .balign 32
- .global copy_user_memcpy
- .global copy_user_memcpy_end
-copy_user_memcpy:
-
-#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
-#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
-#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
-#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
-
- nop ! ld.b r3,0,r63 ! TAKum03020
- pta/l Large,tr0
- movi 25,r0
- bgeu/u r4,r0,tr0
- nsb r4,r0
- shlli r0,5,r0
- movi (L1-L0+63*32 + 1) & 0xffff,r1
- sub r1, r0, r0
-L0: ptrel r0,tr0
- add r2,r4,r5
- ptabs r18,tr1
- add r3,r4,r6
- blink tr0,r63
-
-/* Rearranged to make cut2 safe */
- .balign 8
-L4_7: /* 4..7 byte memcpy cntd. */
- stlo.l r2, 0, r0
- or r6, r7, r6
- sthi.l r5, -1, r6
- stlo.l r5, -4, r6
- blink tr1,r63
-
- .balign 8
-L1: /* 0 byte memcpy */
- nop
- blink tr1,r63
- nop
- nop
- nop
- nop
-
-L2_3: /* 2 or 3 byte memcpy cntd. */
- st.b r5,-1,r6
- blink tr1,r63
-
- /* 1 byte memcpy */
- ld.b r3,0,r0
- st.b r2,0,r0
- blink tr1,r63
-
-L8_15: /* 8..15 byte memcpy cntd. */
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
- /* 2 or 3 byte memcpy */
- ld.b r3,0,r0
- nop ! ld.b r2,0,r63 ! TAKum03020
- ld.b r3,1,r1
- st.b r2,0,r0
- pta/l L2_3,tr0
- ld.b r6,-1,r6
- st.b r2,1,r1
- blink tr0, r63
-
- /* 4 .. 7 byte memcpy */
- LDUAL (r3, 0, r0, r1)
- pta L4_7, tr0
- ldlo.l r6, -4, r7
- or r0, r1, r0
- sthi.l r2, 3, r0
- ldhi.l r6, -1, r6
- blink tr0, r63
-
- /* 8 .. 15 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- pta L8_15, tr0
- ldlo.q r6, -8, r7
- or r0, r1, r0
- sthi.q r2, 7, r0
- ldhi.q r6, -1, r6
- blink tr0, r63
-
- /* 16 .. 24 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- LDUAQ (r3, 8, r8, r9)
- or r0, r1, r0
- sthi.q r2, 7, r0
- or r8, r9, r8
- sthi.q r2, 15, r8
- ldlo.q r6, -8, r7
- ldhi.q r6, -1, r6
- stlo.q r2, 8, r8
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
-Large:
- ! ld.b r2, 0, r63 ! TAKum03020
- pta/l Loop_ua, tr1
- ori r3, -8, r7
- sub r2, r7, r22
- sub r3, r2, r6
- add r2, r4, r5
- ldlo.q r3, 0, r0
- addi r5, -16, r5
- movi 64+8, r27 ! could subtract r7 from that.
- stlo.q r2, 0, r0
- sthi.q r2, 7, r0
- ldx.q r22, r6, r0
- bgtu/l r27, r4, tr1
-
- addi r5, -48, r27
- pta/l Loop_line, tr0
- addi r6, 64, r36
- addi r6, -24, r19
- addi r6, -16, r20
- addi r6, -8, r21
-
-Loop_line:
- ! ldx.q r22, r36, r63 ! TAKum03020
- alloco r22, 32
- synco
- addi r22, 32, r22
- ldx.q r22, r19, r23
- sthi.q r22, -25, r0
- ldx.q r22, r20, r24
- ldx.q r22, r21, r25
- stlo.q r22, -32, r0
- ldx.q r22, r6, r0
- sthi.q r22, -17, r23
- sthi.q r22, -9, r24
- sthi.q r22, -1, r25
- stlo.q r22, -24, r23
- stlo.q r22, -16, r24
- stlo.q r22, -8, r25
- bgeu r27, r22, tr0
-
-Loop_ua:
- addi r22, 8, r22
- sthi.q r22, -1, r0
- stlo.q r22, -8, r0
- ldx.q r22, r6, r0
- bgtu/l r5, r22, tr1
-
- add r3, r4, r7
- ldlo.q r7, -8, r1
- sthi.q r22, 7, r0
- ldhi.q r7, -1, r7
- ptabs r18,tr1
- stlo.q r22, 0, r0
- or r1, r7, r1
- sthi.q r5, 15, r1
- stlo.q r5, 8, r1
- blink tr1, r63
-copy_user_memcpy_end:
- nop
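
The LDUAQ/STUAQ macros above synthesise an unaligned 64-bit access from a pair of aligned ones: ldlo/ldhi fetch the low and high fragments of the straddled quadword, which the following "or" merges. A hedged portable equivalent (names illustrative); compilers lower the memcpy() to a comparable sequence on strict-alignment targets:

    #include <stdint.h>
    #include <string.h>

    static uint64_t load_unaligned_u64(const void *p)
    {
            uint64_t v;

            memcpy(&v, p, sizeof(v));
            return v;
    }

    static void store_unaligned_u64(void *p, uint64_t v)
    {
            memcpy(p, &v, sizeof(v));
    }
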
diff --git a/arch/sh/lib64/memcpy.S b/arch/sh/lib64/memcpy.S
deleted file mode 100644
index 231ea595b39a..000000000000
--- a/arch/sh/lib64/memcpy.S
+++ /dev/null
@@ -1,202 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
-/* Modified by SuperH, Inc. September 2003 */
-!
-! Fast SH memcpy
-!
-! by Toshiyasu Morita (tm@netcom.com)
-! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
-! SH5 code Copyright 2002 SuperH Ltd.
-!
-! Entry: ARG0: destination pointer
-! ARG1: source pointer
-! ARG2: byte count
-!
-! Exit: RESULT: destination pointer
-! any other registers in the range r0-r7: trashed
-!
-! Notes: Usually one wants to do small reads and write a longword, but
-! unfortunately it is difficult in some cases to concatenate bytes
-! into a longword on the SH, so this does a longword read and small
-! writes.
-!
-! This implementation makes two assumptions about how it is called:
-!
-! 1.: If the byte count is nonzero, the address of the last byte to be
-! copied is unsigned greater than the address of the first byte to
-! be copied. This could be easily swapped for a signed comparison,
-! but the algorithm used needs some comparison.
-!
-! 2.: When there are two or three bytes in the last word of an 11-or-more
-! bytes memory chunk to be copied, the rest of the word can be read
-! without side effects.
-! This could be easily changed by increasing the minimum size of
-! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
-! however, this would cost a few extra cycles on average.
-! For SHmedia, the assumption is that any quadword can be read in its
-! entirety if at least one byte is included in the copy.
-!
-
- .section .text..SHmedia32,"ax"
- .globl memcpy
- .type memcpy, @function
- .align 5
-
-memcpy:
-
-#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
-#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
-#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
-#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
-
- ld.b r3,0,r63
- pta/l Large,tr0
- movi 25,r0
- bgeu/u r4,r0,tr0
- nsb r4,r0
- shlli r0,5,r0
- movi (L1-L0+63*32 + 1) & 0xffff,r1
- sub r1, r0, r0
-L0: ptrel r0,tr0
- add r2,r4,r5
- ptabs r18,tr1
- add r3,r4,r6
- blink tr0,r63
-
-/* Rearranged to make cut2 safe */
- .balign 8
-L4_7: /* 4..7 byte memcpy cntd. */
- stlo.l r2, 0, r0
- or r6, r7, r6
- sthi.l r5, -1, r6
- stlo.l r5, -4, r6
- blink tr1,r63
-
- .balign 8
-L1: /* 0 byte memcpy */
- nop
- blink tr1,r63
- nop
- nop
- nop
- nop
-
-L2_3: /* 2 or 3 byte memcpy cntd. */
- st.b r5,-1,r6
- blink tr1,r63
-
- /* 1 byte memcpy */
- ld.b r3,0,r0
- st.b r2,0,r0
- blink tr1,r63
-
-L8_15: /* 8..15 byte memcpy cntd. */
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
- /* 2 or 3 byte memcpy */
- ld.b r3,0,r0
- ld.b r2,0,r63
- ld.b r3,1,r1
- st.b r2,0,r0
- pta/l L2_3,tr0
- ld.b r6,-1,r6
- st.b r2,1,r1
- blink tr0, r63
-
- /* 4 .. 7 byte memcpy */
- LDUAL (r3, 0, r0, r1)
- pta L4_7, tr0
- ldlo.l r6, -4, r7
- or r0, r1, r0
- sthi.l r2, 3, r0
- ldhi.l r6, -1, r6
- blink tr0, r63
-
- /* 8 .. 15 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- pta L8_15, tr0
- ldlo.q r6, -8, r7
- or r0, r1, r0
- sthi.q r2, 7, r0
- ldhi.q r6, -1, r6
- blink tr0, r63
-
- /* 16 .. 24 byte memcpy */
- LDUAQ (r3, 0, r0, r1)
- LDUAQ (r3, 8, r8, r9)
- or r0, r1, r0
- sthi.q r2, 7, r0
- or r8, r9, r8
- sthi.q r2, 15, r8
- ldlo.q r6, -8, r7
- ldhi.q r6, -1, r6
- stlo.q r2, 8, r8
- stlo.q r2, 0, r0
- or r6, r7, r6
- sthi.q r5, -1, r6
- stlo.q r5, -8, r6
- blink tr1,r63
-
-Large:
- ld.b r2, 0, r63
- pta/l Loop_ua, tr1
- ori r3, -8, r7
- sub r2, r7, r22
- sub r3, r2, r6
- add r2, r4, r5
- ldlo.q r3, 0, r0
- addi r5, -16, r5
- movi 64+8, r27 // could subtract r7 from that.
- stlo.q r2, 0, r0
- sthi.q r2, 7, r0
- ldx.q r22, r6, r0
- bgtu/l r27, r4, tr1
-
- addi r5, -48, r27
- pta/l Loop_line, tr0
- addi r6, 64, r36
- addi r6, -24, r19
- addi r6, -16, r20
- addi r6, -8, r21
-
-Loop_line:
- ldx.q r22, r36, r63
- alloco r22, 32
- addi r22, 32, r22
- ldx.q r22, r19, r23
- sthi.q r22, -25, r0
- ldx.q r22, r20, r24
- ldx.q r22, r21, r25
- stlo.q r22, -32, r0
- ldx.q r22, r6, r0
- sthi.q r22, -17, r23
- sthi.q r22, -9, r24
- sthi.q r22, -1, r25
- stlo.q r22, -24, r23
- stlo.q r22, -16, r24
- stlo.q r22, -8, r25
- bgeu r27, r22, tr0
-
-Loop_ua:
- addi r22, 8, r22
- sthi.q r22, -1, r0
- stlo.q r22, -8, r0
- ldx.q r22, r6, r0
- bgtu/l r5, r22, tr1
-
- add r3, r4, r7
- ldlo.q r7, -8, r1
- sthi.q r22, 7, r0
- ldhi.q r7, -1, r7
- ptabs r18,tr1
- stlo.q r22, 0, r0
- or r1, r7, r1
- sthi.q r5, 15, r1
- stlo.q r5, 8, r1
- blink tr1, r63
-
- .size memcpy,.-memcpy
diff --git a/arch/sh/lib64/memset.S b/arch/sh/lib64/memset.S
deleted file mode 100644
index 453aa5f1d263..000000000000
--- a/arch/sh/lib64/memset.S
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
-/* Modified by SuperH, Inc. September 2003 */
-!
-! Fast SH memset
-!
-! by Toshiyasu Morita (tm@netcom.com)
-!
-! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
-! Copyright 2002 SuperH Ltd.
-!
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define SHHI shlld
-#define SHLO shlrd
-#else
-#define SHHI shlrd
-#define SHLO shlld
-#endif
-
- .section .text..SHmedia32,"ax"
- .globl memset
- .type memset, @function
-
- .align 5
-
-memset:
- pta/l multiquad, tr0
- andi r2, 7, r22
- ptabs r18, tr2
- mshflo.b r3,r3,r3
- add r4, r22, r23
- mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
-
- movi 8, r9
- bgtu/u r23, r9, tr0 // multiquad
-
- beqi/u r4, 0, tr2 // Return with size 0 - ensures no mem accesses
- ldlo.q r2, 0, r7
- shlli r4, 2, r4
- movi -1, r8
- SHHI r8, r4, r8
- SHHI r8, r4, r8
- mcmv r7, r8, r3
- stlo.q r2, 0, r3
- blink tr2, r63
-
-multiquad:
- pta/l lastquad, tr0
- stlo.q r2, 0, r3
- shlri r23, 3, r24
- add r2, r4, r5
- beqi/u r24, 1, tr0 // lastquad
- pta/l loop, tr1
- sub r2, r22, r25
- andi r5, -8, r20 // calculate end address and
- addi r20, -7*8, r8 // loop end address; This might overflow, so we need
-	addi r20, -7*8, r8 // loop end address; this might overflow, so we need
- bge/u r24, r9, tr1 // loop
- st.q r25, 8, r3
- st.q r20, -8, r3
- shlri r24, 1, r24
- beqi/u r24, 1, tr0 // lastquad
- st.q r25, 16, r3
- st.q r20, -16, r3
- beqi/u r24, 2, tr0 // lastquad
- st.q r25, 24, r3
- st.q r20, -24, r3
-lastquad:
- sthi.q r5, -1, r3
- blink tr2,r63
-
-loop:
-!!! alloco r25, 32 // QQQ comment out for short-term fix to SHUK #3895.
-			// QQQ commenting out is logically correct, but sub-optimal
- // QQQ Sean McGoogan - 4th April 2003.
- st.q r25, 8, r3
- st.q r25, 16, r3
- st.q r25, 24, r3
- st.q r25, 32, r3
- addi r25, 32, r25
- bgeu/l r8, r25, tr1 // loop
-
- st.q r20, -40, r3
- st.q r20, -32, r3
- st.q r20, -24, r3
- st.q r20, -16, r3
- st.q r20, -8, r3
- sthi.q r5, -1, r3
- blink tr2,r63
-
- .size memset,.-memset
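
The mshflo.b/mperm.w pair at the top of memset builds the store pattern by replicating the fill byte into every byte of a quadword. Arithmetically that is a multiplication by 0x0101010101010101, as this hedged sketch shows (the name is illustrative):

    #include <stdint.h>

    static uint64_t broadcast_byte(uint8_t c)
    {
            /* e.g. 0x5a becomes 0x5a5a5a5a5a5a5a5a */
            return (uint64_t)c * 0x0101010101010101ULL;
    }
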
diff --git a/arch/sh/lib64/panic.c b/arch/sh/lib64/panic.c
deleted file mode 100644
index 38c954e04f6a..000000000000
--- a/arch/sh/lib64/panic.c
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (C) 2003 Richard Curnow, SuperH UK Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-void
-panic_handler(unsigned long panicPC, unsigned long panicSSR,
- unsigned long panicEXPEVT)
-{
- /* Never return from the panic handler */
- for (;;) ;
-}
diff --git a/arch/sh/lib64/sdivsi3.S b/arch/sh/lib64/sdivsi3.S
deleted file mode 100644
index b422e2374430..000000000000
--- a/arch/sh/lib64/sdivsi3.S
+++ /dev/null
@@ -1,136 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
- .global __sdivsi3
- .global __sdivsi3_1
- .global __sdivsi3_2
- .section .text..SHmedia32,"ax"
- .align 2
-
- /* inputs: r4,r5 */
- /* clobbered: r1,r18,r19,r20,r21,r25,tr0 */
- /* result in r0 */
-__sdivsi3:
-__sdivsi3_1:
- ptb __div_table,tr0
- gettr tr0,r20
-
-__sdivsi3_2:
- nsb r5, r1
- shlld r5, r1, r25 /* normalize; [-2 ..1, 1..2) in s2.62 */
- shari r25, 58, r21 /* extract 5(6) bit index (s2.4 with hole -1..1) */
- /* bubble */
- ldx.ub r20, r21, r19 /* u0.8 */
- shari r25, 32, r25 /* normalize to s2.30 */
- shlli r21, 1, r21
- muls.l r25, r19, r19 /* s2.38 */
- ldx.w r20, r21, r21 /* s2.14 */
- ptabs r18, tr0
- shari r19, 24, r19 /* truncate to s2.14 */
- sub r21, r19, r19 /* some 11 bit inverse in s1.14 */
- muls.l r19, r19, r21 /* u0.28 */
- sub r63, r1, r1
- addi r1, 92, r1
- muls.l r25, r21, r18 /* s2.58 */
- shlli r19, 45, r19 /* multiply by two and convert to s2.58 */
- /* bubble */
- sub r19, r18, r18
- shari r18, 28, r18 /* some 22 bit inverse in s1.30 */
- muls.l r18, r25, r0 /* s2.60 */
- muls.l r18, r4, r25 /* s32.30 */
- /* bubble */
- shari r0, 16, r19 /* s-16.44 */
- muls.l r19, r18, r19 /* s-16.74 */
- shari r25, 63, r0
- shari r4, 14, r18 /* s19.-14 */
- shari r19, 30, r19 /* s-16.44 */
- muls.l r19, r18, r19 /* s15.30 */
- xor r21, r0, r21 /* You could also use the constant 1 << 27. */
- add r21, r25, r21
- sub r21, r19, r21
- shard r21, r1, r21
- sub r21, r0, r0
- blink tr0, r63
-
-/* This table has been generated by divtab.c .
-Defects for bias -330:
- Max defect: 6.081536e-07 at -1.000000e+00
- Min defect: 2.849516e-08 at 1.030651e+00
- Max 2nd step defect: 9.606539e-12 at -1.000000e+00
- Min 2nd step defect: 0.000000e+00 at 0.000000e+00
- Defect at 1: 1.238659e-07
- Defect at -2: 1.061708e-07 */
-
- .balign 2
- .type __div_table,@object
- .size __div_table,128
-/* negative division constants */
- .word -16638
- .word -17135
- .word -17737
- .word -18433
- .word -19103
- .word -19751
- .word -20583
- .word -21383
- .word -22343
- .word -23353
- .word -24407
- .word -25582
- .word -26863
- .word -28382
- .word -29965
- .word -31800
-/* negative division factors */
- .byte 66
- .byte 70
- .byte 75
- .byte 81
- .byte 87
- .byte 93
- .byte 101
- .byte 109
- .byte 119
- .byte 130
- .byte 142
- .byte 156
- .byte 172
- .byte 192
- .byte 214
- .byte 241
- .skip 16
- .global __div_table
-__div_table:
- .skip 16
-/* positive division factors */
- .byte 241
- .byte 214
- .byte 192
- .byte 172
- .byte 156
- .byte 142
- .byte 130
- .byte 119
- .byte 109
- .byte 101
- .byte 93
- .byte 87
- .byte 81
- .byte 75
- .byte 70
- .byte 66
-/* positive division constants */
- .word 31801
- .word 29966
- .word 28383
- .word 26864
- .word 25583
- .word 24408
- .word 23354
- .word 22344
- .word 21384
- .word 20584
- .word 19752
- .word 19104
- .word 18434
- .word 17738
- .word 17136
- .word 16639
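
The routine seeds an approximate reciprocal from __div_table and refines it before multiplying by the dividend. A hedged floating-point sketch of that structure (the real code stays in fixed point — s2.62, s2.30 and friends — and applies the rounding corrections the comments describe; the name is illustrative):

    #include <stdint.h>

    static int32_t sdiv_sketch(int32_t n, int32_t d)
    {
            double r = 1.0 / (double)d;        /* stands in for the table seed */

            r = r * (2.0 - (double)d * r);     /* one Newton-Raphson refinement */
            return (int32_t)((double)n * r);   /* real code also fixes up rounding */
    }
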
diff --git a/arch/sh/lib64/strcpy.S b/arch/sh/lib64/strcpy.S
deleted file mode 100644
index b61631e523d4..000000000000
--- a/arch/sh/lib64/strcpy.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
-/* Modified by SuperH, Inc. September 2003 */
-! Entry: arg0: destination
-! arg1: source
-! Exit: result: destination
-!
-! SH5 code Copyright 2002 SuperH Ltd.
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define SHHI shlld
-#define SHLO shlrd
-#else
-#define SHHI shlrd
-#define SHLO shlld
-#endif
-
- .section .text..SHmedia32,"ax"
- .globl strcpy
- .type strcpy, @function
- .align 5
-
-strcpy:
-
- pta/l shortstring,tr1
- ldlo.q r3,0,r4
- ptabs r18,tr4
- shlli r3,3,r7
- addi r2, 8, r0
- mcmpeq.b r4,r63,r6
- SHHI r6,r7,r6
- bnei/u r6,0,tr1 // shortstring
- pta/l no_lddst, tr2
- ori r3,-8,r23
- sub r2, r23, r0
- sub r3, r2, r21
- addi r21, 8, r20
- ldx.q r0, r21, r5
- pta/l loop, tr0
- ori r2,-8,r22
- mcmpeq.b r5, r63, r6
- bgt/u r22, r23, tr2 // no_lddst
-
- // r22 < r23 : Need to do a load from the destination.
- // r22 == r23 : Doesn't actually need to load from destination,
- // but still can be handled here.
- ldlo.q r2, 0, r9
- movi -1, r8
- SHLO r8, r7, r8
- mcmv r4, r8, r9
- stlo.q r2, 0, r9
- beqi/l r6, 0, tr0 // loop
-
- add r5, r63, r4
- addi r0, 8, r0
- blink tr1, r63 // shortstring
-no_lddst:
- // r22 > r23: note that for r22 == r23 the sthi.q would clobber
- // bytes before the destination region.
- stlo.q r2, 0, r4
- SHHI r4, r7, r4
- sthi.q r0, -1, r4
- beqi/l r6, 0, tr0 // loop
-
- add r5, r63, r4
- addi r0, 8, r0
-shortstring:
-#if __BYTE_ORDER != __LITTLE_ENDIAN
- pta/l shortstring2,tr1
- byterev r4,r4
-#endif
-shortstring2:
- st.b r0,-8,r4
- andi r4,0xff,r5
- shlri r4,8,r4
- addi r0,1,r0
- bnei/l r5,0,tr1
- blink tr4,r63 // return
-
- .balign 8
-loop:
- stlo.q r0, 0, r5
- ldx.q r0, r20, r4
- addi r0, 16, r0
- sthi.q r0, -9, r5
- mcmpeq.b r4, r63, r6
- bnei/u r6, 0, tr1 // shortstring
- ldx.q r0, r21, r5
- stlo.q r0, -8, r4
- sthi.q r0, -1, r4
- mcmpeq.b r5, r63, r6
- beqi/l r6, 0, tr0 // loop
-
- add r5, r63, r4
- addi r0, 8, r0
- blink tr1, r63 // shortstring
-
- .size strcpy,.-strcpy
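
mcmpeq.b gives the copy loop a per-byte "is this byte NUL?" mask for a whole quadword at once. The standard portable trick for the same test, as a hedged sketch (the name is illustrative):

    #include <stdint.h>

    static int word_has_zero_byte(uint64_t v)
    {
            /* Subtracting 1 from each byte borrows exactly where a byte was
             * zero; mask the per-byte sign bits to detect it. */
            return ((v - 0x0101010101010101ULL) & ~v &
                    0x8080808080808080ULL) != 0;
    }
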
diff --git a/arch/sh/lib64/strlen.S b/arch/sh/lib64/strlen.S
deleted file mode 100644
index c00b972f9999..000000000000
--- a/arch/sh/lib64/strlen.S
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Simplistic strlen() implementation for SHmedia.
- *
- * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
- */
-
- .section .text..SHmedia32,"ax"
- .globl strlen
- .type strlen,@function
-
- .balign 16
-strlen:
- ptabs r18, tr4
-
- /*
- * Note: We could easily deal with the NULL case here with a simple
- * sanity check, though it seems that the behavior we want is to fault
- * in the event that r2 == NULL, so we don't bother.
- */
-/* beqi r2, 0, tr4 */ ! Sanity check
-
- movi -1, r0
- pta/l loop, tr0
-loop:
- ld.b r2, 0, r1
- addi r2, 1, r2
- addi r0, 1, r0
- bnei/l r1, 0, tr0
-
- or r0, r63, r2
- blink tr4, r63
-
- .size strlen,.-strlen
diff --git a/arch/sh/lib64/udelay.c b/arch/sh/lib64/udelay.c
deleted file mode 100644
index f215b063da70..000000000000
--- a/arch/sh/lib64/udelay.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * arch/sh/lib64/udelay.c
- *
- * Delay routines, using a pre-computed "loops_per_jiffy" value.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003, 2004 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/sched.h>
-#include <asm/param.h>
-
-/*
- * Use only for very small delays (< 1 msec).
- *
- * The active part of our cycle counter is only 32-bits wide, and
- * we're treating the difference between two marks as signed. On
- * a 1GHz box, that's about 2 seconds.
- */
-
-void __delay(unsigned long loops)
-{
- long long dummy;
- __asm__ __volatile__("gettr tr0, %1\n\t"
- "pta $+4, tr0\n\t"
- "addi %0, -1, %0\n\t"
- "bne %0, r63, tr0\n\t"
- "ptabs %1, tr0\n\t":"=r"(loops),
- "=r"(dummy)
- :"0"(loops));
-}
-
-void __const_udelay(unsigned long xloops)
-{
- __delay(xloops * (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy));
-}
-
-void __udelay(unsigned long usecs)
-{
- __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
-}
-
-void __ndelay(unsigned long nsecs)
-{
- __const_udelay(nsecs * 0x00000005);
-}
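
The magic multipliers in __udelay() and __ndelay() are fractions scaled by 2^32, so the subsequent multiply by HZ * loops_per_jiffy turns microseconds or nanoseconds into delay loops. A standalone check of the constants (hedged, illustration only):

    #include <stdio.h>

    int main(void)
    {
            unsigned long us = (unsigned long)(4294967296.0 / 1e6);

            printf("2^32 / 10^6 = 0x%lx\n", us);    /* 0x10c6, as above */
            printf("2^32 / 10^9 = %.3f -> 0x5 rounded up\n",
                   4294967296.0 / 1e9);
            return 0;
    }
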
diff --git a/arch/sh/lib64/udivdi3.S b/arch/sh/lib64/udivdi3.S
deleted file mode 100644
index c032cb157589..000000000000
--- a/arch/sh/lib64/udivdi3.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
- .section .text..SHmedia32,"ax"
- .align 2
- .global __udivdi3
-__udivdi3:
- shlri r3,1,r4
- nsb r4,r22
- shlld r3,r22,r6
- shlri r6,49,r5
- movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
- sub r21,r5,r1
- mmulfx.w r1,r1,r4
- mshflo.w r1,r63,r1
- sub r63,r22,r20 // r63 == 64 % 64
- mmulfx.w r5,r4,r4
- pta large_divisor,tr0
- addi r20,32,r9
- msub.w r1,r4,r1
- madd.w r1,r1,r1
- mmulfx.w r1,r1,r4
- shlri r6,32,r7
- bgt/u r9,r63,tr0 // large_divisor
- mmulfx.w r5,r4,r4
- shlri r2,32+14,r19
- addi r22,-31,r0
- msub.w r1,r4,r1
-
- mulu.l r1,r7,r4
- addi r1,-3,r5
- mulu.l r5,r19,r5
- sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
- shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
- the case may be, %0000000000000000 000.11111111111, still */
- muls.l r1,r4,r4 /* leaving at least one sign bit. */
- mulu.l r5,r3,r8
- mshalds.l r1,r21,r1
- shari r4,26,r4
- shlld r8,r0,r8
- add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
- sub r2,r8,r2
- /* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */
-
- shlri r2,22,r21
- mulu.l r21,r1,r21
- shlld r5,r0,r8
- addi r20,30-22,r0
- shlrd r21,r0,r21
- mulu.l r21,r3,r5
- add r8,r21,r8
- mcmpgt.l r21,r63,r21 // See Note 1
- addi r20,30,r0
- mshfhi.l r63,r21,r21
- sub r2,r5,r2
- andc r2,r21,r2
-
- /* small divisor: need a third divide step */
- mulu.l r2,r1,r7
- ptabs r18,tr0
- addi r2,1,r2
- shlrd r7,r0,r7
- mulu.l r7,r3,r5
- add r8,r7,r8
- sub r2,r3,r2
- cmpgt r2,r5,r5
- add r8,r5,r2
- /* could test r3 here to check for divide by zero. */
- blink tr0,r63
-
-large_divisor:
- mmulfx.w r5,r4,r4
- shlrd r2,r9,r25
- shlri r25,32,r8
- msub.w r1,r4,r1
-
- mulu.l r1,r7,r4
- addi r1,-3,r5
- mulu.l r5,r8,r5
- sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
- shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
- the case may be, %0000000000000000 000.11111111111, still */
- muls.l r1,r4,r4 /* leaving at least one sign bit. */
- shlri r5,14-1,r8
- mulu.l r8,r7,r5
- mshalds.l r1,r21,r1
- shari r4,26,r4
- add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
- sub r25,r5,r25
- /* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */
-
- shlri r25,22,r21
- mulu.l r21,r1,r21
- pta no_lo_adj,tr0
- addi r22,32,r0
- shlri r21,40,r21
- mulu.l r21,r7,r5
- add r8,r21,r8
- shlld r2,r0,r2
- sub r25,r5,r25
- bgtu/u r7,r25,tr0 // no_lo_adj
- addi r8,1,r8
- sub r25,r7,r25
-no_lo_adj:
- mextr4 r2,r25,r2
-
- /* large_divisor: only needs a few adjustments. */
- mulu.l r8,r6,r5
- ptabs r18,tr0
- /* bubble */
- cmpgtu r5,r2,r5
- sub r8,r5,r2
- blink tr0,r63
-
-/* Note 1: To shift the result of the second divide stage so that the result
- always fits into 32 bits, yet we still reduce the rest sufficiently
- would require a lot of instructions to do the shifts just right. Using
- the full 64 bit shift result to multiply with the divisor would require
- four extra instructions for the upper 32 bits (shift / mulu / shift / sub).
- Fortunately, if the upper 32 bits of the shift result are nonzero, we
- know that the rest after taking this partial result into account will
- fit into 32 bits. So we just clear the upper 32 bits of the rest if the
- upper 32 bits of the partial result are nonzero. */
diff --git a/arch/sh/lib64/udivsi3.S b/arch/sh/lib64/udivsi3.S
deleted file mode 100644
index e4788fb4fe82..000000000000
--- a/arch/sh/lib64/udivsi3.S
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
- .global __udivsi3
- .section .text..SHmedia32,"ax"
- .align 2
-
-/*
- inputs: r4,r5
- clobbered: r18,r19,r20,r21,r22,r25,tr0
- result in r0.
- */
-__udivsi3:
- addz.l r5,r63,r22
- nsb r22,r0
- shlld r22,r0,r25
- shlri r25,48,r25
-	movi 0xffffffffffffbb0c,r20 /* shift count equiv 76 */
- sub r20,r25,r21
- mmulfx.w r21,r21,r19
- mshflo.w r21,r63,r21
- ptabs r18,tr0
- mmulfx.w r25,r19,r19
- sub r20,r0,r0
- /* bubble */
- msub.w r21,r19,r19
-
- /*
- * It would be nice for scheduling to do this add to r21 before
- * the msub.w, but we need a different value for r19 to keep
- * errors under control.
- */
- addi r19,-2,r21
- mulu.l r4,r21,r18
- mmulfx.w r19,r19,r19
- shlli r21,15,r21
- shlrd r18,r0,r18
- mulu.l r18,r22,r20
- mmacnfx.wl r25,r19,r21
- /* bubble */
- sub r4,r20,r25
-
- mulu.l r25,r21,r19
- addi r0,14,r0
- /* bubble */
- shlrd r19,r0,r19
- mulu.l r19,r22,r20
- add r18,r19,r18
- /* bubble */
- sub.l r25,r20,r25
-
- mulu.l r25,r21,r19
- addz.l r25,r63,r25
- sub r25,r22,r25
- shlrd r19,r0,r19
- mulu.l r19,r22,r20
- addi r25,1,r25
- add r18,r19,r18
-
- cmpgt r25,r20,r25
- add.l r18,r25,r0
- blink tr0,r63
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 5c8a2ebfc720..6c39d24ad919 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -15,8 +15,7 @@ config MMU
config PAGE_OFFSET
hex
- default "0x80000000" if MMU && SUPERH32
- default "0x20000000" if MMU && SUPERH64
+ default "0x80000000" if MMU
default "0x00000000"
config FORCE_MAX_ZONEORDER
@@ -72,12 +71,11 @@ config MEMORY_SIZE
config 29BIT
def_bool !32BIT
- depends on SUPERH32
select UNCACHED_MAPPING
config 32BIT
bool
- default y if CPU_SH5 || !MMU
+ default !MMU
config PMB
bool "Support 32-bit physical addressing through PMB"
@@ -152,7 +150,7 @@ config ARCH_MEMORY_PROBE
config IOREMAP_FIXED
def_bool y
- depends on X2TLB || SUPERH64
+ depends on X2TLB
config UNCACHED_MAPPING
bool
@@ -184,7 +182,7 @@ config PAGE_SIZE_16KB
config PAGE_SIZE_64KB
bool "64kB"
- depends on !MMU || CPU_SH4 || CPU_SH5
+ depends on !MMU || CPU_SH4
help
This enables support for 64kB pages, possible on all SH-4
CPUs and later.
@@ -216,10 +214,6 @@ config HUGETLB_PAGE_SIZE_64MB
bool "64MB"
depends on X2TLB
-config HUGETLB_PAGE_SIZE_512MB
- bool "512MB"
- depends on CPU_SH5
-
endchoice
config SCHED_MC
@@ -242,7 +236,7 @@ config SH7705_CACHE_32KB
choice
prompt "Cache mode"
- default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
+ default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
config CACHE_WRITEBACK
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 5051b38fd5b6..487da0ff03b3 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -10,15 +10,14 @@ cacheops-$(CONFIG_CPU_SUBTYPE_SH7619) := cache-sh2.o
cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
-cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
mmu-y := nommu.o extable_32.o
-mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o ioremap.o kmap.o \
- pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
+mmu-$(CONFIG_MMU) := extable_32.o fault.o ioremap.o kmap.o \
+ pgtable.o tlbex_32.o tlbflush_32.o
obj-y += $(mmu-y)
@@ -31,7 +30,6 @@ ifdef CONFIG_MMU
debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
-tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif
@@ -46,29 +44,4 @@ obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
GCOV_PROFILE_pmb.o := n
-# Special flags for tlbex_64.o. This puts restrictions on the number of
-# caller-save registers that the compiler can target when building this file.
-# This is required because the code is called from a context in entry.S where
-# very few registers have been saved in the exception handler (for speed
-# reasons).
-# The caller save registers that have been saved and which can be used are
-# r2,r3,r4,r5 : argument passing
-# r15, r18 : SP and LINK
-# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make
-# use of them, so it's probably beneficial to performance to save them
-# and have them available for it.
-#
-# The resources not listed below are callee save, i.e. the compiler is free to
-# use any of them and will spill them to the stack itself.
-
-CFLAGS_tlbex_64.o += -ffixed-r7 \
- -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
- -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
- -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
- -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
- -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
- -ffixed-r41 -ffixed-r42 -ffixed-r43 \
- -ffixed-r60 -ffixed-r61 -ffixed-r62 \
- -fomit-frame-pointer
-
ccflags-y := -Werror
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 8172a171d727..26f3bd43e850 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -12,7 +12,6 @@
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 45943bcb7042..ddfa9685f1ef 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -16,7 +16,6 @@
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
@@ -183,7 +182,7 @@ static void sh4_flush_cache_all(void *unused)
* accessed with (hence cache set) is in accord with the physical
* address (i.e. tag). It's no different here.
*
- * Caller takes mm->mmap_sem.
+ * Caller takes mm->mmap_lock.
*/
static void sh4_flush_cache_mm(void *arg)
{
@@ -208,9 +207,6 @@ static void sh4_flush_cache_page(void *args)
struct page *page;
unsigned long address, pfn, phys;
int map_coherent = 0;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
pte_t *pte;
void *vaddr;
@@ -224,10 +220,7 @@ static void sh4_flush_cache_page(void *args)
if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
return;
- pgd = pgd_offset(vma->vm_mm, address);
- p4d = p4d_offset(pgd, address);
- pud = pud_offset(p4d, address);
- pmd = pmd_offset(pud, address);
+ pmd = pmd_off(vma->vm_mm, address);
pte = pte_offset_kernel(pmd, address);
/* If the page isn't present, there is nothing to do here. */
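
pmd_off() is a generic helper that performs exactly the four-level walk deleted above. A hedged kernel-context sketch of the equivalence (the name is illustrative):

    #include <linux/pgtable.h>

    static pmd_t *pmd_off_sketch(struct mm_struct *mm, unsigned long addr)
    {
            return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr),
                                                    addr), addr), addr);
    }
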
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
deleted file mode 100644
index 442a77cc2957..000000000000
--- a/arch/sh/mm/cache-sh5.c
+++ /dev/null
@@ -1,626 +0,0 @@
-/*
- * arch/sh/mm/cache-sh5.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2002 Benedict Gaster
- * Copyright (C) 2003 Richard Curnow
- * Copyright (C) 2003 - 2008 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <asm/tlb.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/pgalloc.h>
-#include <linux/uaccess.h>
-#include <asm/mmu_context.h>
-
-extern void __weak sh4__flush_region_init(void);
-
-/* Wired TLB entry for the D-cache */
-static unsigned long long dtlb_cache_slot;
-
-/*
- * The following group of functions deal with mapping and unmapping a
- * temporary page into a DTLB slot that has been set aside for exclusive
- * use.
- */
-static inline void
-sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
- unsigned long paddr)
-{
- local_irq_disable();
- sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
-}
-
-static inline void sh64_teardown_dtlb_cache_slot(void)
-{
- sh64_teardown_tlb_slot(dtlb_cache_slot);
- local_irq_enable();
-}
-
-static inline void sh64_icache_inv_all(void)
-{
- unsigned long long addr, flag, data;
- unsigned long flags;
-
- addr = ICCR0;
- flag = ICCR0_ICI;
- data = 0;
-
- /* Make this a critical section for safety (probably not strictly necessary.) */
- local_irq_save(flags);
-
-	/* Without %1 it gets inexplicably wrong */
- __asm__ __volatile__ (
- "getcfg %3, 0, %0\n\t"
- "or %0, %2, %0\n\t"
- "putcfg %3, 0, %0\n\t"
- "synci"
- : "=&r" (data)
- : "0" (data), "r" (flag), "r" (addr));
-
- local_irq_restore(flags);
-}
-
-static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
-{
- /* Invalidate range of addresses [start,end] from the I-cache, where
- * the addresses lie in the kernel superpage. */
-
- unsigned long long ullend, addr, aligned_start;
- aligned_start = (unsigned long long)(signed long long)(signed long) start;
- addr = L1_CACHE_ALIGN(aligned_start);
- ullend = (unsigned long long) (signed long long) (signed long) end;
-
- while (addr <= ullend) {
- __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
- addr += L1_CACHE_BYTES;
- }
-}
-
-static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
-{
- /* If we get called, we know that vma->vm_flags contains VM_EXEC.
- Also, eaddr is page-aligned. */
- unsigned int cpu = smp_processor_id();
- unsigned long long addr, end_addr;
- unsigned long flags = 0;
- unsigned long running_asid, vma_asid;
- addr = eaddr;
- end_addr = addr + PAGE_SIZE;
-
- /* Check whether we can use the current ASID for the I-cache
- invalidation. For example, if we're called via
- access_process_vm->flush_cache_page->here, (e.g. when reading from
- /proc), 'running_asid' will be that of the reader, not of the
- victim.
-
- Also, note the risk that we might get pre-empted between the ASID
- compare and blocking IRQs, and before we regain control, the
- pid->ASID mapping changes. However, the whole cache will get
- invalidated when the mapping is renewed, so the worst that can
- happen is that the loop below ends up invalidating somebody else's
- cache entries.
- */
-
- running_asid = get_asid();
- vma_asid = cpu_asid(cpu, vma->vm_mm);
- if (running_asid != vma_asid) {
- local_irq_save(flags);
- switch_and_save_asid(vma_asid);
- }
- while (addr < end_addr) {
- /* Worth unrolling a little */
- __asm__ __volatile__("icbi %0, 0" : : "r" (addr));
- __asm__ __volatile__("icbi %0, 32" : : "r" (addr));
- __asm__ __volatile__("icbi %0, 64" : : "r" (addr));
- __asm__ __volatile__("icbi %0, 96" : : "r" (addr));
- addr += 128;
- }
- if (running_asid != vma_asid) {
- switch_and_save_asid(running_asid);
- local_irq_restore(flags);
- }
-}
-
-static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- /* Used for invalidating big chunks of I-cache, i.e. assume the range
- is whole pages. If 'start' or 'end' is not page aligned, the code
- is conservative and invalidates to the ends of the enclosing pages.
- This is functionally OK, just a performance loss. */
-
- /* See the comments below in sh64_dcache_purge_user_range() regarding
- the choice of algorithm. However, for the I-cache option (2) isn't
- available because there are no physical tags so aliases can't be
- resolved. The icbi instruction has to be used through the user
- mapping. Because icbi is cheaper than ocbp on a cache hit, it
- would be cheaper to use the selective code for a large range than is
- possible with the D-cache. Just assume 64 for now as a working
- figure.
- */
- int n_pages;
-
- if (!mm)
- return;
-
- n_pages = ((end - start) >> PAGE_SHIFT);
- if (n_pages >= 64) {
- sh64_icache_inv_all();
- } else {
- unsigned long aligned_start;
- unsigned long eaddr;
- unsigned long after_last_page_start;
- unsigned long mm_asid, current_asid;
- unsigned long flags = 0;
-
- mm_asid = cpu_asid(smp_processor_id(), mm);
- current_asid = get_asid();
-
- if (mm_asid != current_asid) {
- /* Switch ASID and run the invalidate loop under cli */
- local_irq_save(flags);
- switch_and_save_asid(mm_asid);
- }
-
- aligned_start = start & PAGE_MASK;
- after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
-
- while (aligned_start < after_last_page_start) {
- struct vm_area_struct *vma;
- unsigned long vma_end;
- vma = find_vma(mm, aligned_start);
- if (!vma || (aligned_start <= vma->vm_end)) {
- /* Avoid getting stuck in an error condition */
- aligned_start += PAGE_SIZE;
- continue;
- }
- vma_end = vma->vm_end;
- if (vma->vm_flags & VM_EXEC) {
- /* Executable */
- eaddr = aligned_start;
- while (eaddr < vma_end) {
- sh64_icache_inv_user_page(vma, eaddr);
- eaddr += PAGE_SIZE;
- }
- }
- aligned_start = vma->vm_end; /* Skip to start of next region */
- }
-
- if (mm_asid != current_asid) {
- switch_and_save_asid(current_asid);
- local_irq_restore(flags);
- }
- }
-}
-
-static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
-{
- /* The icbi instruction never raises ITLBMISS. i.e. if there's not a
- cache hit on the virtual tag the instruction ends there, without a
- TLB lookup. */
-
- unsigned long long aligned_start;
- unsigned long long ull_end;
- unsigned long long addr;
-
- ull_end = end;
-
- /* Just invalidate over the range using the natural addresses. TLB
- miss handling will be OK (TBC). Since it's for the current process,
- either we're already in the right ASID context, or the ASIDs have
- been recycled since we were last active in which case we might just
- invalidate another processes I-cache entries : no worries, just a
- performance drop for him. */
- aligned_start = L1_CACHE_ALIGN(start);
- addr = aligned_start;
- while (addr < ull_end) {
- __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- addr += L1_CACHE_BYTES;
- }
-}
-
-/* Buffer used as the target of alloco instructions to purge data from cache
- sets by natural eviction. -- RPC */
-#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
-static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
-
-static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
-{
- /* Purge all ways in a particular block of sets, specified by the base
- set number and number of sets. Can handle wrap-around, if that's
- needed. */
-
- int dummy_buffer_base_set;
- unsigned long long eaddr, eaddr0, eaddr1;
- int j;
- int set_offset;
-
- dummy_buffer_base_set = ((int)&dummy_alloco_area &
- cpu_data->dcache.entry_mask) >>
- cpu_data->dcache.entry_shift;
- set_offset = sets_to_purge_base - dummy_buffer_base_set;
-
- for (j = 0; j < n_sets; j++, set_offset++) {
- set_offset &= (cpu_data->dcache.sets - 1);
- eaddr0 = (unsigned long long)dummy_alloco_area +
- (set_offset << cpu_data->dcache.entry_shift);
-
- /*
- * Do one alloco which hits the required set per cache
- * way. For write-back mode, this will purge the #ways
- * resident lines. There's little point unrolling this
- * loop because the allocos stall more if they're too
- * close together.
- */
- eaddr1 = eaddr0 + cpu_data->dcache.way_size *
- cpu_data->dcache.ways;
-
- for (eaddr = eaddr0; eaddr < eaddr1;
- eaddr += cpu_data->dcache.way_size) {
- __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
- __asm__ __volatile__ ("synco"); /* TAKum03020 */
- }
-
- eaddr1 = eaddr0 + cpu_data->dcache.way_size *
- cpu_data->dcache.ways;
-
- for (eaddr = eaddr0; eaddr < eaddr1;
- eaddr += cpu_data->dcache.way_size) {
- /*
- * Load from each address. Required because
- * alloco is a NOP if the cache is write-through.
- */
- if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
- __raw_readb((unsigned long)eaddr);
- }
- }
-
- /*
- * Don't use OCBI to invalidate the lines. That costs cycles
- * directly. If the dummy block is just left resident, it will
- * naturally get evicted as required.
- */
-}
-
-/*
- * Purge the entire contents of the dcache. The most efficient way to
- * achieve this is to use alloco instructions on a region of unused
- * memory equal in size to the cache, thereby causing the current
- * contents to be discarded by natural eviction. The alternative, namely
- * reading every tag, setting up a mapping for the corresponding page and
- * doing an OCBP for the line, would be much more expensive.
- */
-static void sh64_dcache_purge_all(void)
-{
- sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
-}
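The purge-by-eviction walk above is easier to see with concrete numbers. A minimal editor's sketch, assuming a hypothetical 4-way, 512-set, 32-byte-line write-back cache (the real code reads the geometry from cpu_data->dcache, and do_alloco() is a hypothetical stand-in for the alloco/synco pair):

    #define SETS     512
    #define WAYS     4
    #define LINE     32
    #define WAY_SIZE (SETS * LINE)            /* bytes spanned by one way */

    extern void do_alloco(void *eaddr);       /* hypothetical alloco+synco */

    static char dummy[WAYS * WAY_SIZE];       /* cf. dummy_alloco_area */

    static void purge_by_eviction(void)
    {
        int set, way;

        /* One alloco per way per set claims all SETS * WAYS lines, so
         * every dirty line in a write-back cache is written back by
         * natural eviction, without inspecting any tags. */
        for (set = 0; set < SETS; set++)
            for (way = 0; way < WAYS; way++)
                do_alloco(&dummy[way * WAY_SIZE + set * LINE]);
    }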
-
-
-/* Assumes this address and the (2**n_synbits) pages above it aren't used
- for anything else in the kernel */
-#define MAGIC_PAGE0_START 0xffffffffec000000ULL
-
-/* Purge the physical page 'paddr' from the cache. It's known that any
- * cache lines requiring attention have the same page colour as the
- * address 'eaddr'.
- *
- * This relies on the fact that the D-cache matches on physical tags when
- * no virtual tag matches. So we create an alias for the original page
- * and purge through that. (Alternatively, we could have done this by
- * switching ASID to match the original mapping and purged through that,
- * but that involves ASID switching cost + probably a TLBMISS + refill
- * anyway.)
- */
-static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
- unsigned long eaddr)
-{
- unsigned long long magic_page_start;
- unsigned long long magic_eaddr, magic_eaddr_end;
-
- magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
-
- /* As long as the kernel is not pre-emptible, this doesn't need to be
- under cli/sti. */
- sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
-
- magic_eaddr = magic_page_start;
- magic_eaddr_end = magic_eaddr + PAGE_SIZE;
-
- while (magic_eaddr < magic_eaddr_end) {
- /* Little point in unrolling this loop - the OCBPs are blocking
- and won't go any quicker (i.e. the loop overhead is parallel
- to part of the OCBP execution). */
- __asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
- magic_eaddr += L1_CACHE_BYTES;
- }
-
- sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * Purge a page given its physical start address, by creating a temporary
- * 1 page mapping and purging across that. Even if we know the virtual
- * address (& vma or mm) of the page, the method here is more elegant
- * because it avoids having to cope with page faults on the purge
- * instructions (i.e. no special-case code required in the critical path
- * in the TLB miss handling).
- */
-static void sh64_dcache_purge_phy_page(unsigned long paddr)
-{
- unsigned long long eaddr_start, eaddr, eaddr_end;
- int i;
-
- /* As long as the kernel is not pre-emptible, this doesn't need to be
- under cli/sti. */
- eaddr_start = MAGIC_PAGE0_START;
- for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
- sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
-
- eaddr = eaddr_start;
- eaddr_end = eaddr + PAGE_SIZE;
- while (eaddr < eaddr_end) {
- __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
- eaddr += L1_CACHE_BYTES;
- }
-
- sh64_teardown_dtlb_cache_slot();
- eaddr_start += PAGE_SIZE;
- }
-}
-
-static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
- unsigned long addr, unsigned long end)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
- spinlock_t *ptl;
- unsigned long paddr;
-
- if (!mm)
- return; /* No way to find physical address of page */
-
- pgd = pgd_offset(mm, addr);
- if (pgd_bad(*pgd))
- return;
-
- p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d) || p4d_bad(*p4d))
- return;
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud) || pud_bad(*pud))
- return;
-
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd) || pmd_bad(*pmd))
- return;
-
- pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- do {
- entry = *pte;
- if (pte_none(entry) || !pte_present(entry))
- continue;
- paddr = pte_val(entry) & PAGE_MASK;
- sh64_dcache_purge_coloured_phy_page(paddr, addr);
- } while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap_unlock(pte - 1, ptl);
-}
-
-/*
- * There are at least 5 choices for the implementation of this, with
- * pros (+), cons(-), comments(*):
- *
- * 1. ocbp each line in the range through the original user's ASID
- * + no lines spuriously evicted
- * - tlbmiss handling (must either handle faults on demand => extra
- * special-case code in tlbmiss critical path), or map the page in
- * advance (=> flush_tlb_range in advance to avoid multiple hits)
- * - ASID switching
- * - expensive for large ranges
- *
- * 2. temporarily map each page in the range to a special effective
- * address and ocbp through the temporary mapping; relies on the
- * fact that SH-5 OCB* always do TLB lookup and match on ptags (they
- * never look at the etags)
- * + no spurious evictions
- * - expensive for large ranges
- * * surely cheaper than (1)
- *
- * 3. walk all the lines in the cache, check the tags, if a match
- * occurs create a page mapping to ocbp the line through
- * + no spurious evictions
- * - tag inspection overhead (especially for small ranges)
- * - potential cost of setting up/tearing down page mapping for
- * every line that matches the range
- * * cost partly independent of range size
- *
- * 4. walk all the lines in the cache, check the tags, if a match
- * occurs use 4 * alloco to purge the line (+3 other probably
- * innocent victims) by natural eviction
- * + no tlb mapping overheads
- * - spurious evictions
- * - tag inspection overhead
- *
- * 5. implement like flush_cache_all
- * + no tag inspection overhead
- * - spurious evictions
- * - bad for small ranges
- *
- * (1) can be ruled out as more expensive than (2). (2) appears best
- * for small ranges. The choice between (3), (4) and (5) for large
- * ranges and the range size for the large/small boundary need
- * benchmarking to determine.
- *
- * For now use approach (2) for small ranges and (5) for large ones.
- */
-static void sh64_dcache_purge_user_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- int n_pages = ((end - start) >> PAGE_SHIFT);
-
- if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
- sh64_dcache_purge_all();
- } else {
- /* Small range, covered by a single page table page */
- start &= PAGE_MASK; /* should already be so */
- end = PAGE_ALIGN(end); /* should already be so */
- sh64_dcache_purge_user_pages(mm, start, end);
- }
-}
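The (start ^ (end - 1)) & PMD_MASK test deserves a worked example (editor's sketch; the 4 MiB PMD span is a hypothetical value for illustration):

    /* With PMD_SHIFT = 22, PMD_MASK = ~0x3fffff:
     *   start = 0x003ff000, end = 0x00401000   (a two-page range)
     *   start ^ (end - 1) = 0x003ff000 ^ 0x00400fff = 0x007fffff
     *   ... & PMD_MASK    = 0x00400000          (nonzero)
     * The two pages sit under different PMD entries, i.e. different
     * page-table pages, so the single-table walk in
     * sh64_dcache_purge_user_pages() cannot cover the range and the
     * code falls back to purging the whole cache instead.
     */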
-
-/*
- * Invalidate the entire contents of both caches, after writing back to
- * memory any dirty data from the D-cache.
- */
-static void sh5_flush_cache_all(void *unused)
-{
- sh64_dcache_purge_all();
- sh64_icache_inv_all();
-}
-
-/*
- * Invalidate an entire user-address space from both caches, after
- * writing back dirty data (e.g. for shared mmap etc).
- *
- * This could be coded selectively by inspecting all the tags then
- * doing 4*alloco on any set containing a match (as for
- * flush_cache_range), but fork/exit/execve (where this is called from)
- * are expensive anyway.
- *
- * Have to do a purge here, despite the comments re I-cache below.
- * There could be odd-coloured dirty data associated with the mm still
- * in the cache - if this gets written out through natural eviction
- * after the kernel has reused the page there will be chaos.
- *
- * The mm being torn down won't ever be active again, so any Icache
- * lines tagged with its ASID won't be visible for the rest of the
- * lifetime of this ASID cycle. Before the ASID gets reused, there
- * will be a flush_cache_all. Hence we don't need to touch the
- * I-cache. This is similar to the lack of action needed in
- * flush_tlb_mm - see fault.c.
- */
-static void sh5_flush_cache_mm(void *unused)
-{
- sh64_dcache_purge_all();
-}
-
-/*
- * Invalidate (from both caches) the range [start,end) of virtual
- * addresses from the user address space specified by mm, after writing
- * back any dirty data.
- *
- * Note, 'end' is 1 byte beyond the end of the range to flush.
- */
-static void sh5_flush_cache_range(void *args)
-{
- struct flusher_data *data = args;
- struct vm_area_struct *vma;
- unsigned long start, end;
-
- vma = data->vma;
- start = data->addr1;
- end = data->addr2;
-
- sh64_dcache_purge_user_range(vma->vm_mm, start, end);
- sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
-}
-
-/*
- * Invalidate any entries in either cache for the vma within the user
- * address space vma->vm_mm for the page starting at virtual address
- * 'eaddr'. This seems to be used primarily in breaking COW. Note,
- * the I-cache must be searched too in case the page in question is
- * both writable and being executed from (e.g. stack trampolines.)
- *
- * Note, this is called with pte lock held.
- */
-static void sh5_flush_cache_page(void *args)
-{
- struct flusher_data *data = args;
- struct vm_area_struct *vma;
- unsigned long eaddr, pfn;
-
- vma = data->vma;
- eaddr = data->addr1;
- pfn = data->addr2;
-
- sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
-
- if (vma->vm_flags & VM_EXEC)
- sh64_icache_inv_user_page(vma, eaddr);
-}
-
-static void sh5_flush_dcache_page(void *page)
-{
- sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
- wmb();
-}
-
-/*
- * Flush the range [start,end) of kernel virtual address space from
- * the I-cache. The corresponding range must be purged from the
- * D-cache also because the SH-5 doesn't have cache snooping between
- * the caches. The addresses will be visible through the superpage
- * mapping, therefore it's guaranteed that there are no cache entries
- * for the range in cache sets of the wrong colour.
- */
-static void sh5_flush_icache_range(void *args)
-{
- struct flusher_data *data = args;
- unsigned long start, end;
-
- start = data->addr1;
- end = data->addr2;
-
- __flush_purge_region((void *)start, end);
- wmb();
- sh64_icache_inv_kernel_range(start, end);
-}
-
-/*
- * For the address range [start,end), write back the data from the
- * D-cache and invalidate the corresponding region of the I-cache for the
- * current process. Used to flush signal trampolines on the stack to
- * make them executable.
- */
-static void sh5_flush_cache_sigtramp(void *vaddr)
-{
- unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
-
- __flush_wback_region(vaddr, L1_CACHE_BYTES);
- wmb();
- sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
-}
-
-void __init sh5_cache_init(void)
-{
- local_flush_cache_all = sh5_flush_cache_all;
- local_flush_cache_mm = sh5_flush_cache_mm;
- local_flush_cache_dup_mm = sh5_flush_cache_mm;
- local_flush_cache_page = sh5_flush_cache_page;
- local_flush_cache_range = sh5_flush_cache_range;
- local_flush_dcache_page = sh5_flush_dcache_page;
- local_flush_icache_range = sh5_flush_icache_range;
- local_flush_cache_sigtramp = sh5_flush_cache_sigtramp;
-
- /* Reserve a slot for dcache colouring in the DTLB */
- dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-
- sh4__flush_region_init();
-}
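For context, these local_flush_* hooks are reached through the generic sh cache layer, which packs the arguments into one struct so a single callback signature fits every operation. A sketch of the calling side, assuming the flusher_data convention that the handlers above unpack (data->vma, data->addr1, data->addr2):

    struct flusher_data {
        struct vm_area_struct *vma;
        unsigned long addr1, addr2;   /* start/end, or eaddr/pfn */
    };

    void flush_cache_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end)
    {
        struct flusher_data data = {
            .vma = vma, .addr1 = start, .addr2 = end,
        };

        /* runs the local_ hook on each CPU that shares the cache */
        cacheop_on_each_cpu(local_flush_cache_range, &data, 1);
    }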
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index ed25eba80667..48978293226c 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -16,7 +16,6 @@
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 464f160a9576..3aef78ceb820 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -355,12 +355,6 @@ void __init cpu_cache_init(void)
}
}
- if (boot_cpu_data.family == CPU_FAMILY_SH5) {
- extern void __weak sh5_cache_init(void);
-
- sh5_cache_init();
- }
-
skip:
emit_cache_params();
}
diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
deleted file mode 100644
index 7a3b4d33d2e7..000000000000
--- a/arch/sh/mm/extable_64.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * arch/sh/mm/extable_64.c
- *
- * Copyright (C) 2003 Richard Curnow
- * Copyright (C) 2003, 2004 Paul Mundt
- *
- * Cloned from the 2.5 SH version..
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/bsearch.h>
-#include <linux/rwsem.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
-extern void __copy_user_fixup(void);
-
-static const struct exception_table_entry __copy_user_fixup_ex = {
- .fixup = (unsigned long)&__copy_user_fixup,
-};
-
-/*
- * Some functions that may trap due to a bad user-mode address have too
- * many loads and stores in them to make it at all practical to label
- * each one and put them all in the main exception table.
- *
- * In particular, the fast memcpy routine is like this. Its fixup is
- * just to fall back to a slow byte-at-a-time copy, which is handled the
- * conventional way. So it's functionally OK to just handle any trap
- * occurring in the fast memcpy with that fixup.
- */
-static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
-{
- if ((addr >= (unsigned long)&copy_user_memcpy) &&
- (addr <= (unsigned long)&copy_user_memcpy_end))
- return &__copy_user_fixup_ex;
-
- return NULL;
-}
-
-static int cmp_ex_search(const void *key, const void *elt)
-{
- const struct exception_table_entry *_elt = elt;
- unsigned long _key = *(unsigned long *)key;
-
- /* avoid overflow */
- if (_key > _elt->insn)
- return 1;
- if (_key < _elt->insn)
- return -1;
- return 0;
-}
-
-/* Simple binary search */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
- const size_t num,
- unsigned long value)
-{
- const struct exception_table_entry *mid;
-
- mid = check_exception_ranges(value);
- if (mid)
- return mid;
-
- return bsearch(&value, base, num,
- sizeof(struct exception_table_entry), cmp_ex_search);
-}
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
-
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return 1;
- }
-
- return 0;
-}
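Note why cmp_ex_search() compares rather than returning `_key - _elt->insn`: the subtraction can wrap for addresses more than half the address space apart and report the wrong sign. A userspace sketch of the same comparator contract with the standard bsearch():

    #include <stdlib.h>

    static int cmp_ul(const void *key, const void *elt)
    {
        unsigned long k = *(const unsigned long *)key;
        unsigned long e = *(const unsigned long *)elt;

        return (k > e) - (k < e);   /* -1, 0 or 1; never overflows */
    }

    /* unsigned long table[] = { 0x1000, 0x2000, 0xf0000000UL };
     * unsigned long key = 0xf0000000UL;
     * unsigned long *hit = bsearch(&key, table, 3, sizeof(*table), cmp_ul);
     */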
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 7260a1a7fdca..fbe1f2fe9a8c 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -214,7 +214,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long address)
: "paging request",
address);
pr_alert("PC:");
- printk_address(regs->pc, 1);
+ printk_address(regs->pc, 1, KERN_ALERT);
show_pte(NULL, address);
}
@@ -279,7 +279,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__bad_area_nosemaphore(regs, error_code, address, si_code);
}
@@ -303,7 +303,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs))
@@ -326,9 +326,9 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
return 1;
}
- /* Release mmap_sem first if necessary */
+ /* Release mmap_lock first if necessary */
if (!(fault & VM_FAULT_RETRY))
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (!(fault & VM_FAULT_ERROR))
return 0;
@@ -442,7 +442,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
}
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (unlikely(!vma)) {
@@ -502,7 +502,7 @@ good_area:
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -510,5 +510,5 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
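These hunks are part of the tree-wide mmap_sem -> mmap_lock conversion; the wrappers are drop-in replacements for the old rw-semaphore calls. In outline, the fault path after the change keeps the same shape:

    mmap_read_lock(mm);               /* was: down_read(&mm->mmap_sem) */
    vma = find_vma(mm, address);
    /* ... validate vma, call handle_mm_fault() ... */
    mmap_read_unlock(mm);             /* was: up_read(&mm->mmap_sem) */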
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 0e7039137f5a..73fd7cc99430 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -14,9 +14,6 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), vaddr)
-
static pte_t *kmap_coherent_pte;
void __init kmap_coherent_init(void)
@@ -25,7 +22,7 @@ void __init kmap_coherent_init(void)
/* cache the first coherent kmap pte */
vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
- kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+ kmap_coherent_pte = virt_to_kpte(vaddr);
}
void *kmap_coherent(struct page *page, unsigned long addr)
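virt_to_kpte() is the generic helper that replaces the hand-rolled macro chain deleted above. As a sketch of what it does (roughly the generic pgtable implementation; details may differ by kernel version), it performs the same top-down descent:

    static inline pte_t *virt_to_kpte(unsigned long vaddr)
    {
        pmd_t *pmd = pmd_off_k(vaddr);   /* pgd -> p4d -> pud -> pmd walk */

        return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
    }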
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index dca946f426c6..8b4504413c5f 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -10,7 +10,6 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/uaccess.h>
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b59bad86b31e..b20aba6e1b37 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,10 +23,10 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
deleted file mode 100644
index e4bb2a8e0a69..000000000000
--- a/arch/sh/mm/tlb-sh5.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * arch/sh/mm/tlb-sh5.c
- *
- * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
- * Copyright (C) 2003 Richard Curnow <richard.curnow@superh.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
-
-/**
- * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
- */
-int sh64_tlb_init(void)
-{
- /* Assign some sane DTLB defaults */
- cpu_data->dtlb.entries = 64;
- cpu_data->dtlb.step = 0x10;
-
- cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step;
- cpu_data->dtlb.next = cpu_data->dtlb.first;
-
- cpu_data->dtlb.last = DTLB_FIXED |
- ((cpu_data->dtlb.entries - 1) *
- cpu_data->dtlb.step);
-
- /* And again for the ITLB */
- cpu_data->itlb.entries = 64;
- cpu_data->itlb.step = 0x10;
-
- cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step;
- cpu_data->itlb.next = cpu_data->itlb.first;
- cpu_data->itlb.last = ITLB_FIXED |
- ((cpu_data->itlb.entries - 1) *
- cpu_data->itlb.step);
-
- return 0;
-}
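For the defaults just assigned, the slot arithmetic works out as follows (editor's check, not part of the patch):

    /* entries = 64, step = 0x10:
     *   first = DTLB_FIXED | 0x010         -- slot 0 (DTLB_FIXED) is skipped
     *   last  = DTLB_FIXED | (63 * 0x10)   -- = DTLB_FIXED | 0x3f0
     * so the refill pointer 'next' cycles through slots 1..63, while the
     * wired-entry allocator below grows 'first' upward from the bottom.
     */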
-
-/**
- * sh64_next_free_dtlb_entry - Find the next available DTLB entry
- */
-unsigned long long sh64_next_free_dtlb_entry(void)
-{
- return cpu_data->dtlb.next;
-}
-
-/**
- * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
- */
-unsigned long long sh64_get_wired_dtlb_entry(void)
-{
- unsigned long long entry = sh64_next_free_dtlb_entry();
-
- cpu_data->dtlb.first += cpu_data->dtlb.step;
- cpu_data->dtlb.next += cpu_data->dtlb.step;
-
- return entry;
-}
-
-/**
- * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
- *
- * @entry: Address of TLB slot.
- *
- * Works like a stack, last one to allocate must be first one to free.
- */
-int sh64_put_wired_dtlb_entry(unsigned long long entry)
-{
- __flush_tlb_slot(entry);
-
- /*
- * We don't do any particularly useful tracking of wired entries,
- * so this approach works like a stack .. last one to be allocated
- * has to be the first one to be freed.
- *
- * We could potentially load wired entries into a list and work on
- * rebalancing the list periodically (which also entails moving the
- * contents of a TLB entry) .. though I have a feeling that this is
- * more trouble than it's worth.
- */
-
- /*
- * Entry must be valid .. we don't want any ITLB addresses!
- */
- if (entry <= DTLB_FIXED)
- return -EINVAL;
-
- /*
- * Next, check if we're within range to be freed (i.e., it must be
- * the entry beneath the first 'free' entry!).
- */
- if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
- return -EINVAL;
-
- /* If we are, then bring this entry back into the list */
- cpu_data->dtlb.first -= cpu_data->dtlb.step;
- cpu_data->dtlb.next = entry;
-
- return 0;
-}
-
-/**
- * sh64_setup_tlb_slot - Load up a translation in a wired slot.
- *
- * @config_addr: Address of TLB slot.
- * @eaddr: Virtual address.
- * @asid: Address Space Identifier.
- * @paddr: Physical address.
- *
- * Load up a virtual<->physical translation for @eaddr<->@paddr in the
- * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
- */
-void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
- unsigned long asid, unsigned long paddr)
-{
- unsigned long long pteh, ptel;
-
- pteh = neff_sign_extend(eaddr);
- pteh &= PAGE_MASK;
- pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
- ptel = neff_sign_extend(paddr);
- ptel &= PAGE_MASK;
- ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
-
- asm volatile("putcfg %0, 1, %1\n\t"
- "putcfg %0, 0, %2\n"
- : : "r" (config_addr), "r" (ptel), "r" (pteh));
-}
-
-/**
- * sh64_teardown_tlb_slot - Teardown a translation.
- *
- * @config_addr: Address of TLB slot.
- *
- * Teardown any existing mapping in the TLB slot @config_addr.
- */
-void sh64_teardown_tlb_slot(unsigned long long config_addr)
- __attribute__ ((alias("__flush_tlb_slot")));
-
-static int dtlb_entry;
-static unsigned long long dtlb_entries[64];
-
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
- unsigned long long entry;
- unsigned long paddr, flags;
-
- BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
-
- local_irq_save(flags);
-
- entry = sh64_get_wired_dtlb_entry();
- dtlb_entries[dtlb_entry++] = entry;
-
- paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
- paddr &= ~PAGE_MASK;
-
- sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
-
- local_irq_restore(flags);
-}
-
-void tlb_unwire_entry(void)
-{
- unsigned long long entry;
- unsigned long flags;
-
- BUG_ON(!dtlb_entry);
-
- local_irq_save(flags);
- entry = dtlb_entries[dtlb_entry--];
-
- sh64_teardown_tlb_slot(entry);
- sh64_put_wired_dtlb_entry(entry);
-
- local_irq_restore(flags);
-}
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
- unsigned long long ptel;
- unsigned long long pteh=0;
- struct tlb_info *tlbp;
- unsigned long long next;
- unsigned int fault_code = get_thread_fault_code();
-
- /* Get PTEL first */
- ptel = pte.pte_low;
-
- /*
- * Set PTEH register
- */
- pteh = neff_sign_extend(address & MMU_VPN_MASK);
-
- /* Set the ASID. */
- pteh |= get_asid() << PTEH_ASID_SHIFT;
- pteh |= PTEH_VALID;
-
- /* Set PTEL register, set_pte has performed the sign extension */
- ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-
- if (fault_code & FAULT_CODE_ITLB)
- tlbp = &cpu_data->itlb;
- else
- tlbp = &cpu_data->dtlb;
-
- next = tlbp->next;
- __flush_tlb_slot(next);
- asm volatile ("putcfg %0,1,%2\n\n\t"
- "putcfg %0,0,%1\n"
- : : "r" (next), "r" (pteh), "r" (ptel) );
-
- next += TLB_STEP;
- if (next > tlbp->last)
- next = tlbp->first;
- tlbp->next = next;
-}
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
deleted file mode 100644
index 0d015f7556fa..000000000000
--- a/arch/sh/mm/tlbex_64.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * The SH64 TLB miss.
- *
- * Original code from fault.c
- * Copyright (C) 2000, 2001 Paolo Alberelli
- *
- * Fast PTE->TLB refill path
- * Copyright (C) 2003 Richard.Curnow@superh.com
- *
- * IMPORTANT NOTES :
- * The do_fast_page_fault function is called from a context in entry.S
- * where very few registers have been saved. In particular, the code in
- * this file must be compiled not to use ANY caller-save registers that
- * are not part of the restricted save set. Also, it means that code in
- * this file must not make calls to functions elsewhere in the kernel, or
- * else the excepting context will see corruption in its caller-save
- * registers. Plus, the entry.S save area is non-reentrant, so this code
- * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
- * on any exception.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/kprobes.h>
-#include <asm/tlb.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-static int handle_tlbmiss(unsigned long long protection_flags,
- unsigned long address)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
-
- if (is_vmalloc_addr((void *)address)) {
- pgd = pgd_offset_k(address);
- } else {
- if (unlikely(address >= TASK_SIZE || !current->mm))
- return 1;
-
- pgd = pgd_offset(current->mm, address);
- }
-
- p4d = p4d_offset(pgd, address);
- if (p4d_none(*p4d) || !p4d_present(*p4d))
- return 1;
-
- pud = pud_offset(p4d, address);
- if (pud_none(*pud) || !pud_present(*pud))
- return 1;
-
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd) || !pmd_present(*pmd))
- return 1;
-
- pte = pte_offset_kernel(pmd, address);
- entry = *pte;
- if (pte_none(entry) || !pte_present(entry))
- return 1;
-
- /*
- * If the page doesn't have sufficient protection bits set to
- * service the kind of fault being handled, there's not much
- * point doing the TLB refill. Punt the fault to the general
- * handler.
- */
- if ((pte_val(entry) & protection_flags) != protection_flags)
- return 1;
-
- update_mmu_cache(NULL, address, pte);
-
- return 0;
-}
-
-/*
- * Put all this information into one structure so that everything is just
- * arithmetic relative to a single base address. This reduces the number
- * of movi/shori pairs needed just to load addresses of static data.
- */
-struct expevt_lookup {
- unsigned short protection_flags[8];
- unsigned char is_text_access[8];
- unsigned char is_write_access[8];
-};
-
-#define PRU (1<<9)
-#define PRW (1<<8)
-#define PRX (1<<7)
-#define PRR (1<<6)
-
-/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
- the fault happened in user mode or privileged mode. */
-static struct expevt_lookup expevt_lookup_table = {
- .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
- .is_text_access = {1, 1, 0, 0, 0, 0, 0, 0}
-};
-
-static inline unsigned int
-expevt_to_fault_code(unsigned long expevt)
-{
- if (expevt == 0xa40)
- return FAULT_CODE_ITLB;
- else if (expevt == 0x060)
- return FAULT_CODE_WRITE;
-
- return 0;
-}
-
-/*
- This routine handles page faults that can be serviced just by refilling a
- TLB entry from an existing page table entry. (This case represents a very
- large majority of page faults.) Return 0 if the fault was successfully
- handled by a TLB refill. Return 1 if it could not be. (This leads into the
- general fault handling in fault.c, which deals with mapping file-backed
- pages, stack growth, segmentation faults, swapping etc.)
- */
-asmlinkage int __kprobes
-do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
- unsigned long address)
-{
- unsigned long long protection_flags;
- unsigned long long index;
- unsigned long long expevt4;
- unsigned int fault_code;
-
- /* The next few lines implement a way of hashing EXPEVT into a
- * small array index which can be used to lookup parameters
- * specific to the type of TLBMISS being handled.
- *
- * Note:
- * ITLBMISS has EXPEVT==0xa40
- * RTLBMISS has EXPEVT==0x040
- * WTLBMISS has EXPEVT==0x060
- */
- expevt4 = (expevt >> 4);
- /* TODO : xor ssr_md into this expression too. Then we can check
- * that PRU is set when it needs to be. */
- index = expevt4 ^ (expevt4 >> 5);
- index &= 7;
-
- fault_code = expevt_to_fault_code(expevt);
-
- protection_flags = expevt_lookup_table.protection_flags[index];
-
- if (expevt_lookup_table.is_text_access[index])
- fault_code |= FAULT_CODE_ITLB;
- if (!ssr_md)
- fault_code |= FAULT_CODE_USER;
-
- set_thread_fault_code(fault_code);
-
- return handle_tlbmiss(protection_flags, address);
-}
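The hash above is worth checking by hand; a throwaway userspace program (editor's sketch) confirms that the three EXPEVT values land on distinct slots matching the protection_flags table layout:

    #include <stdio.h>

    static unsigned idx(unsigned long expevt)
    {
        unsigned long e4 = expevt >> 4;

        return (e4 ^ (e4 >> 5)) & 7;
    }

    int main(void)
    {
        printf("ITLBMISS 0xa40 -> %u\n", idx(0xa40)); /* 1: PRX */
        printf("RTLBMISS 0x040 -> %u\n", idx(0x040)); /* 4: PRR */
        printf("WTLBMISS 0x060 -> %u\n", idx(0x060)); /* 6: PRW */
        return 0;
    }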
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
deleted file mode 100644
index bd0715d5dca4..000000000000
--- a/arch/sh/mm/tlbflush_64.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * arch/sh/mm/tlbflush_64.c
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2012 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/perf_event.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include <asm/tlb.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-void local_flush_tlb_one(unsigned long asid, unsigned long page)
-{
- unsigned long long match, pteh=0, lpage;
- unsigned long tlb;
-
- /*
- * Sign-extend based on neff.
- */
- lpage = neff_sign_extend(page);
- match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
- match |= lpage;
-
- for_each_itlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- if (pteh == match) {
- __flush_tlb_slot(tlb);
- break;
- }
- }
-
- for_each_dtlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- if (pteh == match) {
- __flush_tlb_slot(tlb);
- break;
- }
-
- }
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- unsigned long flags;
-
- if (vma->vm_mm) {
- page &= PAGE_MASK;
- local_irq_save(flags);
- local_flush_tlb_one(get_asid(), page);
- local_irq_restore(flags);
- }
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- unsigned long flags;
- unsigned long long match, pteh=0, pteh_epn, pteh_low;
- unsigned long tlb;
- unsigned int cpu = smp_processor_id();
- struct mm_struct *mm;
-
- mm = vma->vm_mm;
- if (cpu_context(cpu, mm) == NO_CONTEXT)
- return;
-
- local_irq_save(flags);
-
- start &= PAGE_MASK;
- end &= PAGE_MASK;
-
- match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
-
- /* Flush ITLB */
- for_each_itlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- pteh_epn = pteh & PAGE_MASK;
- pteh_low = pteh & ~PAGE_MASK;
-
- if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
- __flush_tlb_slot(tlb);
- }
-
- /* Flush DTLB */
- for_each_dtlb_entry(tlb) {
- asm volatile ("getcfg %1, 0, %0"
- : "=r" (pteh)
- : "r" (tlb) );
-
- pteh_epn = pteh & PAGE_MASK;
- pteh_low = pteh & ~PAGE_MASK;
-
- if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
- __flush_tlb_slot(tlb);
- }
-
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- unsigned long flags;
- unsigned int cpu = smp_processor_id();
-
- if (cpu_context(cpu, mm) == NO_CONTEXT)
- return;
-
- local_irq_save(flags);
-
- cpu_context(cpu, mm) = NO_CONTEXT;
- if (mm == current->mm)
- activate_context(mm, cpu);
-
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_all(void)
-{
- /* Invalidate all, including shared pages, excluding fixed TLBs */
- unsigned long flags, tlb;
-
- local_irq_save(flags);
-
- /* Flush each ITLB entry */
- for_each_itlb_entry(tlb)
- __flush_tlb_slot(tlb);
-
- /* Flush each DTLB entry */
- for_each_dtlb_entry(tlb)
- __flush_tlb_slot(tlb);
-
- local_irq_restore(flags);
-}
-
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- /* FIXME: Optimize this later.. */
- flush_tlb_all();
-}
-
-void __flush_tlb_global(void)
-{
- flush_tlb_all();
-}
diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h
index fb66094a2c30..41c6d734a474 100644
--- a/arch/sparc/include/asm/cacheflush_32.h
+++ b/arch/sparc/include/asm/cacheflush_32.h
@@ -17,8 +17,6 @@
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index e7517434d1fa..b9341836597e 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -49,7 +49,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end);
void flush_dcache_page(struct page *page);
#define flush_icache_page(vma, pg) do { } while(0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr,
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index 946dbcbf3a83..e10ab9ad3097 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -9,8 +9,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index ddb03c04f1f3..6c35f0d27ee1 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -22,9 +22,9 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
/* declarations for highmem.c */
diff --git a/arch/sparc/include/asm/ide.h b/arch/sparc/include/asm/ide.h
index 09f026585550..499aa2e6e276 100644
--- a/arch/sparc/include/asm/ide.h
+++ b/arch/sparc/include/asm/ide.h
@@ -18,7 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/page.h>
#else
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/psr.h>
#endif
diff --git a/arch/sparc/include/asm/io-unit.h b/arch/sparc/include/asm/io-unit.h
index 3ce96e8c088f..8c38f5b9f927 100644
--- a/arch/sparc/include/asm/io-unit.h
+++ b/arch/sparc/include/asm/io-unit.h
@@ -7,8 +7,8 @@
#define _SPARC_IO_UNIT_H
#include <linux/spinlock.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
/* The io-unit handles all virtual to physical address translations
* that occur between the SBUS and physical memory. Access by
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index 478260002836..fff8861df107 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -54,7 +54,7 @@ extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t;
-typedef struct { unsigned long pmdv[16]; } pmd_t;
+typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long ctxd; } ctxd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
@@ -62,7 +62,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
#define pte_val(x) ((x).pte)
#define iopte_val(x) ((x).iopte)
-#define pmd_val(x) ((x).pmdv[0])
+#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define ctxd_val(x) ((x).ctxd)
#define pgprot_val(x) ((x).pgprot)
@@ -82,7 +82,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
*/
typedef unsigned long pte_t;
typedef unsigned long iopte_t;
-typedef struct { unsigned long pmdv[16]; } pmd_t;
+typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long ctxd_t;
typedef unsigned long pgprot_t;
@@ -90,14 +90,14 @@ typedef unsigned long iopgprot_t;
#define pte_val(x) (x)
#define iopte_val(x) (x)
-#define pmd_val(x) ((x).pmdv[0])
+#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define ctxd_val(x) (x)
#define pgprot_val(x) (x)
#define iopgprot_val(x) (x)
#define __pte(x) (x)
-#define __pmd(x) ((pmd_t) { { (x) }, })
+#define __pmd(x) (x)
#define __iopte(x) (x)
#define __pgd(x) (x)
#define __ctxd(x) (x)
@@ -106,7 +106,7 @@ typedef unsigned long iopgprot_t;
#endif
-typedef struct page *pgtable_t;
+typedef pte_t *pgtable_t;
#define TASK_UNMAPPED_BASE 0x50000000
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index eae0c92ec422..9d353e6dc5a9 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -4,9 +4,9 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/pgtable.h>
#include <asm/pgtsrmmu.h>
-#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
@@ -50,23 +50,24 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
-void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep);
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
+#define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
-#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
+#define pmd_populate_kernel pmd_populate
pgtable_t pte_alloc_one(struct mm_struct *mm);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
- return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+ return srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE,
+ SRMMU_PTE_TABLE_SIZE);
}
static inline void free_pte_fast(pte_t *pte)
{
- srmmu_free_nocache(pte, PTE_SIZE);
+ srmmu_free_nocache(pte, SRMMU_PTE_TABLE_SIZE);
}
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index 264e76ceccf6..a8dafc550985 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -67,7 +67,7 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
-#define pmd_pgtable(PMD) ((pte_t *)__pmd_page(PMD))
+#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
void pgtable_free(void *table, bool is_page);
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 0de659ae0ba4..632cdb959542 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -11,6 +11,16 @@
#include <linux/const.h>
+#define PMD_SHIFT 18
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
+
+#define PGDIR_SHIFT 24
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_ALIGN(__addr) (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
+
#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>
@@ -34,17 +44,10 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()
-#define PMD_SHIFT 22
-#define PMD_SIZE (1UL << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE-1))
-#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
-#define PGDIR_SHIFT SRMMU_PGDIR_SHIFT
-#define PGDIR_SIZE SRMMU_PGDIR_SIZE
-#define PGDIR_MASK SRMMU_PGDIR_MASK
-#define PTRS_PER_PTE 1024
-#define PTRS_PER_PMD SRMMU_PTRS_PER_PMD
-#define PTRS_PER_PGD SRMMU_PTRS_PER_PGD
-#define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE
+#define PTRS_PER_PTE 64
+#define PTRS_PER_PMD 64
+#define PTRS_PER_PGD 256
+#define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
#define FIRST_USER_ADDRESS 0UL
#define PTE_SIZE (PTRS_PER_PTE*4)
@@ -132,6 +135,23 @@ static inline struct page *pmd_page(pmd_t pmd)
return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}
+static inline unsigned long __pmd_page(pmd_t pmd)
+{
+ unsigned long v;
+
+ if (srmmu_device_memory(pmd_val(pmd)))
+ BUG();
+
+ v = pmd_val(pmd) & SRMMU_PTD_PMASK;
+ return (unsigned long)__nocache_va(v << 4);
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
+ return (unsigned long)__nocache_va(v << 4);
+}
+
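The `<< 4` here is the SRMMU page-table-descriptor packing: a PTD stores the (nocache) physical table address shifted right by 4, with the low bits used for the ET type field. Sketched as a round trip (store side as in the srmmu code; presented for illustration):

    /* store: pmd_val(*pmdp) = (__nocache_pa(ptep) >> 4) | SRMMU_ET_PTD;
     * load:  ptep = __nocache_va((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4);
     */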
static inline unsigned long pud_page_vaddr(pud_t pud)
{
if (srmmu_device_memory(pud_val(pud))) {
@@ -179,9 +199,7 @@ static inline int pmd_none(pmd_t pmd)
static inline void pmd_clear(pmd_t *pmdp)
{
- int i;
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
+ set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}
static inline int pud_none(pud_t pud)
@@ -303,30 +321,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pgprot_val(newprot));
}
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t * dir, unsigned long address)
-{
- return (pmd_t *) pud_page_vaddr(*dir) +
- ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-}
-
-/* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);
-
-/*
- * This shortcut works on sun4m (and sun4d) because the nocache area is static.
- */
-#define pte_offset_map(d, a) pte_offset_kernel(d,a)
-#define pte_unmap(pte) do{}while(0)
-
struct seq_file;
void mmu_info(struct seq_file *m);
@@ -415,7 +409,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
-#define io_remap_pfn_range io_remap_pfn_range
+#define io_remap_pfn_range io_remap_pfn_range
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
@@ -428,8 +422,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
__changed; \
})
-#include <asm-generic/pgtable.h>
-
#endif /* !(__ASSEMBLY__) */
#define VMALLOC_START _AC(0xfe600000,UL)
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index da527b27cf7d..7ef6affa105e 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -835,7 +835,7 @@ static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
#define pud_set(pudp, pmdp) \
(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
-static inline unsigned long __pmd_page(pmd_t pmd)
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
unsigned long pfn;
@@ -855,7 +855,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
return ((unsigned long) __va(pfn << PAGE_SHIFT));
}
-#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
+#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud) (pud_val(pud) != 0U)
@@ -889,31 +889,6 @@ static inline unsigned long pud_pfn(pud_t pud)
#define p4d_set(p4dp, pudp) \
(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the third-level page table.. */
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(p4dp, address) \
- ((pud_t *) p4d_page_vaddr(*(p4dp)) + pud_index(address))
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pudp, address) \
- ((pmd_t *) pud_page_vaddr(*(pudp)) + \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) __pmd_page(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) do { } while (0)
-
/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;
@@ -1078,7 +1053,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
-#define io_remap_pfn_range io_remap_pfn_range
+#define io_remap_pfn_range io_remap_pfn_range
static inline unsigned long __untagged_addr(unsigned long start)
{
@@ -1122,7 +1097,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
#define pte_access_permitted pte_access_permitted
#include <asm/tlbflush.h>
-#include <asm-generic/pgtable.h>
/* We provide our own get_unmapped_area to cope with VA holes and
* SHM area cache aliasing for userland.
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 32a508897501..7708d015712b 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -17,39 +17,9 @@
/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS 65536
-/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
-#define SRMMU_REAL_PMD_SHIFT 18
-#define SRMMU_REAL_PMD_SIZE (1UL << SRMMU_REAL_PMD_SHIFT)
-#define SRMMU_REAL_PMD_MASK (~(SRMMU_REAL_PMD_SIZE-1))
-#define SRMMU_REAL_PMD_ALIGN(__addr) (((__addr)+SRMMU_REAL_PMD_SIZE-1)&SRMMU_REAL_PMD_MASK)
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define SRMMU_PGDIR_SHIFT 24
-#define SRMMU_PGDIR_SIZE (1UL << SRMMU_PGDIR_SHIFT)
-#define SRMMU_PGDIR_MASK (~(SRMMU_PGDIR_SIZE-1))
-#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)
-
-#define SRMMU_REAL_PTRS_PER_PTE 64
-#define SRMMU_REAL_PTRS_PER_PMD 64
-#define SRMMU_PTRS_PER_PGD 256
-
-#define SRMMU_REAL_PTE_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PTE*4)
-#define SRMMU_PMD_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PMD*4)
-#define SRMMU_PGD_TABLE_SIZE (SRMMU_PTRS_PER_PGD*4)
-
-/*
- * To support pagetables in highmem, Linux introduces APIs which
- * return struct page* and generally manipulate page tables when
- * they are not mapped into kernel space. Our hardware page tables
- * are smaller than pages. We lump hardware tables into big, page-sized
- * software tables.
- *
- * PMD_SHIFT determines the size of the area a second-level page table entry
- * can map, and our pmd_t is 16 times larger than normal. The values which
- * were once defined here are now generic for 4c and srmmu, so they're
- * found in pgtable.h.
- */
-#define SRMMU_PTRS_PER_PMD 4
+#define SRMMU_PTE_TABLE_SIZE (PTRS_PER_PTE*4)
+#define SRMMU_PMD_TABLE_SIZE (PTRS_PER_PMD*4)
+#define SRMMU_PGD_TABLE_SIZE (PTRS_PER_PGD*4)
/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK 0x3
diff --git a/arch/sparc/include/asm/viking.h b/arch/sparc/include/asm/viking.h
index 0bbefd184221..08ffc605035f 100644
--- a/arch/sparc/include/asm/viking.h
+++ b/arch/sparc/include/asm/viking.h
@@ -10,6 +10,7 @@
#include <asm/asi.h>
#include <asm/mxcc.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
/* Bits in the SRMMU control register for GNU/Viking modules.
@@ -227,7 +228,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr)
: "=r" (val)
: "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));
if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- vaddr &= ~SRMMU_PGDIR_MASK;
+ vaddr &= ~PGDIR_MASK;
vaddr >>= PAGE_SHIFT;
return val | (vaddr << 8);
}
@@ -237,7 +238,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr)
: "=r" (val)
: "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- vaddr &= ~SRMMU_REAL_PMD_MASK;
+ vaddr &= ~PMD_MASK;
vaddr >>= PAGE_SHIFT;
return val | (vaddr << 8);
}
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 4401dee30018..79cd6ccfeac0 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -11,9 +11,9 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/spitfire.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/page.h>
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index 1cb62bfeaa1f..f07ea88a83af 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -50,7 +50,7 @@ struct cpuinfo_tree {
/* Offsets into nodes[] for each level of the tree */
struct cpuinfo_level level[CPUINFO_LVL_MAX];
- struct cpuinfo_node nodes[0];
+ struct cpuinfo_node nodes[];
};
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 75232cbd58bf..522e5b51050c 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -87,7 +87,7 @@ struct ds_reg_req {
__u64 handle;
__u16 major;
__u16 minor;
- char svc_id[0];
+ char svc_id[];
};
struct ds_reg_ack {
@@ -701,12 +701,12 @@ struct ds_var_hdr {
struct ds_var_set_msg {
struct ds_var_hdr hdr;
- char name_and_value[0];
+ char name_and_value[];
};
struct ds_var_delete_msg {
struct ds_var_hdr hdr;
- char name[0];
+ char name[];
};
struct ds_var_resp {
@@ -989,7 +989,7 @@ struct ds_queue_entry {
struct ds_info *dp;
int req_len;
int __pad;
- u64 req[0];
+ u64 req[];
};
static void process_ds_work(void)
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 4d3696973325..f636acf3312f 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <linux/errno.h>
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
@@ -20,7 +21,6 @@
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index e55f2c075165..be30c8d4cc73 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -24,7 +24,7 @@
#include <asm/winmacro.h>
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
-#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
+#include <asm/pgtable.h> /* PGDIR_SHIFT */
#include <asm/export.h>
.data
@@ -273,7 +273,7 @@ not_a_sun4:
lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
/* Calculate to KERNBASE entry. */
- add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
+ add %o1, KERNBASE >> (PGDIR_SHIFT - 2), %o3
/* Poke the entry into the calculated address. */
sta %o2, [%o3] ASI_M_BYPASS
@@ -317,7 +317,7 @@ srmmu_not_viking:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
- add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
@@ -341,7 +341,7 @@ leon_remap:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
- add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
+ add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem
nop ! wheee....
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 540bfc98472c..c5ff2472b3d9 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -12,13 +12,13 @@
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 1cf91c05e275..6bfaf73ce8a0 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -7,10 +7,10 @@
* Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tsb.h>
.text
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index da6f1486318e..41829c024f92 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -39,7 +39,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>
#include <asm/asi.h>
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 5ed43828e078..5d45b6d766d6 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -21,9 +21,9 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>
@@ -593,7 +593,7 @@ show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char *
pdev = to_pci_dev(dev);
dp = pdev->dev.of_node;
- return snprintf (buf, PAGE_SIZE, "%pOF\n", dp);
+ return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 26cca65e9246..13cb5638fab8 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -35,7 +35,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/processor.h>
#include <asm/psr.h>
@@ -145,10 +144,10 @@ void show_regs(struct pt_regs *r)
}
/*
- * The show_stack is an external API which we do not use ourselves.
+ * show_stack() is an external API which we do not use ourselves.
* The oops is printed in die_if_kernel.
*/
-void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long pc, fp;
unsigned long task_base;
@@ -170,11 +169,11 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
break;
rw = (struct reg_window32 *) fp;
pc = rw->ins[7];
- printk("[%08lx : ", pc);
- printk("%pS ] ", (void *) pc);
+ printk("%s[%08lx : ", loglvl, pc);
+ printk("%s%pS ] ", loglvl, (void *) pc);
fp = rw->ins[6];
} while (++count < 16);
- printk("\n");
+ printk("%s\n", loglvl);
}
/*
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4282116e28e7..54945eacd3b5 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -41,7 +41,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
@@ -195,7 +194,7 @@ void show_regs(struct pt_regs *regs)
regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
- show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
}
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
@@ -313,7 +312,7 @@ static void sysrq_handle_globreg(int key)
trigger_all_cpu_backtrace();
}
-static struct sysrq_key_op sparc_globalreg_op = {
+static const struct sysrq_key_op sparc_globalreg_op = {
.handler = sysrq_handle_globreg,
.help_msg = "global-regs(y)",
.action_msg = "Show Global CPU Regs",
@@ -388,7 +387,7 @@ static void sysrq_handle_globpmu(int key)
pmu_snapshot_all_cpus();
}
-static struct sysrq_key_op sparc_globalpmu_op = {
+static const struct sysrq_key_op sparc_globalpmu_op = {
.handler = sysrq_handle_globpmu,
.help_msg = "global-pmu(x)",
.action_msg = "Show Global PMU Regs",
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 16b50afe7b52..47eb315d411c 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -23,7 +23,6 @@
#include <linux/elf.h>
#include <linux/tracehook.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -46,82 +45,79 @@ enum sparc_regset {
REGSET_FP,
};
+static int regwindow32_get(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
+{
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
+
+ if (target == current) {
+ if (copy_from_user(uregs, (void __user *)reg_window, size))
+ return -EFAULT;
+ } else {
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE) != size)
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int regwindow32_set(struct task_struct *target,
+ const struct pt_regs *regs,
+ u32 *uregs)
+{
+ unsigned long reg_window = regs->u_regs[UREG_I6];
+ int size = 16 * sizeof(u32);
+
+ if (target == current) {
+ if (copy_to_user((void __user *)reg_window, uregs, size))
+ return -EFAULT;
+ } else {
+ if (access_process_vm(target, reg_window, uregs, size,
+ FOLL_FORCE | FOLL_WRITE) != size)
+ return -EFAULT;
+ }
+ return 0;
+}
+
static int genregs32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- unsigned long *k = kbuf;
- unsigned long __user *u = ubuf;
- unsigned long reg;
+ u32 uregs[16];
+ int ret;
if (target == current)
flush_user_windows();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- *k++ = regs->u_regs[pos++];
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (put_user(regs->u_regs[pos++], u++))
- return -EFAULT;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, &reg_window[pos++]) ||
- put_user(reg, u++))
- return -EFAULT;
- }
- }
- while (count > 0) {
- switch (pos) {
- case 32: /* PSR */
- reg = regs->psr;
- break;
- case 33: /* PC */
- reg = regs->pc;
- break;
- case 34: /* NPC */
- reg = regs->npc;
- break;
- case 35: /* Y */
- reg = regs->y;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- reg = 0;
- break;
- default:
- goto finish;
- }
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u32));
+ if (ret || !count)
+ return ret;
- if (kbuf)
- *k++ = reg;
- else if (put_user(reg, u++))
+ if (pos < 32 * sizeof(u32)) {
+ if (regwindow32_get(target, regs, uregs))
return -EFAULT;
- pos++;
- count--;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 16 * sizeof(u32), 32 * sizeof(u32));
+ if (ret || !count)
+ return ret;
}
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ uregs[0] = regs->psr;
+ uregs[1] = regs->pc;
+ uregs[2] = regs->npc;
+ uregs[3] = regs->y;
+ uregs[4] = 0; /* WIM */
+ uregs[5] = 0; /* TBR */
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 32 * sizeof(u32), 38 * sizeof(u32));
}
static int genregs32_set(struct task_struct *target,
@@ -130,82 +126,58 @@ static int genregs32_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = target->thread.kregs;
- unsigned long __user *reg_window;
- const unsigned long *k = kbuf;
- const unsigned long __user *u = ubuf;
- unsigned long reg;
+ u32 uregs[16];
+ u32 psr;
+ int ret;
if (target == current)
flush_user_windows();
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf) {
- for (; count > 0 && pos < 16; count--)
- regs->u_regs[pos++] = *k++;
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (put_user(*k++, &reg_window[pos++]))
- return -EFAULT;
- }
- } else {
- for (; count > 0 && pos < 16; count--) {
- if (get_user(reg, u++))
- return -EFAULT;
- regs->u_regs[pos++] = reg;
- }
-
- reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
- reg_window -= 16;
- for (; count > 0 && pos < 32; count--) {
- if (get_user(reg, u++) ||
- put_user(reg, &reg_window[pos++]))
- return -EFAULT;
- }
- }
- while (count > 0) {
- unsigned long psr;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u32));
+ if (ret || !count)
+ return ret;
- if (kbuf)
- reg = *k++;
- else if (get_user(reg, u++))
+ if (pos < 32 * sizeof(u32)) {
+ if (regwindow32_get(target, regs, uregs))
return -EFAULT;
-
- switch (pos) {
- case 32: /* PSR */
- psr = regs->psr;
- psr &= ~(PSR_ICC | PSR_SYSCALL);
- psr |= (reg & (PSR_ICC | PSR_SYSCALL));
- regs->psr = psr;
- break;
- case 33: /* PC */
- regs->pc = reg;
- break;
- case 34: /* NPC */
- regs->npc = reg;
- break;
- case 35: /* Y */
- regs->y = reg;
- break;
- case 36: /* WIM */
- case 37: /* TBR */
- break;
- default:
- goto finish;
- }
-
- pos++;
- count--;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ uregs,
+ 16 * sizeof(u32), 32 * sizeof(u32));
+ if (ret)
+ return ret;
+ if (regwindow32_set(target, regs, uregs))
+ return -EFAULT;
+ if (!count)
+ return 0;
}
-finish:
- pos *= sizeof(reg);
- count *= sizeof(reg);
-
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &psr,
+ 32 * sizeof(u32), 33 * sizeof(u32));
+ if (ret)
+ return ret;
+ regs->psr = (regs->psr & ~(PSR_ICC | PSR_SYSCALL)) |
+ (psr & (PSR_ICC | PSR_SYSCALL));
+ if (!count)
+ return 0;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->pc,
+ 33 * sizeof(u32), 34 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->npc,
+ 34 * sizeof(u32), 35 * sizeof(u32));
+ if (ret || !count)
+ return ret;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->y,
+ 35 * sizeof(u32), 36 * sizeof(u32));
+ if (ret || !count)
+ return ret;
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- 38 * sizeof(reg), -1);
+ 36 * sizeof(u32), 38 * sizeof(u32));
}
static int fpregs32_get(struct task_struct *target,
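For readers tracking the genregs32 rewrite above: user_regset_copyout() and user_regset_copyin() advance the pos/count cursor across one byte range of the regset layout at a time, which is what lets the register-window fixup run between segments. A hedged restatement of the first segment, with the offsets used by this patch:

	/* Segment [0, 64): the 16 u_regs held directly in pt_regs. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs->u_regs, 0, 16 * sizeof(u32));
	if (ret || !count)
		return ret;	/* error, or nothing requested past this segment */
	/* The helpers clamp to [start, end), so a read starting mid-segment
	 * still copies only the bytes the caller asked for. */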
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index c9d41a96468f..7122efb4b1cc 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -32,7 +32,6 @@
#include <linux/context_tracking.h>
#include <asm/asi.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/psrcompat.h>
#include <asm/visasm.h>
@@ -572,19 +571,13 @@ static int genregs32_get(struct task_struct *target,
for (; count > 0 && pos < 32; count--) {
if (access_process_vm(target,
(unsigned long)
- &reg_window[pos],
+ &reg_window[pos++],
&reg, sizeof(reg),
FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
- if (access_process_vm(target,
- (unsigned long) u,
- &reg, sizeof(reg),
- FOLL_FORCE | FOLL_WRITE)
- != sizeof(reg))
+ if (put_user(reg, u++))
return -EFAULT;
- pos++;
- u++;
}
}
}
@@ -684,12 +677,7 @@ static int genregs32_set(struct task_struct *target,
}
} else {
for (; count > 0 && pos < 32; count--) {
- if (access_process_vm(target,
- (unsigned long)
- u,
- &reg, sizeof(reg),
- FOLL_FORCE)
- != sizeof(reg))
+ if (get_user(reg, u++))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 5d1bcfce05d8..6d07b85b9e24 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -40,7 +40,6 @@
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/vaddrs.h>
#include <asm/mbus.h>
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 75e3992203b6..f765fda871eb 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -39,7 +39,6 @@
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 2a734ecd0a40..e2c6f0abda00 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -24,7 +24,6 @@
#include <linux/uaccess.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/psrcompat.h>
#include <asm/fpumacro.h>
#include <asm/visasm.h>
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 42c3de313fd6..3b005b6c3e0f 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -24,7 +24,6 @@
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h> /* flush_sig_insns */
#include <asm/switch_to.h>
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 69ae814b7e90..6937339a272c 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -25,7 +25,6 @@
#include <linux/uaccess.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e078680a1768..76ce290c67cf 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -30,7 +30,6 @@
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 80f20b3808ee..0085e28bf019 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -43,7 +43,6 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index c01767a0480e..91b61f012d19 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -12,11 +12,11 @@
#include <linux/slab.h>
#include <linux/sched/debug.h>
+#include <linux/pgtable.h>
#include <asm/timer.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index b5da3bfdc225..f84a02ab6bf9 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -22,7 +22,6 @@
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
-#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/poll.h>
#include <linux/personality.h>
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index fe59122d257d..51bf1eb92a36 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -6,6 +6,7 @@
*/
+#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
@@ -13,7 +14,6 @@
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 4ceecad556a9..247a0d9683b2 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -18,12 +18,12 @@
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/export.h>
+#include <linux/pgtable.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/traps.h>
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 27778b65a965..d92e5eaa4c1d 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
@@ -29,7 +30,6 @@
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <linux/uaccess.h>
#include <asm/fpumacro.h>
@@ -2452,7 +2452,7 @@ static void user_instruction_dump(unsigned int __user *pc)
printk("\n");
}
-void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+void show_stack(struct task_struct *tsk, unsigned long *_ksp, const char *loglvl)
{
unsigned long fp, ksp;
struct thread_info *tp;
@@ -2476,7 +2476,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = ksp + STACK_BIAS;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
@@ -2497,14 +2497,14 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
fp = (unsigned long)sf->fp + STACK_BIAS;
}
- printk(" [%016lx] %pS\n", pc, (void *) pc);
+ print_ip_sym(loglvl, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) {
struct ftrace_ret_stack *ret_stack;
ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
if (ret_stack) {
pc = ret_stack->ret;
- printk(" [%016lx] %pS\n", pc, (void *) pc);
+ print_ip_sym(loglvl, pc);
graph++;
}
}
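The loglvl argument added to show_stack() here belongs to a tree-wide series that threads the caller's log level through every line of a backtrace, instead of letting continuation lines fall to the default level. A short usage sketch (the KERN_DEBUG choice is illustrative):

	show_stack(current, NULL, KERN_DEBUG);
	/* helpers take the level directly as well: */
	print_ip_sym(KERN_DEBUG, (unsigned long)__builtin_return_address(0));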
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index c7cad9b7bba7..4f57056ed463 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -193,7 +193,7 @@ show_pciobppath_attr(struct device *dev, struct device_attribute *attr,
vdev = to_vio_dev(dev);
dp = vdev->dp;
- return snprintf (buf, PAGE_SIZE, "%pOF\n", dp);
+ return scnprintf(buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH,
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
index 8a6c783a6301..302d3454a994 100644
--- a/arch/sparc/lib/clear_page.S
+++ b/arch/sparc/lib/clear_page.S
@@ -5,10 +5,10 @@
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
+#include <linux/pgtable.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index c088e871e8e3..5ebcfd479f4f 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -8,7 +8,7 @@
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index f6e0e601f857..cfef656eda0f 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -25,7 +25,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
@@ -196,7 +195,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!from_user && address >= PAGE_OFFSET)
goto bad_area;
@@ -263,7 +262,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -272,7 +271,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -280,7 +279,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -329,7 +328,7 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (from_user) {
pagefault_out_of_memory();
return;
@@ -337,7 +336,7 @@ out_of_memory:
goto no_context;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
if (!from_user)
goto no_context;
@@ -391,7 +390,7 @@ static void force_user_fault(unsigned long address, int write)
code = SEGV_MAPERR;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -416,15 +415,15 @@ good_area:
case VM_FAULT_OOM:
goto do_sigbus;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}
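The mmap_sem conversions in these fault handlers are mechanical; every pairing that appears in this diff maps onto the then-new mmap locking API as follows:

	down_read(&mm->mmap_sem)            ->  mmap_read_lock(mm)
	down_read_trylock(&mm->mmap_sem)    ->  mmap_read_trylock(mm)
	up_read(&mm->mmap_sem)              ->  mmap_read_unlock(mm)
	down_write(&mm->mmap_sem)           ->  mmap_write_lock(mm)
	up_write(&mm->mmap_sem)             ->  mmap_write_unlock(mm)
	down_write_nested(&mm->mmap_sem, 1) ->  mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING)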
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index c0c0dd471b6b..a3806614e4dc 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -27,7 +27,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
@@ -71,7 +70,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
}
/*
- * We now make sure that mmap_sem is held in all paths that call
+ * We now make sure that mmap_lock is held in all paths that call
* this. Additionally, to prevent kswapd from ripping ptes from
* under us, raise interrupts around the time that we look at the
* pte, kswapd will have to wait to get his smp ipi response from
@@ -319,7 +318,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if ((regs->tstate & TSTATE_PRIV) &&
!search_exception_tables(regs->tpc)) {
insn = get_fault_insn(regs, insn);
@@ -327,7 +326,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
}
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
if (fault_code & FAULT_CODE_BAD_RA)
@@ -451,7 +450,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -459,7 +458,7 @@ good_area:
goto retry;
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -490,7 +489,7 @@ exit_exception:
*/
bad_area:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -502,7 +501,7 @@ handle_kernel_fault:
*/
out_of_memory:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!(regs->tstate & TSTATE_PRIV)) {
pagefault_out_of_memory();
goto exit_exception;
@@ -515,7 +514,7 @@ intr_or_no_mm:
do_sigbus:
insn = get_fault_insn(regs, insn);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 6ff6e2a9f9b3..d1fc9a7b7d78 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -36,18 +36,10 @@ static pte_t *kmap_pte;
void __init kmap_init(void)
{
- unsigned long address;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *dir;
-
- address = __fix_to_virt(FIX_KMAP_BEGIN);
- p4d = p4d_offset(pgd_offset_k(address), address);
- pud = pud_offset(p4d, address);
- dir = pmd_offset(pud, address);
+ unsigned long address = __fix_to_virt(FIX_KMAP_BEGIN);
/* cache the first kmap pte */
- kmap_pte = pte_offset_kernel(dir, address);
+ kmap_pte = virt_to_kpte(address);
}
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
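virt_to_kpte(), used in kmap_init() above and again in init_64.c below, is the new shorthand for walking the kernel page tables down to a PTE; expanded, it is roughly the following (this matches the generic helper introduced alongside these conversions):

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);	/* pgd -> p4d -> pud -> pmd for &init_mm */

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}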
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7b9fa861b67c..ec423b5f17dd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -14,7 +14,6 @@
#include <asm/mman.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 66885a8dc50a..6c2521e85a42 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h>
#include <asm/asi.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
#include <linux/init.h>
@@ -293,7 +294,7 @@ hypersparc_flush_tlb_range:
cmp %o3, -1
be hypersparc_flush_tlb_range_out
#endif
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 3cb3dffcbcdc..eb2946b1df8a 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -29,7 +29,6 @@
#include <asm/sections.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/setup.h>
#include <asm/tlb.h>
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5774529ceb43..02e6e5e0f106 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -31,7 +31,6 @@
#include <asm/head.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
@@ -504,11 +503,7 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
if (kaddr >= PAGE_OFFSET)
paddr = kaddr & mask;
else {
- pgd_t *pgdp = pgd_offset_k(kaddr);
- p4d_t *p4dp = p4d_offset(pgdp, kaddr);
- pud_t *pudp = pud_offset(p4dp, kaddr);
- pmd_t *pmdp = pmd_offset(pudp, kaddr);
- pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+ pte_t *ptep = virt_to_kpte(kaddr);
paddr = pte_val(*ptep) & mask;
}
@@ -1649,29 +1644,29 @@ bool kern_addr_valid(unsigned long addr)
pgd = pgd_offset_k(addr);
if (pgd_none(*pgd))
- return 0;
+ return false;
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
- return 0;
+ return false;
pud = pud_offset(p4d, addr);
if (pud_none(*pud))
- return 0;
+ return false;
if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
- return 0;
+ return false;
if (pmd_large(*pmd))
return pfn_valid(pmd_pfn(*pmd));
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
- return 0;
+ return false;
return pfn_valid(pte_pfn(*pte));
}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 08238d989cfd..bfcc04bfce54 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -16,7 +16,6 @@
#include <linux/of_device.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
@@ -241,21 +240,15 @@ static void *iounit_alloc(struct device *dev, size_t len,
while(addr < end) {
page = va;
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
long i;
- pgdp = pgd_offset(&init_mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
+ pmdp = pmd_off_k(addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
-
+
i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
iopte = iounit->page_table + i;
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index f1e08e30b64e..35b002eb312e 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -17,7 +17,6 @@
#include <linux/of_device.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
@@ -349,9 +348,6 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
while(addr < end) {
page = va;
{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -362,10 +358,7 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
else
__flush_page_to_ram(page);
- pgdp = pgd_offset(&init_mm, addr);
- p4dp = p4d_offset(pgdp, addr);
- pudp = pud_offset(p4dp, addr);
- pmdp = pmd_offset(pudp, addr);
+ pmdp = pmd_off_k(addr);
ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 7c2278dd308d..0070f8b9a753 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -136,36 +136,8 @@ static void msi_set_sync(void)
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
- unsigned long ptp; /* Physical address, shifted right by 4 */
- int i;
-
- ptp = __nocache_pa(ptep) >> 4;
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
- ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
- }
-}
-
-void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
-{
- unsigned long ptp; /* Physical address, shifted right by 4 */
- int i;
-
- ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
- for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
- set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
- ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
- }
-}
-
-/* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
-{
- void *pte;
-
- pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
- return (pte_t *) pte +
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+ unsigned long ptp = __nocache_pa(ptep) >> 4;
+ set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
}
/*
@@ -175,18 +147,18 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
*/
static void *__srmmu_get_nocache(int size, int align)
{
- int offset;
+ int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
unsigned long addr;
- if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
+ if (size < minsz) {
printk(KERN_ERR "Size 0x%x too small for nocache request\n",
size);
- size = SRMMU_NOCACHE_BITMAP_SHIFT;
+ size = minsz;
}
- if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
- printk(KERN_ERR "Size 0x%x unaligned int nocache request\n",
+ if (size & (minsz - 1)) {
+ printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
size);
- size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
+ size += minsz - 1;
}
BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
@@ -376,31 +348,33 @@ pgd_t *get_pgd_fast(void)
*/
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
- unsigned long pte;
+ pte_t *ptep;
struct page *page;
- if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0)
- return NULL;
- page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
- if (!pgtable_pte_page_ctor(page)) {
- __free_page(page);
+ if ((ptep = pte_alloc_one_kernel(mm)) == 0)
return NULL;
+ page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
+ spin_lock(&mm->page_table_lock);
+ if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {
+ page_ref_dec(page);
+ ptep = NULL;
}
- return page;
+ spin_unlock(&mm->page_table_lock);
+
+ return ptep;
}
-void pte_free(struct mm_struct *mm, pgtable_t pte)
+void pte_free(struct mm_struct *mm, pgtable_t ptep)
{
- unsigned long p;
+ struct page *page;
- pgtable_pte_page_dtor(pte);
- p = (unsigned long)page_address(pte); /* Cached address (for test) */
- if (p == 0)
- BUG();
- p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
+ page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
+ spin_lock(&mm->page_table_lock);
+ if (page_ref_dec_return(page) == 1)
+ pgtable_pte_page_dtor(page);
+ spin_unlock(&mm->page_table_lock);
- /* free non cached virtual address*/
- srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
+ srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
}
/* context handling - a dynamically sized pool is used */
@@ -822,13 +796,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
what = 0;
addr = start - PAGE_SIZE;
- if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
- if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
+ if (!(start & ~(PMD_MASK))) {
+ if (srmmu_probe(addr + PMD_SIZE) == probed)
what = 1;
}
- if (!(start & ~(SRMMU_PGDIR_MASK))) {
- if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
+ if (!(start & ~(PGDIR_MASK))) {
+ if (srmmu_probe(addr + PGDIR_SIZE) == probed)
what = 2;
}
@@ -837,7 +811,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
pudp = pud_offset(p4dp, start);
if (what == 2) {
*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
- start += SRMMU_PGDIR_SIZE;
+ start += PGDIR_SIZE;
continue;
}
if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
@@ -849,6 +823,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
pud_set(__nocache_fix(pudp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pgdp), start);
+ if (what == 1) {
+ *(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
+ start += PMD_SIZE;
+ continue;
+ }
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
@@ -856,19 +835,6 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
memset(__nocache_fix(ptep), 0, PTE_SIZE);
pmd_set(__nocache_fix(pmdp), ptep);
}
- if (what == 1) {
- /* We bend the rule where all 16 PTPs in a pmd_t point
- * inside the same PTE page, and we leak a perfectly
- * good hardware PTE piece. Alternatives seem worse.
- */
- unsigned int x; /* Index of HW PMD in soft cluster */
- unsigned long *val;
- x = (start >> PMD_SHIFT) & 15;
- val = &pmdp->pmdv[x];
- *(unsigned long *)__nocache_fix(val) = probed;
- start += SRMMU_REAL_PMD_SIZE;
- continue;
- }
ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
*(pte_t *)__nocache_fix(ptep) = __pte(probed);
start += PAGE_SIZE;
@@ -890,9 +856,9 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
- unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
- unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
- unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
+ unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
+ unsigned long vstart = (vbase & PGDIR_MASK);
+ unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
/* Map "low" memory only */
const unsigned long min_vaddr = PAGE_OFFSET;
const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
@@ -905,7 +871,7 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
while (vstart < vend) {
do_large_mapping(vstart, pstart);
- vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
+ vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;
}
return vstart;
}
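The pte_alloc_one()/pte_free() rework earlier in this srmmu.c diff lets several small SRMMU PTE tables share one physical page, so the page-table constructor and destructor must run exactly once per page. A commented restatement of the gating (same calls as the patch; toy framing):

	spin_lock(&mm->page_table_lock);
	/* ref 1 -> 2 marks the first PTE table in this page: construct it. */
	if (page_ref_inc_return(page) == 2 && !pgtable_pte_page_ctor(page)) {
		page_ref_dec(page);	/* ctor failed: drop our reference */
		ptep = NULL;
	}
	spin_unlock(&mm->page_table_lock);

pte_free() mirrors this: page_ref_dec_return(page) == 1 means the last table in the page is gone, so the destructor runs then.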
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3d72d2deb13b..a32a16c18617 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -10,7 +10,6 @@
#include <linux/swap.h>
#include <linux/preempt.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f5edc28aa3a5..0dce4b7ff73e 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -8,9 +8,9 @@
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/tsb.h>
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index d220b6848746..70e658d107e0 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -5,8 +5,8 @@
* Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
*/
+#include <linux/pgtable.h>
#include <asm/asi.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index adaef6e7b8cf..48f062de7a7f 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -13,6 +13,7 @@
#include <asm/asi.h>
#include <asm/mxcc.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>
@@ -157,7 +158,7 @@ viking_flush_tlb_range:
cmp %o3, -1
be 2f
#endif
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
@@ -243,7 +244,7 @@ sun4dsmp_flush_tlb_range:
ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5
- sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+ sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1
add %o1, 0x200, %o1
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index 9961b0f81693..cc19e09b0fa1 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -366,7 +366,7 @@ static int map_vdso(const struct vdso_image *image,
unsigned long text_start, addr = 0;
int ret = 0;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/*
* First, get an unmapped region: then randomize it, and make sure that
@@ -422,7 +422,7 @@ up_fail:
if (ret)
current->mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 275f5ffdf6f0..3f27aa3ec0a6 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -140,7 +140,7 @@ export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
# When cleaning we don't include .config, so we don't include
# TT or skas makefiles and don't clean skas_ptregs.h.
CLEAN_FILES += linux x.i gmon.out
-MRPROPER_DIRS += arch/$(SUBARCH)/include/generated
+MRPROPER_FILES += arch/$(SUBARCH)/include/generated
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index a290821e355c..2a249f619467 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -18,9 +18,9 @@ ubd-objs := ubd_kern.o ubd_user.o
port-objs := port_kern.o port_user.o
harddog-objs := harddog_kern.o harddog_user.o
-LDFLAGS_pcap.o := -r $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
+LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
-LDFLAGS_vde.o := -r $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
+LDFLAGS_vde.o = $(shell $(CC) $(CFLAGS) -print-file-name=libvdeplug.a)
targets := pcap_kern.o pcap_user.o vde_kern.o vde_user.o
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 30575bd92975..a2e680f7d39f 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -648,7 +648,7 @@ static void stack_proc(void *arg)
{
struct task_struct *task = arg;
- show_stack(task, NULL);
+ show_stack(task, NULL, KERN_INFO);
}
/*
diff --git a/arch/um/drivers/vector_kern.h b/arch/um/drivers/vector_kern.h
index d0159082faf0..8fff93a75a92 100644
--- a/arch/um/drivers/vector_kern.h
+++ b/arch/um/drivers/vector_kern.h
@@ -129,7 +129,7 @@ struct vector_private {
struct vector_estats estats;
struct sock_fprog *bpf;
- char user[0];
+ char user[];
};
extern int build_transport_data(struct vector_private *vp);
diff --git a/arch/um/drivers/vector_user.c b/arch/um/drivers/vector_user.c
index aa28e9eecb7b..c4a0f26b2824 100644
--- a/arch/um/drivers/vector_user.c
+++ b/arch/um/drivers/vector_user.c
@@ -29,6 +29,7 @@
#include <netdb.h>
#include <stdlib.h>
#include <os.h>
+#include <limits.h>
#include <um_malloc.h>
#include "vector_user.h"
@@ -42,6 +43,9 @@
#define TRANS_RAW "raw"
#define TRANS_RAW_LEN strlen(TRANS_RAW)
+#define TRANS_FD "fd"
+#define TRANS_FD_LEN strlen(TRANS_FD)
+
#define VNET_HDR_FAIL "could not enable vnet headers on fd %d"
#define TUN_GET_F_FAIL "tapraw: TUNGETFEATURES failed: %s"
#define L2TPV3_BIND_FAIL "l2tpv3_open : could not bind socket err=%i"
@@ -347,6 +351,59 @@ unix_cleanup:
return NULL;
}
+static int strtofd(const char *nptr)
+{
+ long fd;
+ char *endptr;
+
+ if (nptr == NULL)
+ return -1;
+
+ errno = 0;
+ fd = strtol(nptr, &endptr, 10);
+ if (nptr == endptr ||
+ errno != 0 ||
+ *endptr != '\0' ||
+ fd < 0 ||
+ fd > INT_MAX) {
+ return -1;
+ }
+ return fd;
+}
+
+static struct vector_fds *user_init_fd_fds(struct arglist *ifspec)
+{
+ int fd = -1;
+ char *fdarg = NULL;
+ struct vector_fds *result = NULL;
+
+ fdarg = uml_vector_fetch_arg(ifspec, "fd");
+ fd = strtofd(fdarg);
+ if (fd == -1) {
+ printk(UM_KERN_ERR "fd open: bad or missing fd argument");
+ goto fd_cleanup;
+ }
+
+ result = uml_kmalloc(sizeof(struct vector_fds), UM_GFP_KERNEL);
+ if (result == NULL) {
+ printk(UM_KERN_ERR "fd open: allocation failed");
+ goto fd_cleanup;
+ }
+
+ result->rx_fd = fd;
+ result->tx_fd = fd;
+ result->remote_addr_size = 0;
+ result->remote_addr = NULL;
+ return result;
+
+fd_cleanup:
+ if (fd >= 0)
+ os_close_file(fd);
+ if (result != NULL)
+ kfree(result);
+ return NULL;
+}
+
static struct vector_fds *user_init_raw_fds(struct arglist *ifspec)
{
int rxfd = -1, txfd = -1;
@@ -578,6 +635,8 @@ struct vector_fds *uml_vector_user_open(
return user_init_socket_fds(parsed, ID_L2TPV3);
if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
return user_init_unix_fds(parsed, ID_BESS);
+ if (strncmp(transport, TRANS_FD, TRANS_FD_LEN) == 0)
+ return user_init_fd_fds(parsed);
return NULL;
}
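The strtofd() helper added above is deliberately stricter than atoi(); per its checks, illustrative inputs behave as follows:

	/*
	 *  strtofd("3")   ->  3
	 *  strtofd(NULL)  -> -1   missing argument
	 *  strtofd("")    -> -1   nptr == endptr: no digits consumed
	 *  strtofd("3x")  -> -1   *endptr != '\0': trailing junk
	 *  strtofd("-2")  -> -1   fd < 0
	 */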
diff --git a/arch/um/drivers/vhost_user.h b/arch/um/drivers/vhost_user.h
index 6c71b6005177..6f147cd3c9f7 100644
--- a/arch/um/drivers/vhost_user.h
+++ b/arch/um/drivers/vhost_user.h
@@ -78,7 +78,7 @@ struct vhost_user_config {
u32 offset;
u32 size;
u32 flags;
- u8 payload[0]; /* Variable length */
+ u8 payload[]; /* Variable length */
} __packed;
struct vhost_user_vring_state {
diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
index be54d368e73d..351aee52aca6 100644
--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -74,7 +74,7 @@ struct virtio_uml_vq_info {
extern unsigned long long physmem_size, highmem;
-#define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, __VA_ARGS__)
+#define vu_err(vu_dev, ...) dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
/* Vhost-user protocol */
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index b4deb1bfbb68..17ddd4edf875 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
#include <asm/mmu.h>
@@ -47,9 +48,9 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* when the new ->mm is used for the first time.
*/
__switch_mm(&new->context.id);
- down_write_nested(&new->mmap_sem, 1);
+ mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
uml_setup_stubs(new);
- up_write(&new->mmap_sem);
+ mmap_write_unlock(new);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index 8a3b689e0f86..36f452957cef 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -89,10 +89,6 @@ static inline void pud_clear (pud_t *pud)
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
- pmd_index(address))
-
static inline unsigned long pte_pfn(pte_t pte)
{
return phys_to_pfn(pte_val(pte));
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index b5ddf5d98bd5..def376194dce 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
+/*
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright 2003 PathScale, Inc.
* Derived from include/asm-i386/pgtable.h
@@ -131,7 +131,7 @@ static inline int pte_none(pte_t pte)
* Undefined behaviour if not..
*/
static inline int pte_read(pte_t pte)
-{
+{
return((pte_get_bits(pte, _PAGE_USER)) &&
!(pte_get_bits(pte, _PAGE_PROTNONE)));
}
@@ -163,7 +163,7 @@ static inline int pte_newpage(pte_t pte)
}
static inline int pte_newprot(pte_t pte)
-{
+{
return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}
@@ -185,31 +185,31 @@ static inline pte_t pte_mkclean(pte_t pte)
return(pte);
}
-static inline pte_t pte_mkold(pte_t pte)
-{
+static inline pte_t pte_mkold(pte_t pte)
+{
pte_clear_bits(pte, _PAGE_ACCESSED);
return(pte);
}
static inline pte_t pte_wrprotect(pte_t pte)
-{
+{
if (likely(pte_get_bits(pte, _PAGE_RW)))
pte_clear_bits(pte, _PAGE_RW);
else
return pte;
- return(pte_mknewprot(pte));
+ return(pte_mknewprot(pte));
}
static inline pte_t pte_mkread(pte_t pte)
-{
+{
if (unlikely(pte_get_bits(pte, _PAGE_USER)))
return pte;
pte_set_bits(pte, _PAGE_USER);
- return(pte_mknewprot(pte));
+ return(pte_mknewprot(pte));
}
static inline pte_t pte_mkdirty(pte_t pte)
-{
+{
pte_set_bits(pte, _PAGE_DIRTY);
return(pte);
}
@@ -220,20 +220,20 @@ static inline pte_t pte_mkyoung(pte_t pte)
return(pte);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte)
{
if (unlikely(pte_get_bits(pte, _PAGE_RW)))
return pte;
pte_set_bits(pte, _PAGE_RW);
- return(pte_mknewprot(pte));
+ return(pte_mknewprot(pte));
}
-static inline pte_t pte_mkuptodate(pte_t pte)
+static inline pte_t pte_mkuptodate(pte_t pte)
{
pte_clear_bits(pte, _PAGE_NEWPAGE);
if(pte_present(pte))
pte_clear_bits(pte, _PAGE_NEWPROT);
- return(pte);
+ return(pte);
}
static inline pte_t pte_mknewpage(pte_t pte)
@@ -288,53 +288,16 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
- return pte;
+ return pte;
}
/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
*
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_unmap(pte) do { } while (0)
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
@@ -353,8 +316,6 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr) \
do { \
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 70ee60383900..ff9c62828962 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -2,6 +2,8 @@
#ifndef __UM_TLB_H
#define __UM_TLB_H
+#include <linux/mm.h>
+
#include <asm/tlbflush.h>
#include <asm-generic/cacheflush.h>
#include <asm-generic/tlb.h>
diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
index 67b2e0fa92bb..e929c0966696 100644
--- a/arch/um/kernel/maccess.c
+++ b/arch/um/kernel/maccess.c
@@ -7,15 +7,13 @@
#include <linux/kernel.h>
#include <os.h>
-long probe_kernel_read(void *dst, const void *src, size_t size)
+bool probe_kernel_read_allowed(const void *src, size_t size)
{
void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
if ((unsigned long)src < PAGE_SIZE || size <= 0)
- return -EFAULT;
-
+ return false;
if (os_mincore(psrc, size + src - psrc) <= 0)
- return -EFAULT;
-
- return __probe_kernel_read(dst, src, size);
+ return false;
+ return true;
}
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 401b22f14743..c2ff76c8981e 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -125,10 +125,6 @@ static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
long size = FIXADDR_USER_END - FIXADDR_USER_START;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
pte_t *pte;
phys_t p;
unsigned long v, vaddr = FIXADDR_USER_START;
@@ -146,11 +142,7 @@ static void __init fixaddr_user_init( void)
p = __pa(v);
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
p += PAGE_SIZE) {
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
+ pte = virt_to_kpte(vaddr);
pte_set_val(*pte, p, PAGE_READONLY);
}
#endif
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index cbe33af2a880..e3a2cf92a373 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -25,7 +25,6 @@
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 3f0d9a573fd6..d9961163da66 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -9,7 +9,6 @@
#include <linux/slab.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
@@ -115,7 +114,7 @@ void uml_setup_stubs(struct mm_struct *mm)
mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
- /* dup_mmap already holds mmap_sem */
+ /* dup_mmap already holds mmap_lock */
err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
VM_READ | VM_MAYREAD | VM_EXEC |
VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index d617f8dc9c19..2dec915abe6f 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -10,7 +10,6 @@
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <kern_util.h>
#include <os.h>
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index c71b5ef7ea8c..acbc879d2773 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -17,7 +17,9 @@
static void _print_addr(void *data, unsigned long address, int reliable)
{
- pr_info(" [<%08lx>] %s%pS\n", address, reliable ? "" : "? ",
+ const char *loglvl = data;
+
+ printk("%s [<%08lx>] %s%pS\n", loglvl, address, reliable ? "" : "? ",
(void *)address);
}
@@ -25,9 +27,9 @@ static const struct stacktrace_ops stackops = {
.address = _print_addr
};
-void show_stack(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
{
- unsigned long *sp = stack;
struct pt_regs *segv_regs = current->thread.segv_regs;
int i;
@@ -38,20 +40,19 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}
if (!stack)
- sp = get_stack_pointer(task, segv_regs);
+ stack = get_stack_pointer(task, segv_regs);
- pr_info("Stack:\n");
- stack = sp;
+ printk("%sStack:\n", loglvl);
for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) {
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- pr_cont("\n");
+ printk("%s\n", loglvl);
pr_cont(" %08lx", *stack++);
}
- pr_cont("\n");
+ printk("%s\n", loglvl);
- pr_info("Call Trace:\n");
- dump_trace(current, &stackops, NULL);
- pr_info("\n");
+ printk("%sCall Trace:\n", loglvl);
+ dump_trace(current, &stackops, (void *)loglvl);
+ printk("%s\n", loglvl);
}
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 80a358c6d652..61776790cd67 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/sched/signal.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
@@ -349,8 +348,8 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
if (ret) {
printk(KERN_ERR "fix_range_common: failed, killing current "
"process: %d\n", task_tgid_vnr(current));
- /* We are under mmap_sem, release it such that current can terminate */
- up_write(&current->mm->mmap_sem);
+ /* We are under mmap_lock, release it such that current can terminate */
+ mmap_write_unlock(current->mm);
force_sig(SIGKILL);
do_signal(&current->thread.regs);
}
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 8f18cf56b3dd..2b3afa354a90 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -10,7 +10,6 @@
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <arch.h>
#include <as-layout.h>
@@ -27,9 +26,6 @@ int handle_page_fault(unsigned long address, unsigned long ip,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int err = -EFAULT;
@@ -47,7 +43,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
if (is_user)
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
goto out;
@@ -103,10 +99,7 @@ good_area:
}
}
- pgd = pgd_offset(mm, address);
- p4d = p4d_offset(pgd, address);
- pud = pud_offset(p4d, address);
- pmd = pmd_offset(pud, address);
+ pmd = pmd_off(mm, address);
pte = pte_offset_kernel(pmd, address);
} while (!pte_present(*pte));
err = 0;
@@ -123,7 +116,7 @@ good_area:
#endif
flush_tlb_page(vma, address);
out:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_nosemaphore:
return err;
@@ -132,7 +125,7 @@ out_of_memory:
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!is_user)
goto out_nosemaphore;
pagefault_out_of_memory();
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 375ab720e4aa..00141e70de56 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -14,7 +14,6 @@
#include <linux/sched/task.h>
#include <linux/kmsg_dump.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index 26ecbd64c409..e4421dbc4c36 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -6,6 +6,7 @@
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
+#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
@@ -289,7 +290,7 @@ int os_write_file(int fd, const void *buf, int len)
int os_sync_file(int fd)
{
- int n = fsync(fd);
+ int n = fdatasync(fd);
if (n < 0)
return -errno;
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 41fe944005f8..11ba1839d198 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -70,7 +70,7 @@ config ARCH_PUV3
def_bool y
select CPU_UCV2
select GENERIC_CLOCKEVENTS
- select HAVE_CLK
+ select HAVE_LEGACY_CLK
select GPIOLIB
# CONFIGs for ARCH_PUV3
diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h
index dc8c0b41538f..ff0be92ebc32 100644
--- a/arch/unicore32/include/asm/cacheflush.h
+++ b/arch/unicore32/include/asm/cacheflush.h
@@ -133,14 +133,6 @@ extern void flush_cache_page(struct vm_area_struct *vma,
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
- * flush_cache_user_range is used when we want to ensure that the
- * Harvard caches are synchronised for the user space address range.
- * This is used for the UniCore private sys_cacheflush system call.
- */
-#define flush_cache_user_range(vma, start, end) \
- __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
-
-/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
*/
@@ -170,9 +162,6 @@ extern void flush_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_user_range(vma, page, addr, len) \
- flush_dcache_page(page)
-
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 826f49edd94e..97f564c8ecba 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -153,12 +153,6 @@ extern struct page *empty_zero_page;
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm, addr, ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir, addr) (pmd_page_vaddr(*(dir)) \
- + __pte_index(addr))
-
-#define pte_offset_map(dir, addr) (pmd_page_vaddr(*(dir)) \
- + __pte_index(addr))
-#define pte_unmap(pte) do { } while (0)
#define set_pte(ptep, pte) cpu_set_pte(ptep, pte)
@@ -221,17 +215,6 @@ PTE_BIT_FUNC(mkyoung, |= PTE_YOUNG);
*/
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
-/* Find an entry in the third-level page table.. */
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
@@ -279,8 +262,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
#endif /* !__ASSEMBLY__ */
#endif /* __UNICORE_PGTABLE_H__ */
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
index ccad051a79b6..4cdf3c846a2d 100644
--- a/arch/unicore32/kernel/hibernate.c
+++ b/arch/unicore32/kernel/hibernate.c
@@ -11,9 +11,9 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/suspend.h>
diff --git a/arch/unicore32/kernel/hibernate_asm.S b/arch/unicore32/kernel/hibernate_asm.S
index 7e7499c49089..a589bc189e24 100644
--- a/arch/unicore32/kernel/hibernate_asm.S
+++ b/arch/unicore32/kernel/hibernate_asm.S
@@ -11,9 +11,9 @@
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <generated/asm-offsets.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/assembler.h>
@ restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist)
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
index 717ee1b78350..67c89ef2d6ee 100644
--- a/arch/unicore32/kernel/module.c
+++ b/arch/unicore32/kernel/module.c
@@ -16,7 +16,6 @@
#include <linux/string.h>
#include <linux/gfp.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h
index e40d3603c7e7..967352323185 100644
--- a/arch/unicore32/kernel/setup.h
+++ b/arch/unicore32/kernel/setup.h
@@ -29,7 +29,7 @@ extern void kernel_thread_helper(void);
extern void __init early_signal_init(void);
extern asmlinkage void __backtrace(void);
-extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+extern asmlinkage void c_backtrace(unsigned long fp, const char *loglvl);
extern void __show_regs(struct pt_regs *);
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
index e24f67283864..a3ac01df1a2e 100644
--- a/arch/unicore32/kernel/traps.c
+++ b/arch/unicore32/kernel/traps.c
@@ -135,44 +135,42 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
set_fs(fs);
}
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
+ const char *loglvl)
{
- unsigned int fp, mode;
+ unsigned int fp;
int ok = 1;
- printk(KERN_DEFAULT "Backtrace: ");
+ printk("%sBacktrace: ", loglvl);
if (!tsk)
tsk = current;
- if (regs) {
+ if (regs)
fp = regs->UCreg_fp;
- mode = processor_mode(regs);
- } else if (tsk != current) {
+ else if (tsk != current)
fp = thread_saved_fp(tsk);
- mode = 0x10;
- } else {
+ else
asm("mov %0, fp" : "=r" (fp) : : "cc");
- mode = 0x10;
- }
if (!fp) {
- printk("no frame pointer");
+ printk("%sno frame pointer", loglvl);
ok = 0;
} else if (verify_stack(fp)) {
- printk("invalid frame pointer 0x%08x", fp);
+ printk("%sinvalid frame pointer 0x%08x", loglvl, fp);
ok = 0;
} else if (fp < (unsigned long)end_of_stack(tsk))
- printk("frame pointer underflow");
- printk("\n");
+ printk("%sframe pointer underflow", loglvl);
+ printk("%s\n", loglvl);
if (ok)
- c_backtrace(fp, mode);
+ c_backtrace(fp, loglvl);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp,
+ const char *loglvl)
{
- dump_backtrace(NULL, tsk);
+ dump_backtrace(NULL, tsk, loglvl);
barrier();
}
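Note: this conversion follows the tree-wide pattern for threading a log level through backtrace code: every printk() gains a "%s" prefix fed by loglvl instead of a hard-coded KERN_* constant, so the caller chooses the severity. A sketch of the resulting calling convention (the caller below is hypothetical):

/* Hypothetical caller: the same stack dump at two severities. */
static void sketch_report(struct task_struct *tsk)
{
	show_stack(tsk, NULL, KERN_INFO);	/* routine diagnostics */
	show_stack(tsk, NULL, KERN_EMERG);	/* from a die()/oops path */
}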
@@ -200,7 +198,7 @@ static int __die(const char *str, int err, struct thread_info *thread,
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
- dump_backtrace(regs, tsk);
+ dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
diff --git a/arch/unicore32/lib/Makefile b/arch/unicore32/lib/Makefile
index 098981a01841..5af06645b8f0 100644
--- a/arch/unicore32/lib/Makefile
+++ b/arch/unicore32/lib/Makefile
@@ -10,12 +10,12 @@ lib-y += strncpy_from_user.o strnlen_user.o
lib-y += clear_user.o copy_page.o
lib-y += copy_from_user.o copy_to_user.o
-GNU_LIBC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libc.a)
+GNU_LIBC_A = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libc.a)
GNU_LIBC_A_OBJS := memchr.o memcpy.o memmove.o memset.o
GNU_LIBC_A_OBJS += strchr.o strrchr.o
GNU_LIBC_A_OBJS += rawmemchr.o # needed by strrchr.o
-GNU_LIBGCC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a)
+GNU_LIBGCC_A = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a)
GNU_LIBGCC_A_OBJS := _ashldi3.o _ashrdi3.o _lshrdi3.o
GNU_LIBGCC_A_OBJS += _divsi3.o _modsi3.o _ucmpdi2.o _umodsi3.o _udivsi3.o
diff --git a/arch/unicore32/lib/backtrace.S b/arch/unicore32/lib/backtrace.S
index f303671e2a4e..6221944b81f3 100644
--- a/arch/unicore32/lib/backtrace.S
+++ b/arch/unicore32/lib/backtrace.S
@@ -16,6 +16,7 @@
#define sv_fp v5
#define sv_pc v6
#define offset v8
+#define loglvl v9
ENTRY(__backtrace)
mov r0, fp
@@ -27,10 +28,11 @@ ENTRY(c_backtrace)
ENDPROC(__backtrace)
ENDPROC(c_backtrace)
#else
- stm.w (v4 - v8, lr), [sp-] @ Save an extra register
+ stm.w (v4 - v10, lr), [sp-] @ Save an extra register
@ so we have a location...
mov.a frame, r0 @ if frame pointer is zero
beq no_frame @ we have no stack frames
+ mov loglvl, r1
1: stm.w (pc), [sp-] @ calculate offset of PC stored
ldw.w r0, [sp]+, #4 @ by stmfd for this CPU
@@ -95,9 +97,10 @@ for_each_frame:
bua for_each_frame
1006: adr r0, .Lbad
- mov r1, frame
+ mov r1, loglvl
+ mov r2, frame
b.l printk
-no_frame: ldm.w (v4 - v8, pc), [sp]+
+no_frame: ldm.w (v4 - v10, pc), [sp]+
ENDPROC(__backtrace)
ENDPROC(c_backtrace)
@@ -128,8 +131,11 @@ ENDPROC(c_backtrace)
add v7, v7, #1
cxor.a v7, #6
cmoveq v7, #1
- cmoveq r1, #'\n'
- cmovne r1, #' '
+ bne 201f
+ adr r0, .Lcr
+ mov r1, loglvl
+ b.l printk
+201:
ldw.w r3, [stack]+, #-4
mov r2, reg
csub.a r2, #8
@@ -141,18 +147,20 @@ ENDPROC(c_backtrace)
add r2, r2, #0x10 @ so r2 need add 16
201:
adr r0, .Lfp
+ mov r1, loglvl
b.l printk
2: sub.a reg, reg, #1
bns 1b
cxor.a v7, #0
beq 201f
adr r0, .Lcr
+ mov r1, loglvl
b.l printk
201: ldm.w (instr, reg, stack, v7, pc), [sp]+
-.Lfp: .asciz "%cr%d:%08x"
-.Lcr: .asciz "\n"
-.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+.Lfp: .asciz "%sr%d:%08x "
+.Lcr: .asciz "%s\n"
+.Lbad: .asciz "%sBacktrace aborted due to bad frame pointer <%p>\n"
.align
.Ldsi: .word 0x92eec000 >> 14 @ stm.w sp, (... fp, ip, lr, pc)
.word 0x92e10000 >> 14 @ stm.w sp, ()
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
index a07ae5cc58e5..2ea98f7a4156 100644
--- a/arch/unicore32/mm/alignment.c
+++ b/arch/unicore32/mm/alignment.c
@@ -18,8 +18,8 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/unaligned.h>
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 3022104aa613..7654bddde133 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -17,7 +17,6 @@
#include <linux/sched/signal.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
@@ -224,12 +223,12 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!mmap_read_trylock(mm)) {
if (!user_mode(regs)
&& !search_exception_tables(regs->UCreg_pc))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -247,7 +246,7 @@ retry:
fault = __do_pf(mm, addr, fsr, flags, tsk);
/* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because
+ * signal first. We do not need to release the mmap_lock because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
if (fault_signal_pending(fault, regs))
@@ -264,7 +263,7 @@ retry:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
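Note: mmap_read_trylock()/mmap_read_lock()/mmap_read_unlock() wrap the old down_read_trylock()/down_read()/up_read() calls on mmap_sem one-to-one, so the fault-path locking pattern is unchanged. Condensed to its skeleton, with this architecture's details elided:

#include <linux/mm.h>
#include <linux/extable.h>

/* Skeleton of the fault-path locking above (arch details elided). */
static void sketch_fault(struct mm_struct *mm, struct pt_regs *regs,
			 unsigned long pc)
{
	if (!mmap_read_trylock(mm)) {
		/* Kernel fault without an exception fixup: bail out early. */
		if (!user_mode(regs) && !search_exception_tables(pc))
			return;				/* -> no_context */
		mmap_read_lock(mm);
	}

	/* ... find_vma() and handle_mm_fault() run under the read lock ... */

	mmap_read_unlock(mm);
}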
diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h
index 27127abc95fb..f157f5d249ab 100644
--- a/arch/unicore32/mm/mm.h
+++ b/arch/unicore32/mm/mm.h
@@ -14,16 +14,6 @@ extern int sysctl_overcommit_memory;
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
-static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
-{
- return pmd_offset((pud_t *)pgd, virt);
-}
-
-static inline pmd_t *pmd_off_k(unsigned long virt)
-{
- return pmd_off(pgd_offset_k(virt), virt);
-}
-
struct mem_type {
unsigned int prot_pte;
unsigned int prot_l1;
diff --git a/arch/unicore32/mm/proc-ucv2.S b/arch/unicore32/mm/proc-ucv2.S
index 8cc9a1b16d60..18f8c4fb21a0 100644
--- a/arch/unicore32/mm/proc-ucv2.S
+++ b/arch/unicore32/mm/proc-ucv2.S
@@ -8,10 +8,10 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
-#include <asm/pgtable.h>
#include "proc-macros.S"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1a54aeb40626..dde744682e63 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -181,7 +181,6 @@ config X86
select HAVE_HW_BREAKPOINT
select HAVE_IDE
select HAVE_IOREMAP_PROT
- select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
@@ -233,6 +232,7 @@ config X86
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
+ select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
@@ -822,14 +822,6 @@ config PVH
This option enables the PVH entry point for guest virtual machines
as specified in the x86/HVM direct boot ABI.
-config KVM_DEBUG_FS
- bool "Enable debug information for KVM Guests in debugfs"
- depends on KVM_GUEST && DEBUG_FS
- ---help---
- This option enables collection of various statistics for KVM guest.
- Statistics are displayed in debugfs filesystem. Enabling this option
- may incur significant overhead.
-
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
depends on PARAVIRT
@@ -1524,6 +1516,7 @@ config X86_CPA_STATISTICS
config AMD_MEM_ENCRYPT
bool "AMD Secure Memory Encryption (SME) support"
depends on X86_64 && CPU_SUP_AMD
+ select DMA_COHERENT_POOL
select DYNAMIC_PHYSICAL_MASK
select ARCH_USE_MEMREMAP_PROT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 4c5355684321..fe605205b4ce 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,7 +9,9 @@
# Changed by many, many contributors over the years.
#
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Kernel does not boot with kcov instrumentation here.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5f7c262bcc99..7619742f91c9 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -17,7 +17,9 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
+# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
index 9557c5a15b91..f9c5c13d979b 100644
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ b/arch/x86/boot/compressed/kaslr_64.c
@@ -22,8 +22,8 @@
#include "misc.h"
/* These actually do the work of building the kernel identity maps. */
+#include <linux/pgtable.h>
#include <asm/init.h>
-#include <asm/pgtable.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 85eb381259c2..b7a5790d8d63 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -3,7 +3,13 @@
# Makefile for the x86 low level entry code
#
-OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
+CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 1c7f13bb6728..4208c1e3f601 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -341,30 +341,13 @@ For 32-bit we have the following conventions - kernel is built with
#endif
.endm
-#endif /* CONFIG_X86_64 */
+#else /* CONFIG_X86_64 */
+# undef UNWIND_HINT_IRET_REGS
+# define UNWIND_HINT_IRET_REGS
+#endif /* !CONFIG_X86_64 */
.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
call stackleak_erase
#endif
.endm
-
-/*
- * This does 'call enter_from_user_mode' unless we can avoid it based on
- * kernel config or using the static jump infrastructure.
- */
-.macro CALL_enter_from_user_mode
-#ifdef CONFIG_CONTEXT_TRACKING
-#ifdef CONFIG_JUMP_LABEL
- STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_key, def=0
-#endif
- call enter_from_user_mode
-.Lafter_call_\@:
-#endif
-.endm
-
-#ifdef CONFIG_PARAVIRT_XXL
-#define GET_CR2_INTO(reg) GET_CR2_INTO_AX ; _ASM_MOV %_ASM_AX, reg
-#else
-#define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg
-#endif
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 76735ec813e6..bd3f14175193 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -27,6 +27,11 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_XEN_PV
+#include <xen/xen-ops.h>
+#include <xen/events.h>
+#endif
+
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
@@ -35,21 +40,67 @@
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
+#include <asm/irq_stack.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
#ifdef CONFIG_CONTEXT_TRACKING
-/* Called on entry from user mode with IRQs off. */
-__visible inline void enter_from_user_mode(void)
+/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ *
+ * Syscall entry disables interrupts, but user mode is traced as interrupts
+ * enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ */
+static noinstr void enter_from_user_mode(void)
{
- CT_WARN_ON(ct_state() != CONTEXT_USER);
+ enum ctx_state state = ct_state();
+
+ lockdep_hardirqs_off(CALLER_ADDR0);
user_exit_irqoff();
+
+ instrumentation_begin();
+ CT_WARN_ON(state != CONTEXT_USER);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
}
#else
-static inline void enter_from_user_mode(void) {}
+static __always_inline void enter_from_user_mode(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
#endif
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall exit enables interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Clear CPU buffers if the CPU is affected by MDS and the mitigation is on.
+ * 4) Tell lockdep that interrupts are enabled
+ */
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+
+ user_enter_irqoff();
+ mds_user_clear_cpu_buffers();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
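Note: the ordering in these two helpers is deliberate and mirror-imaged: lockdep is told first on entry and last on exit, with RCU/context tracking in between and tracing confined to the instrumentable region. Every noinstr entry point below follows the same bracket; schematically:

/* Schematic bracket used by the noinstr entry points below. */
__visible noinstr void sketch_entry(struct pt_regs *regs)
{
	enter_from_user_mode();		/* lockdep -> RCU -> tracing */
	instrumentation_begin();	/* instrumentable code may run now */

	/* ... the actual entry work ... */

	instrumentation_end();
	exit_to_user_mode();		/* tracing -> RCU -> lockdep */
}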
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
@@ -179,8 +230,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
}
}
-/* Called with IRQs disabled. */
-__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
+static void __prepare_exit_to_usermode(struct pt_regs *regs)
{
struct thread_info *ti = current_thread_info();
u32 cached_flags;
@@ -219,10 +269,14 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
*/
ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif
+}
- user_enter_irqoff();
-
- mds_user_clear_cpu_buffers();
+__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ __prepare_exit_to_usermode(regs);
+ instrumentation_end();
+ exit_to_user_mode();
}
#define SYSCALL_EXIT_WORK_FLAGS \
@@ -251,11 +305,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
tracehook_report_syscall_exit(regs, step);
}
-/*
- * Called with IRQs on and fully valid regs. Returns with IRQs off in a
- * state such that we can immediately switch to user mode.
- */
-__visible inline void syscall_return_slowpath(struct pt_regs *regs)
+static void __syscall_return_slowpath(struct pt_regs *regs)
{
struct thread_info *ti = current_thread_info();
u32 cached_flags = READ_ONCE(ti->flags);
@@ -276,15 +326,29 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
syscall_slow_exit_work(regs, cached_flags);
local_irq_disable();
- prepare_exit_to_usermode(regs);
+ __prepare_exit_to_usermode(regs);
+}
+
+/*
+ * Called with IRQs on and fully valid regs. Returns with IRQs off in a
+ * state such that we can immediately switch to user mode.
+ */
+__visible noinstr void syscall_return_slowpath(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ __syscall_return_slowpath(regs);
+ instrumentation_end();
+ exit_to_user_mode();
}
#ifdef CONFIG_X86_64
-__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
struct thread_info *ti;
enter_from_user_mode();
+ instrumentation_begin();
+
local_irq_enable();
ti = current_thread_info();
if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
@@ -301,8 +365,10 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
regs->ax = x32_sys_call_table[nr](regs);
#endif
}
+ __syscall_return_slowpath(regs);
- syscall_return_slowpath(regs);
+ instrumentation_end();
+ exit_to_user_mode();
}
#endif
@@ -313,7 +379,7 @@ __visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
* extremely hot in workloads that use it, and it's usually called from
* do_fast_syscall_32, so forcibly inline it to improve performance.
*/
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
+static void do_syscall_32_irqs_on(struct pt_regs *regs)
{
struct thread_info *ti = current_thread_info();
unsigned int nr = (unsigned int)regs->orig_ax;
@@ -337,27 +403,62 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
regs->ax = ia32_sys_call_table[nr](regs);
}
- syscall_return_slowpath(regs);
+ __syscall_return_slowpath(regs);
}
/* Handles int $0x80 */
-__visible void do_int80_syscall_32(struct pt_regs *regs)
+__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
enter_from_user_mode();
+ instrumentation_begin();
+
local_irq_enable();
do_syscall_32_irqs_on(regs);
+
+ instrumentation_end();
+ exit_to_user_mode();
+}
+
+static bool __do_fast_syscall_32(struct pt_regs *regs)
+{
+ int res;
+
+ /* Fetch EBP from where the vDSO stashed it. */
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /*
+ * Micro-optimization: the pointer we're following is
+ * explicitly 32 bits, so it can't be out of range.
+ */
+ res = __get_user(*(u32 *)&regs->bp,
+ (u32 __user __force *)(unsigned long)(u32)regs->sp);
+ } else {
+ res = get_user(*(u32 *)&regs->bp,
+ (u32 __user __force *)(unsigned long)(u32)regs->sp);
+ }
+
+ if (res) {
+ /* User code screwed up. */
+ regs->ax = -EFAULT;
+ local_irq_disable();
+ __prepare_exit_to_usermode(regs);
+ return false;
+ }
+
+ /* Now this is just like a normal syscall. */
+ do_syscall_32_irqs_on(regs);
+ return true;
}
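Note: the only difference between the two branches above is the access_ok() check. The pointer is built from the low 32 bits of regs->sp, so on 64-bit kernels it provably lies inside the user range and __get_user() may skip the check; 32-bit kernels keep the full get_user(). A reduced sketch:

/* Reduced sketch of the EBP fetch above (error handling dropped). */
static int sketch_fetch_bp(struct pt_regs *regs, u32 *val)
{
	u32 __user *usp = (u32 __user __force *)(unsigned long)(u32)regs->sp;

	if (IS_ENABLED(CONFIG_X86_64))
		return __get_user(*val, usp);	/* range check redundant */

	return get_user(*val, usp);		/* access_ok() + fetch */
}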
/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
-__visible long do_fast_syscall_32(struct pt_regs *regs)
+__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
/*
* Called using the internal vDSO SYSENTER/SYSCALL32 calling
* convention. Adjust regs so it looks like we entered using int80.
*/
-
unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
- vdso_image_32.sym_int80_landing_pad;
+ vdso_image_32.sym_int80_landing_pad;
+ bool success;
/*
* SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
@@ -367,33 +468,17 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
regs->ip = landing_pad;
enter_from_user_mode();
+ instrumentation_begin();
local_irq_enable();
+ success = __do_fast_syscall_32(regs);
- /* Fetch EBP from where the vDSO stashed it. */
- if (
-#ifdef CONFIG_X86_64
- /*
- * Micro-optimization: the pointer we're following is explicitly
- * 32 bits, so it can't be out of range.
- */
- __get_user(*(u32 *)&regs->bp,
- (u32 __user __force *)(unsigned long)(u32)regs->sp)
-#else
- get_user(*(u32 *)&regs->bp,
- (u32 __user __force *)(unsigned long)(u32)regs->sp)
-#endif
- ) {
-
- /* User code screwed up. */
- local_irq_disable();
- regs->ax = -EFAULT;
- prepare_exit_to_usermode(regs);
- return 0; /* Keep it simple: use IRET. */
- }
+ instrumentation_end();
+ exit_to_user_mode();
- /* Now this is just like a normal syscall. */
- do_syscall_32_irqs_on(regs);
+ /* If it failed, keep it simple: use IRET. */
+ if (!success)
+ return 0;
#ifdef CONFIG_X86_64
/*
@@ -431,3 +516,266 @@ SYSCALL_DEFINE0(ni_syscall)
{
return -ENOSYS;
}
+
+/**
+ * idtentry_enter_cond_rcu - Handle state tracking on idtentry with conditional
+ * RCU handling
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking, as the low level ASM entry disabled
+ * interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent, as the low level ASM
+ * entry disabled interrupts.
+ *
+ * For kernel mode entries RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
+ * invoked on entry and rcu_irq_exit() on exit.
+ *
+ * Avoiding the rcu_irq_enter/exit() calls is an optimization, but it also
+ * solves the problem of kernel mode page faults, which can schedule;
+ * scheduling is not possible after invoking rcu_irq_enter() without
+ * undoing it.
+ *
+ * For user mode entries enter_from_user_mode() must be invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: True if RCU has been adjusted on a kernel entry
+ * False otherwise
+ *
+ * The return value must be fed into the rcu_exit argument of
+ * idtentry_exit_cond_rcu().
+ */
+bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
+{
+ if (user_mode(regs)) {
+ enter_from_user_mode();
+ return false;
+ }
+
+ /*
+ * If this entry hit the idle task invoke rcu_irq_enter() whether
+ * RCU is watching or not.
+ *
+ * Interrupts can nest when the first interrupt invokes softirq
+ * processing on return which enables interrupts.
+ *
+ * Scheduler ticks in the idle task can mark quiescent state and
+ * terminate a grace period, if and only if the timer interrupt is
+ * not nested into another interrupt.
+ *
+ * Checking for __rcu_is_watching() here would prevent the nesting
+ * interrupt from invoking rcu_irq_enter(). If that nested interrupt is
+ * the tick then rcu_flavor_sched_clock_irq() would wrongfully
+ * assume that it is the first interrupt and eventually claim
+ * quiescent state and end grace periods prematurely.
+ *
+ * Unconditionally invoke rcu_irq_enter() so RCU state stays
+ * consistent.
+ *
+ * TINY_RCU does not support EQS, so let the compiler eliminate
+ * this part when enabled.
+ */
+ if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+ /*
+ * If RCU is not watching then the same careful
+ * sequence vs. lockdep and tracing is required
+ * as in enter_from_user_mode().
+ */
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ rcu_irq_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+
+ return true;
+ }
+
+ /*
+ * If RCU is watching then RCU only wants to check whether it needs
+ * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
+ * already contains a warning when RCU is not watching, so no point
+ * in having another one here.
+ */
+ instrumentation_begin();
+ rcu_irq_enter_check_tick();
+ /* Use the combo lockdep/tracing function */
+ trace_hardirqs_off();
+ instrumentation_end();
+
+ return false;
+}
+
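Note: the contract in the kerneldoc is easiest to read at a call site: whatever idtentry_enter_cond_rcu() returns must be handed back, unmodified, to idtentry_exit_cond_rcu(). Schematically (the handler name is illustrative; xen_pv_evtchn_do_upcall() further down is a real user):

/* Schematic pairing of the conditional-RCU idtentry helpers. */
__visible noinstr void sketch_idtentry_handler(struct pt_regs *regs)
{
	bool rcu_exit = idtentry_enter_cond_rcu(regs);

	instrumentation_begin();
	/* ... exception/interrupt specific work ... */
	instrumentation_end();

	idtentry_exit_cond_rcu(regs, rcu_exit);	/* hand the flag back */
}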
+static void idtentry_exit_cond_resched(struct pt_regs *regs, bool may_sched)
+{
+ if (may_sched && !preempt_count()) {
+ /* Sanity check RCU and thread stack */
+ rcu_irq_exit_check_preempt();
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+ WARN_ON_ONCE(!on_thread_stack());
+ if (need_resched())
+ preempt_schedule_irq();
+ }
+ /* Covers both tracing and lockdep */
+ trace_hardirqs_on();
+}
+
+/**
+ * idtentry_exit_cond_rcu - Handle return from exception with conditional RCU
+ * handling
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @rcu_exit: Invoke rcu_irq_exit() if true
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and reguired and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to idtentry_enter_cond_rcu(). The return value of the entry
+ * function must be fed into the @rcu_exit argument.
+ */
+void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
+{
+ lockdep_assert_irqs_disabled();
+
+ /* Check whether this returns to user mode */
+ if (user_mode(regs)) {
+ prepare_exit_to_usermode(regs);
+ } else if (regs->flags & X86_EFLAGS_IF) {
+ /*
+ * If RCU was not watching on entry this needs to be done
+ * carefully and needs the same ordering of lockdep/tracing
+ * and RCU as the return to user mode path.
+ */
+ if (rcu_exit) {
+ instrumentation_begin();
+ /* Tell the tracer that IRET will enable interrupts */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+ rcu_irq_exit();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+ return;
+ }
+
+ instrumentation_begin();
+ idtentry_exit_cond_resched(regs, IS_ENABLED(CONFIG_PREEMPTION));
+ instrumentation_end();
+ } else {
+ /*
+ * IRQ flags state is correct already. Just tell RCU if it
+ * was not watching on entry.
+ */
+ if (rcu_exit)
+ rcu_irq_exit();
+ }
+}
+
+/**
+ * idtentry_enter_user - Handle state tracking on idtentry from user mode
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes enter_from_user_mode() to establish the proper context for
+ * NOHZ_FULL. Otherwise scheduling on exit would not be possible.
+ */
+void noinstr idtentry_enter_user(struct pt_regs *regs)
+{
+ enter_from_user_mode();
+}
+
+/**
+ * idtentry_exit_user - Handle return from exception to user mode
+ * @regs: Pointer to pt_regs (exception entry regs)
+ *
+ * Runs the necessary preemption and work checks and returns to the caller
+ * with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to idtentry_enter_user().
+ */
+void noinstr idtentry_exit_user(struct pt_regs *regs)
+{
+ lockdep_assert_irqs_disabled();
+
+ prepare_exit_to_usermode(regs);
+}
+
+#ifdef CONFIG_XEN_PV
+#ifndef CONFIG_PREEMPTION
+/*
+ * Some hypercalls issued by the toolstack can take many tens of
+ * seconds. Allow tasks running hypercalls via the privcmd driver to
+ * be voluntarily preempted even if full kernel preemption is
+ * disabled.
+ *
+ * Such preemptible hypercalls are bracketed by
+ * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
+ * calls.
+ */
+DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
+EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+
+/*
+ * In case of scheduling the flag must be cleared and restored after
+ * returning from schedule as the task might move to a different CPU.
+ */
+static __always_inline bool get_and_clear_inhcall(void)
+{
+ bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);
+
+ __this_cpu_write(xen_in_preemptible_hcall, false);
+ return inhcall;
+}
+
+static __always_inline void restore_inhcall(bool inhcall)
+{
+ __this_cpu_write(xen_in_preemptible_hcall, inhcall);
+}
+#else
+static __always_inline bool get_and_clear_inhcall(void) { return false; }
+static __always_inline void restore_inhcall(bool inhcall) { }
+#endif
+
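Note: the flag consumed by get_and_clear_inhcall() is set by the privcmd side around long-running hypercalls via the begin/end helpers from <xen/xen-ops.h>. A sketch of that side, with the hypercall itself elided:

#include <xen/xen-ops.h>

/* Sketch: the driver-side bracket around a slow toolstack hypercall. */
static long sketch_privcmd_call(void)
{
	long ret;

	xen_preemptible_hcall_begin();	/* sets xen_in_preemptible_hcall */
	ret = 0;  /* ... HYPERVISOR_ call that may take tens of seconds ... */
	xen_preemptible_hcall_end();	/* clears the flag again */

	return ret;
}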
+static void __xen_pv_evtchn_do_upcall(void)
+{
+ irq_enter_rcu();
+ inc_irq_stat(irq_hv_callback_count);
+
+ xen_hvm_evtchn_do_upcall();
+
+ irq_exit_rcu();
+}
+
+__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+ bool inhcall, rcu_exit;
+
+ rcu_exit = idtentry_enter_cond_rcu(regs);
+ old_regs = set_irq_regs(regs);
+
+ instrumentation_begin();
+ run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs);
+ instrumentation_end();
+
+ set_irq_regs(old_regs);
+
+ inhcall = get_and_clear_inhcall();
+ if (inhcall && !WARN_ON_ONCE(rcu_exit)) {
+ instrumentation_begin();
+ idtentry_exit_cond_resched(regs, true);
+ instrumentation_end();
+ restore_inhcall(inhcall);
+ } else {
+ idtentry_exit_cond_rcu(regs, rcu_exit);
+ }
+}
+#endif /* CONFIG_XEN_PV */
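Note: run_on_irqstack_cond() comes from the <asm/irq_stack.h> added in this series: it switches to the per-CPU hard irq stack only when the exception interrupted kernel code and the irq stack is not already in use; otherwise the function runs on the current stack. The helper names in the sketch below are made up to show the semantics, not the literal implementation:

/* Semantics sketch only; on_irqstack()/call_on_irqstack() are invented. */
static void sketch_run_cond(void (*func)(void), struct pt_regs *regs)
{
	if (user_mode(regs) || on_irqstack())
		func();			/* current stack is safe/empty */
	else
		call_on_irqstack(func);	/* switch %rsp, call, switch back */
}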
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index a5eed844e948..024d7d276cd4 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,40 +44,13 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
+#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include "calling.h"
.section .entry.text, "ax"
-/*
- * We use macros for low-level operations which need to be overridden
- * for paravirtualization. The following will never clobber any registers:
- * INTERRUPT_RETURN (aka. "iret")
- * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
- * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
- *
- * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
- * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
- * Allowing a register to be clobbered can shrink the paravirt replacement
- * enough to patch inline, increasing performance.
- */
-
-#ifdef CONFIG_PREEMPTION
-# define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
-#else
-# define preempt_stop(clobbers)
-#endif
-
-.macro TRACE_IRQS_IRET
-#ifdef CONFIG_TRACE_IRQFLAGS
- testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off?
- jz 1f
- TRACE_IRQS_ON
-1:
-#endif
-.endm
-
#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
/*
@@ -726,10 +699,68 @@
.Lend_\@:
.endm
+
+/**
+ * idtentry - Macro to generate entry stubs for simple IDT entries
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ * @has_error_code: Hardware pushed error code on stack
+ */
+.macro idtentry vector asmsym cfunc has_error_code:req
+SYM_CODE_START(\asmsym)
+ ASM_CLAC
+ cld
+
+ .if \has_error_code == 0
+ pushl $0 /* Clear the error code */
+ .endif
+
+ /* Push the C-function address into the GS slot */
+ pushl $\cfunc
+ /* Invoke the common exception entry */
+ jmp handle_exception
+SYM_CODE_END(\asmsym)
+.endm
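Note: each stub emitted by this macro funnels into handle_exception, which indirect-calls the C function pushed into the GS slot. On the C side those functions come from the DEFINE_IDTENTRY* family in <asm/idtentry.h>; stripped of the macro plumbing, a plain one expands to roughly this (exc_foo is illustrative):

/* Rough expansion of DEFINE_IDTENTRY(exc_foo); exc_foo is made up. */
__visible noinstr void exc_foo(struct pt_regs *regs)
{
	bool rcu_exit = idtentry_enter_cond_rcu(regs);

	instrumentation_begin();
	__exc_foo(regs);		/* the body the macro user writes */
	instrumentation_end();

	idtentry_exit_cond_rcu(regs, rcu_exit);
}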
+
+.macro idtentry_irq vector cfunc
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+SYM_CODE_START_LOCAL(asm_\cfunc)
+ ASM_CLAC
+ SAVE_ALL switch_stacks=1
+ ENCODE_FRAME_POINTER
+ movl %esp, %eax
+ movl PT_ORIG_EAX(%esp), %edx /* get the vector from stack */
+ movl $-1, PT_ORIG_EAX(%esp) /* no syscall to restart */
+ call \cfunc
+ jmp handle_exception_return
+SYM_CODE_END(asm_\cfunc)
+.endm
+
+.macro idtentry_sysvec vector cfunc
+ idtentry \vector asm_\cfunc \cfunc has_error_code=0
+.endm
+
+/*
+ * Include the defines which emit the idt entries which are shared
+ * between 32 and 64 bit and emit the __irqentry_text_* markers
+ * so the stacktrace boundary checks work.
+ */
+ .align 16
+ .globl __irqentry_text_start
+__irqentry_text_start:
+
+#include <asm/idtentry.h>
+
+ .align 16
+ .globl __irqentry_text_end
+__irqentry_text_end:
+
/*
* %eax: prev task
* %edx: next task
*/
+.pushsection .text, "ax"
SYM_CODE_START(__switch_to_asm)
/*
* Save callee-saved registers
@@ -776,6 +807,7 @@ SYM_CODE_START(__switch_to_asm)
jmp __switch_to
SYM_CODE_END(__switch_to_asm)
+.popsection
/*
* The unwinder expects the last frame on the stack to always be at the same
@@ -784,6 +816,7 @@ SYM_CODE_END(__switch_to_asm)
* asmlinkage function so its argument has to be pushed on the stack. This
* wrapper creates a proper "end of stack" frame header before the call.
*/
+.pushsection .text, "ax"
SYM_FUNC_START(schedule_tail_wrapper)
FRAME_BEGIN
@@ -794,6 +827,8 @@ SYM_FUNC_START(schedule_tail_wrapper)
FRAME_END
ret
SYM_FUNC_END(schedule_tail_wrapper)
+.popsection
+
/*
* A newly forked process directly context switches into this address.
*
@@ -801,6 +836,7 @@ SYM_FUNC_END(schedule_tail_wrapper)
* ebx: kernel thread func (NULL for user thread)
* edi: kernel thread arg
*/
+.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
call schedule_tail_wrapper
@@ -811,8 +847,7 @@ SYM_CODE_START(ret_from_fork)
/* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
call syscall_return_slowpath
- STACKLEAK_ERASE
- jmp restore_all
+ jmp .Lsyscall_32_done
/* kernel thread */
1: movl %edi, %eax
@@ -825,38 +860,7 @@ SYM_CODE_START(ret_from_fork)
movl $0, PT_EAX(%esp)
jmp 2b
SYM_CODE_END(ret_from_fork)
-
-/*
- * Return to user mode is not as complex as all this looks,
- * but we want the default path for a system call return to
- * go as quickly as possible which is why some of this is
- * less clear than it otherwise should be.
- */
-
- # userspace resumption stub bypassing syscall exit tracing
-SYM_CODE_START_LOCAL(ret_from_exception)
- preempt_stop(CLBR_ANY)
-ret_from_intr:
-#ifdef CONFIG_VM86
- movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
- movb PT_CS(%esp), %al
- andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
-#else
- /*
- * We can be coming here from child spawned by kernel_thread().
- */
- movl PT_CS(%esp), %eax
- andl $SEGMENT_RPL_MASK, %eax
-#endif
- cmpl $USER_RPL, %eax
- jb restore_all_kernel # not returning to v8086 or userspace
-
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF
- movl %esp, %eax
- call prepare_exit_to_usermode
- jmp restore_all
-SYM_CODE_END(ret_from_exception)
+.popsection
SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
@@ -960,12 +964,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
jnz .Lsysenter_fix_flags
.Lsysenter_flags_fixed:
- /*
- * User mode is traced as though IRQs are on, and SYSENTER
- * turned them off.
- */
- TRACE_IRQS_OFF
-
movl %esp, %eax
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
@@ -974,8 +972,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
STACKLEAK_ERASE
-/* Opportunistic SYSEXIT */
- TRACE_IRQS_ON /* User mode traces as IRQs on. */
+ /* Opportunistic SYSEXIT */
/*
* Setup entry stack - we keep the pointer in %eax and do the
@@ -1075,20 +1072,12 @@ SYM_FUNC_START(entry_INT80_32)
SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */
- /*
- * User mode is traced as though IRQs are on, and the interrupt gate
- * turned them off.
- */
- TRACE_IRQS_OFF
-
movl %esp, %eax
call do_int80_syscall_32
.Lsyscall_32_done:
-
STACKLEAK_ERASE
-restore_all:
- TRACE_IRQS_ON
+restore_all_switch_stack:
SWITCH_TO_ENTRY_STACK
CHECK_AND_APPLY_ESPFIX
@@ -1107,26 +1096,10 @@ restore_all:
*/
INTERRUPT_RETURN
-restore_all_kernel:
-#ifdef CONFIG_PREEMPTION
- DISABLE_INTERRUPTS(CLBR_ANY)
- cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz .Lno_preempt
- testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz .Lno_preempt
- call preempt_schedule_irq
-.Lno_preempt:
-#endif
- TRACE_IRQS_IRET
- PARANOID_EXIT_TO_KERNEL_MODE
- BUG_IF_WRONG_CR3
- RESTORE_REGS 4
- jmp .Lirq_return
-
.section .fixup, "ax"
-SYM_CODE_START(iret_exc)
+SYM_CODE_START(asm_iret_error)
pushl $0 # no error code
- pushl $do_iret_error
+ pushl $iret_error
#ifdef CONFIG_DEBUG_ENTRY
/*
@@ -1140,10 +1113,10 @@ SYM_CODE_START(iret_exc)
popl %eax
#endif
- jmp common_exception
-SYM_CODE_END(iret_exc)
+ jmp handle_exception
+SYM_CODE_END(asm_iret_error)
.previous
- _ASM_EXTABLE(.Lirq_return, iret_exc)
+ _ASM_EXTABLE(.Lirq_return, asm_iret_error)
SYM_FUNC_END(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
@@ -1193,192 +1166,21 @@ SYM_FUNC_END(entry_INT80_32)
#endif
.endm
-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
- .align 8
-SYM_CODE_START(irq_entries_start)
- vector=FIRST_EXTERNAL_VECTOR
- .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
- pushl $(~vector+0x80) /* Note: always in signed byte range */
- vector=vector+1
- jmp common_interrupt
- .align 8
- .endr
-SYM_CODE_END(irq_entries_start)
-
-#ifdef CONFIG_X86_LOCAL_APIC
- .align 8
-SYM_CODE_START(spurious_entries_start)
- vector=FIRST_SYSTEM_VECTOR
- .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
- pushl $(~vector+0x80) /* Note: always in signed byte range */
- vector=vector+1
- jmp common_spurious
- .align 8
- .endr
-SYM_CODE_END(spurious_entries_start)
-
-SYM_CODE_START_LOCAL(common_spurious)
- ASM_CLAC
- addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
- SAVE_ALL switch_stacks=1
- ENCODE_FRAME_POINTER
- TRACE_IRQS_OFF
- movl %esp, %eax
- call smp_spurious_interrupt
- jmp ret_from_intr
-SYM_CODE_END(common_spurious)
-#endif
-
-/*
- * the CPU automatically disables interrupts when executing an IRQ vector,
- * so IRQ-flags tracing has to follow that:
- */
- .p2align CONFIG_X86_L1_CACHE_SHIFT
-SYM_CODE_START_LOCAL(common_interrupt)
- ASM_CLAC
- addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
-
- SAVE_ALL switch_stacks=1
- ENCODE_FRAME_POINTER
- TRACE_IRQS_OFF
- movl %esp, %eax
- call do_IRQ
- jmp ret_from_intr
-SYM_CODE_END(common_interrupt)
-
-#define BUILD_INTERRUPT3(name, nr, fn) \
-SYM_FUNC_START(name) \
- ASM_CLAC; \
- pushl $~(nr); \
- SAVE_ALL switch_stacks=1; \
- ENCODE_FRAME_POINTER; \
- TRACE_IRQS_OFF \
- movl %esp, %eax; \
- call fn; \
- jmp ret_from_intr; \
-SYM_FUNC_END(name)
-
-#define BUILD_INTERRUPT(name, nr) \
- BUILD_INTERRUPT3(name, nr, smp_##name); \
-
-/* The include is where all of the SMP etc. interrupts come from */
-#include <asm/entry_arch.h>
-
-SYM_CODE_START(coprocessor_error)
- ASM_CLAC
- pushl $0
- pushl $do_coprocessor_error
- jmp common_exception
-SYM_CODE_END(coprocessor_error)
-
-SYM_CODE_START(simd_coprocessor_error)
- ASM_CLAC
- pushl $0
-#ifdef CONFIG_X86_INVD_BUG
- /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
- ALTERNATIVE "pushl $do_general_protection", \
- "pushl $do_simd_coprocessor_error", \
- X86_FEATURE_XMM
-#else
- pushl $do_simd_coprocessor_error
-#endif
- jmp common_exception
-SYM_CODE_END(simd_coprocessor_error)
-
-SYM_CODE_START(device_not_available)
- ASM_CLAC
- pushl $0
- pushl $do_device_not_available
- jmp common_exception
-SYM_CODE_END(device_not_available)
-
#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_iret)
iret
- _ASM_EXTABLE(native_iret, iret_exc)
+ _ASM_EXTABLE(native_iret, asm_iret_error)
SYM_CODE_END(native_iret)
#endif
-SYM_CODE_START(overflow)
- ASM_CLAC
- pushl $0
- pushl $do_overflow
- jmp common_exception
-SYM_CODE_END(overflow)
-
-SYM_CODE_START(bounds)
- ASM_CLAC
- pushl $0
- pushl $do_bounds
- jmp common_exception
-SYM_CODE_END(bounds)
-
-SYM_CODE_START(invalid_op)
- ASM_CLAC
- pushl $0
- pushl $do_invalid_op
- jmp common_exception
-SYM_CODE_END(invalid_op)
-
-SYM_CODE_START(coprocessor_segment_overrun)
- ASM_CLAC
- pushl $0
- pushl $do_coprocessor_segment_overrun
- jmp common_exception
-SYM_CODE_END(coprocessor_segment_overrun)
-
-SYM_CODE_START(invalid_TSS)
- ASM_CLAC
- pushl $do_invalid_TSS
- jmp common_exception
-SYM_CODE_END(invalid_TSS)
-
-SYM_CODE_START(segment_not_present)
- ASM_CLAC
- pushl $do_segment_not_present
- jmp common_exception
-SYM_CODE_END(segment_not_present)
-
-SYM_CODE_START(stack_segment)
- ASM_CLAC
- pushl $do_stack_segment
- jmp common_exception
-SYM_CODE_END(stack_segment)
-
-SYM_CODE_START(alignment_check)
- ASM_CLAC
- pushl $do_alignment_check
- jmp common_exception
-SYM_CODE_END(alignment_check)
-
-SYM_CODE_START(divide_error)
- ASM_CLAC
- pushl $0 # no error code
- pushl $do_divide_error
- jmp common_exception
-SYM_CODE_END(divide_error)
-
-#ifdef CONFIG_X86_MCE
-SYM_CODE_START(machine_check)
- ASM_CLAC
- pushl $0
- pushl $do_mce
- jmp common_exception
-SYM_CODE_END(machine_check)
-#endif
-
-SYM_CODE_START(spurious_interrupt_bug)
- ASM_CLAC
- pushl $0
- pushl $do_spurious_interrupt_bug
- jmp common_exception
-SYM_CODE_END(spurious_interrupt_bug)
-
#ifdef CONFIG_XEN_PV
-SYM_FUNC_START(xen_hypervisor_callback)
+/*
+ * See comment in entry_64.S for further explanation
+ *
+ * Note: This is not an actual IDT entry point. It's a Xen-specific entry
+ * point and therefore named to match the 64-bit trampoline counterpart.
+ */
+SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback)
/*
* Check to see if we got the event in the critical
* region in xen_iret_direct, after we've reenabled
@@ -1395,14 +1197,11 @@ SYM_FUNC_START(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
ENCODE_FRAME_POINTER
- TRACE_IRQS_OFF
+
mov %esp, %eax
- call xen_evtchn_do_upcall
-#ifndef CONFIG_PREEMPTION
- call xen_maybe_preempt_hcall
-#endif
- jmp ret_from_intr
-SYM_FUNC_END(xen_hypervisor_callback)
+ call xen_pv_evtchn_do_upcall
+ jmp handle_exception_return
+SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -1429,11 +1228,11 @@ SYM_FUNC_START(xen_failsafe_callback)
popl %eax
lea 16(%esp), %esp
jz 5f
- jmp iret_exc
+ jmp asm_iret_error
5: pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
ENCODE_FRAME_POINTER
- jmp ret_from_exception
+ jmp handle_exception_return
.section .fixup, "ax"
6: xorl %eax, %eax
@@ -1456,56 +1255,7 @@ SYM_FUNC_START(xen_failsafe_callback)
SYM_FUNC_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
-#ifdef CONFIG_XEN_PVHVM
-BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
- xen_evtchn_do_upcall)
-#endif
-
-
-#if IS_ENABLED(CONFIG_HYPERV)
-
-BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
- hyperv_vector_handler)
-
-BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
- hyperv_reenlightenment_intr)
-
-BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
- hv_stimer0_vector_handler)
-
-#endif /* CONFIG_HYPERV */
-
-SYM_CODE_START(page_fault)
- ASM_CLAC
- pushl $do_page_fault
- jmp common_exception_read_cr2
-SYM_CODE_END(page_fault)
-
-SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2)
- /* the function address is in %gs's slot on the stack */
- SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
-
- ENCODE_FRAME_POINTER
-
- /* fixup %gs */
- GS_TO_REG %ecx
- movl PT_GS(%esp), %edi
- REG_TO_PTGS %ecx
- SET_KERNEL_GS %ecx
-
- GET_CR2_INTO(%ecx) # might clobber %eax
-
- /* fixup orig %eax */
- movl PT_ORIG_EAX(%esp), %edx # get the error code
- movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
-
- TRACE_IRQS_OFF
- movl %esp, %eax # pt_regs pointer
- CALL_NOSPEC edi
- jmp ret_from_exception
-SYM_CODE_END(common_exception_read_cr2)
-
-SYM_CODE_START_LOCAL_NOALIGN(common_exception)
+SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
/* the function address is in %gs's slot on the stack */
SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER
@@ -1520,23 +1270,35 @@ SYM_CODE_START_LOCAL_NOALIGN(common_exception)
movl PT_ORIG_EAX(%esp), %edx # get the error code
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
- TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
CALL_NOSPEC edi
- jmp ret_from_exception
-SYM_CODE_END(common_exception)
-SYM_CODE_START(debug)
+handle_exception_return:
+#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+#else
/*
- * Entry from sysenter is now handled in common_exception
+ * We can be coming here from child spawned by kernel_thread().
*/
- ASM_CLAC
- pushl $0
- pushl $do_debug
- jmp common_exception
-SYM_CODE_END(debug)
+ movl PT_CS(%esp), %eax
+ andl $SEGMENT_RPL_MASK, %eax
+#endif
+ cmpl $USER_RPL, %eax # returning to v8086 or userspace ?
+ jnb ret_to_user
-SYM_CODE_START(double_fault)
+ PARANOID_EXIT_TO_KERNEL_MODE
+ BUG_IF_WRONG_CR3
+ RESTORE_REGS 4
+ jmp .Lirq_return
+
+ret_to_user:
+ movl %esp, %eax
+ jmp restore_all_switch_stack
+SYM_CODE_END(handle_exception)
+
+SYM_CODE_START(asm_exc_double_fault)
1:
/*
* This is a task gate handler, not an interrupt gate handler.
@@ -1574,7 +1336,7 @@ SYM_CODE_START(double_fault)
1:
hlt
jmp 1b
-SYM_CODE_END(double_fault)
+SYM_CODE_END(asm_exc_double_fault)
/*
* NMI is doubly nasty. It can happen on the first instruction of
@@ -1583,7 +1345,7 @@ SYM_CODE_END(double_fault)
* switched stacks. We handle both conditions by simply checking whether we
* interrupted kernel code running on the SYSENTER stack.
*/
-SYM_CODE_START(nmi)
+SYM_CODE_START(asm_exc_nmi)
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
@@ -1612,7 +1374,7 @@ SYM_CODE_START(nmi)
jb .Lnmi_from_sysenter_stack
/* Not on SYSENTER stack. */
- call do_nmi
+ call exc_nmi
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
@@ -1622,7 +1384,7 @@ SYM_CODE_START(nmi)
*/
movl %esp, %ebx
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
- call do_nmi
+ call exc_nmi
movl %ebx, %esp
.Lnmi_return:
@@ -1676,21 +1438,9 @@ SYM_CODE_START(nmi)
lss (1+5+6)*4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
-SYM_CODE_END(nmi)
-
-SYM_CODE_START(int3)
- ASM_CLAC
- pushl $0
- pushl $do_int3
- jmp common_exception
-SYM_CODE_END(int3)
-
-SYM_CODE_START(general_protection)
- ASM_CLAC
- pushl $do_general_protection
- jmp common_exception
-SYM_CODE_END(general_protection)
+SYM_CODE_END(asm_exc_nmi)
+.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@@ -1701,3 +1451,4 @@ SYM_CODE_START(rewind_stack_do_exit)
call do_exit
1: jmp 1b
SYM_CODE_END(rewind_stack_do_exit)
+.popsection
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index eead1e2bebd5..d2a00c97e53f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -16,7 +16,6 @@
*
* Some macro usage:
* - SYM_FUNC_START/END:Define functions in the symbol table.
- * - TRACE_IRQ_*: Trace hardirq state for lock debugging.
* - idtentry: Define exception entry points.
*/
#include <linux/linkage.h>
@@ -37,6 +36,7 @@
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
+#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>
@@ -53,57 +53,6 @@ SYM_CODE_START(native_usergs_sysret64)
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
-.macro TRACE_IRQS_FLAGS flags:req
-#ifdef CONFIG_TRACE_IRQFLAGS
- btl $9, \flags /* interrupts off? */
- jnc 1f
- TRACE_IRQS_ON
-1:
-#endif
-.endm
-
-.macro TRACE_IRQS_IRETQ
- TRACE_IRQS_FLAGS EFLAGS(%rsp)
-.endm
-
-/*
- * When dynamic function tracer is enabled it will add a breakpoint
- * to all locations that it is about to modify, sync CPUs, update
- * all the code, sync CPUs, then remove the breakpoints. In this time
- * if lockdep is enabled, it might jump back into the debug handler
- * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
- *
- * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
- * make sure the stack pointer does not get reset back to the top
- * of the debug stack, and instead just reuses the current stack.
- */
-#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
-
-.macro TRACE_IRQS_OFF_DEBUG
- call debug_stack_set_zero
- TRACE_IRQS_OFF
- call debug_stack_reset
-.endm
-
-.macro TRACE_IRQS_ON_DEBUG
- call debug_stack_set_zero
- TRACE_IRQS_ON
- call debug_stack_reset
-.endm
-
-.macro TRACE_IRQS_IRETQ_DEBUG
- btl $9, EFLAGS(%rsp) /* interrupts off? */
- jnc 1f
- TRACE_IRQS_ON_DEBUG
-1:
-.endm
-
-#else
-# define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF
-# define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON
-# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
-#endif
-
/*
* 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
*
@@ -144,11 +93,6 @@ SYM_CODE_END(native_usergs_sysret64)
SYM_CODE_START(entry_SYSCALL_64)
UNWIND_HINT_EMPTY
- /*
- * Interrupts are off on entry.
- * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
- * it is too small to ever cause noticeable irq latency.
- */
swapgs
/* tss.sp2 is scratch space. */
@@ -167,15 +111,11 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
- TRACE_IRQS_OFF
-
/* IRQs are off. */
movq %rax, %rdi
movq %rsp, %rsi
call do_syscall_64 /* returns with IRQs disabled */
- TRACE_IRQS_ON /* return enables interrupts */
-
/*
* Try to use SYSRET instead of IRET if we're returning to
* a completely clean 64-bit userspace context. If we're not,
@@ -279,6 +219,7 @@ SYM_CODE_END(entry_SYSCALL_64)
* %rdi: prev task
* %rsi: next task
*/
+.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
/*
* Save callee-saved registers
@@ -321,6 +262,7 @@ SYM_FUNC_START(__switch_to_asm)
jmp __switch_to
SYM_FUNC_END(__switch_to_asm)
+.popsection
/*
* A newly forked process directly context switches into this address.
@@ -329,6 +271,7 @@ SYM_FUNC_END(__switch_to_asm)
* rbx: kernel thread func (NULL for user thread)
* r12: kernel thread arg
*/
+.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
UNWIND_HINT_EMPTY
movq %rax, %rdi
@@ -341,7 +284,6 @@ SYM_CODE_START(ret_from_fork)
UNWIND_HINT_REGS
movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
- TRACE_IRQS_ON /* user mode is traced as IRQS on */
jmp swapgs_restore_regs_and_return_to_usermode
1:
@@ -357,34 +299,7 @@ SYM_CODE_START(ret_from_fork)
movq $0, RAX(%rsp)
jmp 2b
SYM_CODE_END(ret_from_fork)
-
-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
- .align 8
-SYM_CODE_START(irq_entries_start)
- vector=FIRST_EXTERNAL_VECTOR
- .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
- UNWIND_HINT_IRET_REGS
- pushq $(~vector+0x80) /* Note: always in signed byte range */
- jmp common_interrupt
- .align 8
- vector=vector+1
- .endr
-SYM_CODE_END(irq_entries_start)
-
- .align 8
-SYM_CODE_START(spurious_entries_start)
- vector=FIRST_SYSTEM_VECTOR
- .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
- UNWIND_HINT_IRET_REGS
- pushq $(~vector+0x80) /* Note: always in signed byte range */
- jmp common_spurious
- .align 8
- vector=vector+1
- .endr
-SYM_CODE_END(spurious_entries_start)
+.popsection
.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
@@ -398,228 +313,185 @@ SYM_CODE_END(spurious_entries_start)
#endif
.endm
-/*
- * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
- * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
- * Requires kernel GSBASE.
- *
- * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
+/**
+ * idtentry_body - Macro to emit code calling the C function
+ * @cfunc: C function to be called
+ * @has_error_code: Hardware pushed error code on stack
*/
-.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
- DEBUG_ENTRY_ASSERT_IRQS_OFF
+.macro idtentry_body cfunc has_error_code:req
- .if \save_ret
- /*
- * If save_ret is set, the original stack contains one additional
- * entry -- the return address. Therefore, move the address one
- * entry below %rsp to \old_rsp.
- */
- leaq 8(%rsp), \old_rsp
- .else
- movq %rsp, \old_rsp
- .endif
-
- .if \regs
- UNWIND_HINT_REGS base=\old_rsp
- .endif
+ call error_entry
+ UNWIND_HINT_REGS
- incl PER_CPU_VAR(irq_count)
- jnz .Lirq_stack_push_old_rsp_\@
+ movq %rsp, %rdi /* pt_regs pointer into 1st argument */
- /*
- * Right now, if we just incremented irq_count to zero, we've
- * claimed the IRQ stack but we haven't switched to it yet.
- *
- * If anything is added that can interrupt us here without using IST,
- * it must be *extremely* careful to limit its stack usage. This
- * could include kprobes and a hypothetical future IST-less #DB
- * handler.
- *
- * The OOPS unwinder relies on the word at the top of the IRQ
- * stack linking back to the previous RSP for the entire time we're
- * on the IRQ stack. For this to work reliably, we need to write
- * it before we actually move ourselves to the IRQ stack.
- */
+ .if \has_error_code == 1
+ movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument */
+ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
+ .endif
- movq \old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
- movq PER_CPU_VAR(hardirq_stack_ptr), %rsp
+ call \cfunc
-#ifdef CONFIG_DEBUG_ENTRY
- /*
- * If the first movq above becomes wrong due to IRQ stack layout
- * changes, the only way we'll notice is if we try to unwind right
- * here. Assert that we set up the stack right to catch this type
- * of bug quickly.
- */
- cmpq -8(%rsp), \old_rsp
- je .Lirq_stack_okay\@
- ud2
- .Lirq_stack_okay\@:
-#endif
+ jmp error_return
+.endm
-.Lirq_stack_push_old_rsp_\@:
- pushq \old_rsp
+/**
+ * idtentry - Macro to generate entry stubs for simple IDT entries
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ * @has_error_code: Hardware pushed error code on stack
+ *
+ * The macro emits code to set up the kernel context for straightforward
+ * and simple IDT entries. No IST stack, no paranoid entry checks.
+ */
+.macro idtentry vector asmsym cfunc has_error_code:req
+SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS offset=\has_error_code*8
+ ASM_CLAC
- .if \regs
- UNWIND_HINT_REGS indirect=1
+ .if \has_error_code == 0
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
.endif
- .if \save_ret
- /*
- * Push the return address to the stack. This return address can
- * be found at the "real" original RSP, which was offset by 8 at
- * the beginning of this macro.
- */
- pushq -8(\old_rsp)
+ .if \vector == X86_TRAP_BP
+ /*
+ * If coming from kernel space, create a 6-word gap to allow the
+ * int3 handler to emulate a call instruction.
+ */
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_no_gap_\@
+ .rept 6
+ pushq 5*8(%rsp)
+ .endr
+ UNWIND_HINT_IRET_REGS offset=8
+.Lfrom_usermode_no_gap_\@:
.endif
+
+ idtentry_body \cfunc \has_error_code
+
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
.endm
/*
- * Undoes ENTER_IRQ_STACK.
+ * Interrupt entry/exit.
+ *
+ * The interrupt stubs push (vector) onto the stack, which is the error_code
+ * position of idtentry exceptions, and jump to one of the two idtentry points
+ * (common/spurious).
+ *
+ * common_interrupt is a hotpath, align it to a cache line
*/
-.macro LEAVE_IRQ_STACK regs=1
- DEBUG_ENTRY_ASSERT_IRQS_OFF
- /* We need to be off the IRQ stack before decrementing irq_count. */
- popq %rsp
-
- .if \regs
- UNWIND_HINT_REGS
- .endif
-
- /*
- * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
- * the irq stack but we're not on it.
- */
-
- decl PER_CPU_VAR(irq_count)
+.macro idtentry_irq vector cfunc
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+ idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm
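Note: because the stub pushes the vector into the slot where hardware would put an error code, the C handler receives it as its error_code argument; the DEFINE_IDTENTRY_IRQ wrapper in <asm/idtentry.h> narrows and renames it. Schematically (types approximate):

/* Schematic: the vector travels in the error_code slot of pt_regs. */
__visible noinstr void sketch_common_interrupt(struct pt_regs *regs,
					       unsigned long error_code)
{
	u32 vector = (u32)error_code;	/* pushed by the idtentry_irq stub */

	/* ... state bracketing, then dispatch to the vector's irq_desc ... */
}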
/*
- * Interrupt entry helper function.
+ * System vectors which invoke their handlers directly and do not go
+ * through the regular common device interrupt handling code.
+ */
+.macro idtentry_sysvec vector cfunc
+ idtentry \vector asm_\cfunc \cfunc has_error_code=0
+.endm
+
+/**
+ * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ *
+ * The macro emits code to set up the kernel context for #MC and #DB
+ *
+ * If the entry comes from user space it uses the normal entry path
+ * including the return to user space work and preemption checks on
+ * exit.
*
- * Entry runs with interrupts off. Stack layout at entry:
- * +----------------------------------------------------+
- * | regs->ss |
- * | regs->rsp |
- * | regs->eflags |
- * | regs->cs |
- * | regs->ip |
- * +----------------------------------------------------+
- * | regs->orig_ax = ~(interrupt number) |
- * +----------------------------------------------------+
- * | return address |
- * +----------------------------------------------------+
+ * If it hits kernel mode then it needs to go through the paranoid
+ * entry, as the exception can hit any random state. No preemption
+ * check on exit to keep the paranoid path simple.
*/
-SYM_CODE_START(interrupt_entry)
- UNWIND_HINT_IRET_REGS offset=16
+.macro idtentry_mce_db vector asmsym cfunc
+SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS
ASM_CLAC
- cld
- testb $3, CS-ORIG_RAX+8(%rsp)
- jz 1f
- SWAPGS
- FENCE_SWAPGS_USER_ENTRY
+ pushq $-1 /* ORIG_RAX: no syscall to restart */
+
/*
- * Switch to the thread stack. The IRET frame and orig_ax are
- * on the stack, as well as the return address. RDI..R12 are
- * not (yet) on the stack and space has not (yet) been
- * allocated for them.
+ * If the entry is from userspace, switch stacks and treat it as
+ * a normal entry.
*/
- pushq %rdi
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_switch_stack_\@
- /* Need to switch before accessing the thread stack. */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
- movq %rsp, %rdi
- movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ /*
+ * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
+ * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
+ */
+ call paranoid_entry
- /*
- * We have RDI, return address, and orig_ax on the stack on
- * top of the IRET frame. That means offset=24
- */
- UNWIND_HINT_IRET_REGS base=%rdi offset=24
-
- pushq 7*8(%rdi) /* regs->ss */
- pushq 6*8(%rdi) /* regs->rsp */
- pushq 5*8(%rdi) /* regs->eflags */
- pushq 4*8(%rdi) /* regs->cs */
- pushq 3*8(%rdi) /* regs->ip */
- UNWIND_HINT_IRET_REGS
- pushq 2*8(%rdi) /* regs->orig_ax */
- pushq 8(%rdi) /* return address */
+ UNWIND_HINT_REGS
- movq (%rdi), %rdi
- jmp 2f
-1:
- FENCE_SWAPGS_KERNEL_ENTRY
-2:
- PUSH_AND_CLEAR_REGS save_ret=1
- ENCODE_FRAME_POINTER 8
+ movq %rsp, %rdi /* pt_regs pointer */
- testb $3, CS+8(%rsp)
- jz 1f
+ call \cfunc
- /*
- * IRQ from user mode.
- *
- * We need to tell lockdep that IRQs are off. We can't do this until
- * we fix gsbase, and we should do it before enter_from_user_mode
- * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
- * the simplest way to handle it is to just call it twice if
- * we enter from user mode. There's no reason to optimize this since
- * TRACE_IRQS_OFF is a no-op if lockdep is off.
- */
- TRACE_IRQS_OFF
+ jmp paranoid_exit
- CALL_enter_from_user_mode
+ /* Switch to the regular task stack and use the noist entry point */
+.Lfrom_usermode_switch_stack_\@:
+ idtentry_body noist_\cfunc, has_error_code=0
-1:
- ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
- /* We entered an interrupt context - irqs are off: */
- TRACE_IRQS_OFF
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
+.endm
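The noist_\cfunc target used for the user-mode path above is the
variant of the handler which runs on the regular task stack. For #DB
the C side provides both entry points; a sketch under the assumption
that the handlers are split as in the matching C-side conversion (all
names here are assumptions):

	/* kernel-mode #DB, reached via the paranoid IST path */
	DEFINE_IDTENTRY_RAW(exc_debug)
	{
		exc_debug_kernel(regs, debug_read_clear_dr6());
	}

	/* user-mode #DB, reached via .Lfrom_usermode_switch_stack_\@ */
	DEFINE_IDTENTRY_RAW(noist_exc_debug)
	{
		exc_debug_user(regs, debug_read_clear_dr6());
	}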
- ret
-SYM_CODE_END(interrupt_entry)
-_ASM_NOKPROBE(interrupt_entry)
+/*
+ * Double fault entry. Straight paranoid. No checks of the context
+ * this comes from, because for the espfix-induced #DF such a check
+ * would do the wrong thing.
+ */
+.macro idtentry_df vector asmsym cfunc
+SYM_CODE_START(\asmsym)
+ UNWIND_HINT_IRET_REGS offset=8
+ ASM_CLAC
+ /*
+ * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
+ * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
+ */
+ call paranoid_entry
+ UNWIND_HINT_REGS
-/* Interrupt entry/exit. */
+ movq %rsp, %rdi /* pt_regs pointer into first argument */
+ movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument */
+ movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
+ call \cfunc
+
+ jmp paranoid_exit
+
+_ASM_NOKPROBE(\asmsym)
+SYM_CODE_END(\asmsym)
+.endm
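The macro is instantiated once, for X86_TRAP_DF. On the C side the
handler receives the hardware-pushed error code moved into %rsi above;
a sketch (handler name assumed):

	/* #DF handler skeleton; never returns */
	DEFINE_IDTENTRY_DF(exc_double_fault)
	{
		/* espfix64 fixup, then die with the saved pt_regs */
	}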
/*
- * The interrupt stubs push (~vector+0x80) onto the stack and
- * then jump to common_spurious/interrupt.
+ * Include the defines which emit the idt entries which are shared
+ * between 32 and 64 bit and emit the __irqentry_text_* markers
+ * so the stacktrace boundary checks work.
*/
-SYM_CODE_START_LOCAL(common_spurious)
- addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
- call interrupt_entry
- UNWIND_HINT_REGS indirect=1
- call smp_spurious_interrupt /* rdi points to pt_regs */
- jmp ret_from_intr
-SYM_CODE_END(common_spurious)
-_ASM_NOKPROBE(common_spurious)
-
-/* common_interrupt is a hotpath. Align it */
- .p2align CONFIG_X86_L1_CACHE_SHIFT
-SYM_CODE_START_LOCAL(common_interrupt)
- addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
- call interrupt_entry
- UNWIND_HINT_REGS indirect=1
- call do_IRQ /* rdi points to pt_regs */
- /* 0(%rsp): old RSP */
-ret_from_intr:
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF
-
- LEAVE_IRQ_STACK
+ .align 16
+ .globl __irqentry_text_start
+__irqentry_text_start:
- testb $3, CS(%rsp)
- jz retint_kernel
+#include <asm/idtentry.h>
- /* Interrupt came from user space */
-.Lretint_user:
- mov %rsp,%rdi
- call prepare_exit_to_usermode
- TRACE_IRQS_ON
+ .align 16
+ .globl __irqentry_text_end
+__irqentry_text_end:
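The two markers bound every interrupt entry stub emitted from
<asm/idtentry.h>, so the stack trace code can classify a return address
as interrupt-entry text. A sketch of the kind of boundary check this
enables (helper name hypothetical):

	extern char __irqentry_text_start[], __irqentry_text_end[];

	static inline bool in_irqentry_text(unsigned long addr)
	{
		return addr >= (unsigned long)__irqentry_text_start &&
		       addr <  (unsigned long)__irqentry_text_end;
	}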
+SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates user mode. */
@@ -662,23 +534,6 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
INTERRUPT_RETURN
-/* Returning to kernel space */
-retint_kernel:
-#ifdef CONFIG_PREEMPTION
- /* Interrupts are off */
- /* Check if we need preemption */
- btl $9, EFLAGS(%rsp) /* were interrupts off? */
- jnc 1f
- cmpl $0, PER_CPU_VAR(__preempt_count)
- jnz 1f
- call preempt_schedule_irq
-1:
-#endif
- /*
- * The iretq could re-enable interrupts:
- */
- TRACE_IRQS_IRETQ
-
SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates kernel mode. */
@@ -710,7 +565,7 @@ SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
/*
* This may fault. Non-paranoid faults on return to userspace are
* handled by fixup_bad_iret. These include #SS, #GP, and #NP.
- * Double-faults due to espfix64 are handled in do_double_fault.
+ * Double-faults due to espfix64 are handled in exc_double_fault.
* Other faults here are fatal.
*/
iretq
@@ -788,280 +643,32 @@ native_irq_return_ldt:
*/
jmp native_irq_return_iret
#endif
-SYM_CODE_END(common_interrupt)
-_ASM_NOKPROBE(common_interrupt)
-
-/*
- * APIC interrupts.
- */
-.macro apicinterrupt3 num sym do_sym
-SYM_CODE_START(\sym)
- UNWIND_HINT_IRET_REGS
- pushq $~(\num)
-.Lcommon_\sym:
- call interrupt_entry
- UNWIND_HINT_REGS indirect=1
- call \do_sym /* rdi points to pt_regs */
- jmp ret_from_intr
-SYM_CODE_END(\sym)
-_ASM_NOKPROBE(\sym)
-.endm
-
-/* Make sure APIC interrupt handlers end up in the irqentry section: */
-#define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax"
-#define POP_SECTION_IRQENTRY .popsection
-
-.macro apicinterrupt num sym do_sym
-PUSH_SECTION_IRQENTRY
-apicinterrupt3 \num \sym \do_sym
-POP_SECTION_IRQENTRY
-.endm
-
-#ifdef CONFIG_SMP
-apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
-apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt
-#endif
-
-#ifdef CONFIG_X86_UV
-apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt
-#endif
-
-apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt
-apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi
-
-#ifdef CONFIG_HAVE_KVM
-apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
-apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi
-apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi
-#endif
-
-#ifdef CONFIG_X86_MCE_THRESHOLD
-apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt
-#endif
-
-#ifdef CONFIG_X86_MCE_AMD
-apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt
-#endif
-
-#ifdef CONFIG_X86_THERMAL_VECTOR
-apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt
-#endif
-
-#ifdef CONFIG_SMP
-apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt
-apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt
-apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt
-#endif
-
-apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt
-apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt
-
-#ifdef CONFIG_IRQ_WORK
-apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
-#endif
+SYM_CODE_END(common_interrupt_return)
+_ASM_NOKPROBE(common_interrupt_return)
/*
- * Exception entry points.
- */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)
-
-.macro idtentry_part do_sym, has_error_code:req, read_cr2:req, paranoid:req, shift_ist=-1, ist_offset=0
-
- .if \paranoid
- call paranoid_entry
- /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
- .else
- call error_entry
- .endif
- UNWIND_HINT_REGS
-
- .if \read_cr2
- /*
- * Store CR2 early so subsequent faults cannot clobber it. Use R12 as
- * intermediate storage as RDX can be clobbered in enter_from_user_mode().
- * GET_CR2_INTO can clobber RAX.
- */
- GET_CR2_INTO(%r12);
- .endif
-
- .if \shift_ist != -1
- TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */
- .else
- TRACE_IRQS_OFF
- .endif
-
- .if \paranoid == 0
- testb $3, CS(%rsp)
- jz .Lfrom_kernel_no_context_tracking_\@
- CALL_enter_from_user_mode
-.Lfrom_kernel_no_context_tracking_\@:
- .endif
-
- movq %rsp, %rdi /* pt_regs pointer */
-
- .if \has_error_code
- movq ORIG_RAX(%rsp), %rsi /* get error code */
- movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
- .else
- xorl %esi, %esi /* no error code */
- .endif
-
- .if \shift_ist != -1
- subq $\ist_offset, CPU_TSS_IST(\shift_ist)
- .endif
-
- .if \read_cr2
- movq %r12, %rdx /* Move CR2 into 3rd argument */
- .endif
-
- call \do_sym
-
- .if \shift_ist != -1
- addq $\ist_offset, CPU_TSS_IST(\shift_ist)
- .endif
-
- .if \paranoid
- /* this procedure expect "no swapgs" flag in ebx */
- jmp paranoid_exit
- .else
- jmp error_exit
- .endif
-
-.endm
-
-/**
- * idtentry - Generate an IDT entry stub
- * @sym: Name of the generated entry point
- * @do_sym: C function to be called
- * @has_error_code: True if this IDT vector has an error code on the stack
- * @paranoid: non-zero means that this vector may be invoked from
- * kernel mode with user GSBASE and/or user CR3.
- * 2 is special -- see below.
- * @shift_ist: Set to an IST index if entries from kernel mode should
- * decrement the IST stack so that nested entries get a
- * fresh stack. (This is for #DB, which has a nasty habit
- * of recursing.)
- * @create_gap: create a 6-word stack gap when coming from kernel mode.
- * @read_cr2: load CR2 into the 3rd argument; done before calling any C code
- *
- * idtentry generates an IDT stub that sets up a usable kernel context,
- * creates struct pt_regs, and calls @do_sym. The stub has the following
- * special behaviors:
- *
- * On an entry from user mode, the stub switches from the trampoline or
- * IST stack to the normal thread stack. On an exit to user mode, the
- * normal exit-to-usermode path is invoked.
- *
- * On an exit to kernel mode, if @paranoid == 0, we check for preemption,
- * whereas we omit the preemption check if @paranoid != 0. This is purely
- * because the implementation is simpler this way. The kernel only needs
- * to check for asynchronous kernel preemption when IRQ handlers return.
- *
- * If @paranoid == 0, then the stub will handle IRET faults by pretending
- * that the fault came from user mode. It will handle gs_change faults by
- * pretending that the fault happened with kernel GSBASE. Since this handling
- * is omitted for @paranoid != 0, the #GP, #SS, and #NP stubs must have
- * @paranoid == 0. This special handling will do the wrong thing for
- * espfix-induced #DF on IRET, so #DF must not use @paranoid == 0.
+ * Reload gs selector with exception handling
+ * edi: new selector
*
- * @paranoid == 2 is special: the stub will never switch stacks. This is for
- * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
+ * Is in entry.text as it shouldn't be instrumented.
*/
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0
-SYM_CODE_START(\sym)
- UNWIND_HINT_IRET_REGS offset=\has_error_code*8
-
- /* Sanity check */
- .if \shift_ist != -1 && \paranoid != 1
- .error "using shift_ist requires paranoid=1"
- .endif
-
- .if \create_gap && \paranoid
- .error "using create_gap requires paranoid=0"
- .endif
-
- ASM_CLAC
-
- .if \has_error_code == 0
- pushq $-1 /* ORIG_RAX: no syscall to restart */
- .endif
-
- .if \paranoid == 1
- testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */
- jnz .Lfrom_usermode_switch_stack_\@
- .endif
-
- .if \create_gap == 1
- /*
- * If coming from kernel space, create a 6-word gap to allow the
- * int3 handler to emulate a call instruction.
- */
- testb $3, CS-ORIG_RAX(%rsp)
- jnz .Lfrom_usermode_no_gap_\@
- .rept 6
- pushq 5*8(%rsp)
- .endr
- UNWIND_HINT_IRET_REGS offset=8
-.Lfrom_usermode_no_gap_\@:
- .endif
-
- idtentry_part \do_sym, \has_error_code, \read_cr2, \paranoid, \shift_ist, \ist_offset
-
- .if \paranoid == 1
- /*
- * Entry from userspace. Switch stacks and treat it
- * as a normal entry. This means that paranoid handlers
- * run in real process context if user_mode(regs).
- */
-.Lfrom_usermode_switch_stack_\@:
- idtentry_part \do_sym, \has_error_code, \read_cr2, paranoid=0
- .endif
-
-_ASM_NOKPROBE(\sym)
-SYM_CODE_END(\sym)
-.endm
-
-idtentry divide_error do_divide_error has_error_code=0
-idtentry overflow do_overflow has_error_code=0
-idtentry bounds do_bounds has_error_code=0
-idtentry invalid_op do_invalid_op has_error_code=0
-idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault do_double_fault has_error_code=1 paranoid=2 read_cr2=1
-idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
-idtentry invalid_TSS do_invalid_TSS has_error_code=1
-idtentry segment_not_present do_segment_not_present has_error_code=1
-idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0
-idtentry coprocessor_error do_coprocessor_error has_error_code=0
-idtentry alignment_check do_alignment_check has_error_code=1
-idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
-
-
- /*
- * Reload gs selector with exception handling
- * edi: new selector
- */
-SYM_FUNC_START(native_load_gs_index)
+SYM_FUNC_START(asm_load_gs_index)
FRAME_BEGIN
- pushfq
- DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
- TRACE_IRQS_OFF
- SWAPGS
+ swapgs
.Lgs_change:
movl %edi, %gs
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
- SWAPGS
- TRACE_IRQS_FLAGS (%rsp)
- popfq
+ swapgs
FRAME_END
ret
-SYM_FUNC_END(native_load_gs_index)
-EXPORT_SYMBOL(native_load_gs_index)
+SYM_FUNC_END(asm_load_gs_index)
+EXPORT_SYMBOL(asm_load_gs_index)
_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
.section .fixup, "ax"
/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
- SWAPGS /* switch back to user gs */
+ swapgs /* switch back to user gs */
.macro ZAP_GS
/* This can't be a string because the preprocessor needs to see it. */
movl $__USER_DS, %eax
@@ -1074,20 +681,46 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
SYM_CODE_END(.Lbad_gs)
.previous
-/* Call softirq on interrupt stack. Interrupts are off. */
-SYM_FUNC_START(do_softirq_own_stack)
- pushq %rbp
- mov %rsp, %rbp
- ENTER_IRQ_STACK regs=0 old_rsp=%r11
- call __do_softirq
- LEAVE_IRQ_STACK regs=0
+/*
+ * rdi: New stack pointer, pointing to the top word of the stack
+ * rsi: Function pointer
+ * rdx: Function argument (can be NULL if none)
+ */
+SYM_FUNC_START(asm_call_on_stack)
+ /*
+ * Save the frame pointer unconditionally. This allows the ORC
+ * unwinder to handle the stack switch.
+ */
+ pushq %rbp
+ mov %rsp, %rbp
+
+ /*
+ * The unwinder relies on the word at the top of the new stack
+ * page linking back to the previous RSP.
+ */
+ mov %rsp, (%rdi)
+ mov %rdi, %rsp
+ /* Move the argument to the right place */
+ mov %rdx, %rdi
+
+1:
+ .pushsection .discard.instr_begin
+ .long 1b - .
+ .popsection
+
+ CALL_NOSPEC rsi
+
+2:
+ .pushsection .discard.instr_end
+ .long 2b - .
+ .popsection
+
+ /* Restore the previous stack pointer from RBP. */
leaveq
ret
-SYM_FUNC_END(do_softirq_own_stack)
+SYM_FUNC_END(asm_call_on_stack)
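asm_call_on_stack is the one trampoline the C code uses whenever a
function has to run on a different stack. A hedged sketch of a caller,
modelled on the irq_stack helpers; the prototype and per-CPU variable
names are assumptions:

	void asm_call_on_stack(void *sp, void (*func)(void), void *arg);

	/* sketch: run func() on the hardirq stack */
	static __always_inline void run_on_irqstack(void (*func)(void))
	{
		void *tos = __this_cpu_read(hardirq_stack_ptr);

		__this_cpu_add(irq_count, 1);
		asm_call_on_stack(tos - 8, func, NULL);
		__this_cpu_sub(irq_count, 1);
	}

The "tos - 8" leaves room for the top word in which asm_call_on_stack
stores the previous RSP for the unwinder, as described above.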
#ifdef CONFIG_XEN_PV
-idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-
/*
* A note on the "critical region" in our callback handler.
* We want to avoid stacking callback handlers due to events occurring
@@ -1100,9 +733,10 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
* So, on entry to the handler we detect whether we interrupted an
* existing activation in its critical region -- if so, we pop the current
* activation and restart the handler using the previous one.
+ *
+ * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *regs)
*/
-/* do_hypervisor_callback(struct *pt_regs) */
-SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
+SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)
/*
* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
@@ -1112,15 +746,10 @@ SYM_CODE_START_LOCAL(xen_do_hypervisor_callback)
movq %rdi, %rsp /* we don't return, adjust the stack frame */
UNWIND_HINT_REGS
- ENTER_IRQ_STACK old_rsp=%r10
- call xen_evtchn_do_upcall
- LEAVE_IRQ_STACK
+ call xen_pv_evtchn_do_upcall
-#ifndef CONFIG_PREEMPTION
- call xen_maybe_preempt_hcall
-#endif
- jmp error_exit
-SYM_CODE_END(xen_do_hypervisor_callback)
+ jmp error_return
+SYM_CODE_END(exc_xen_hypervisor_callback)
/*
* Hypervisor uses this for application faults while it executes.
@@ -1155,7 +784,7 @@ SYM_CODE_START(xen_failsafe_callback)
addq $0x30, %rsp
pushq $0 /* RIP */
UNWIND_HINT_IRET_REGS offset=8
- jmp general_protection
+ jmp asm_exc_general_protection
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp), %rcx
movq 8(%rsp), %r11
@@ -1164,48 +793,10 @@ SYM_CODE_START(xen_failsafe_callback)
pushq $-1 /* orig_ax = -1 => not a system call */
PUSH_AND_CLEAR_REGS
ENCODE_FRAME_POINTER
- jmp error_exit
+ jmp error_return
SYM_CODE_END(xen_failsafe_callback)
#endif /* CONFIG_XEN_PV */
-#ifdef CONFIG_XEN_PVHVM
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
- xen_hvm_callback_vector xen_evtchn_do_upcall
-#endif
-
-
-#if IS_ENABLED(CONFIG_HYPERV)
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
- hyperv_callback_vector hyperv_vector_handler
-
-apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
- hyperv_reenlightenment_vector hyperv_reenlightenment_intr
-
-apicinterrupt3 HYPERV_STIMER0_VECTOR \
- hv_stimer0_callback_vector hv_stimer0_vector_handler
-#endif /* CONFIG_HYPERV */
-
-#if IS_ENABLED(CONFIG_ACRN_GUEST)
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
- acrn_hv_callback_vector acrn_hv_vector_handler
-#endif
-
-idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
-idtentry int3 do_int3 has_error_code=0 create_gap=1
-idtentry stack_segment do_stack_segment has_error_code=1
-
-#ifdef CONFIG_XEN_PV
-idtentry xennmi do_nmi has_error_code=0
-idtentry xendebug do_debug has_error_code=0
-#endif
-
-idtentry general_protection do_general_protection has_error_code=1
-idtentry page_fault do_page_fault has_error_code=1 read_cr2=1
-
-#ifdef CONFIG_X86_MCE
-idtentry machine_check do_mce has_error_code=0 paranoid=1
-#endif
-
/*
* Save all registers in pt_regs, and switch gs if needed.
* Use slow, but surefire "are we in kernel?" check.
@@ -1261,17 +852,13 @@ SYM_CODE_END(paranoid_entry)
*/
SYM_CODE_START_LOCAL(paranoid_exit)
UNWIND_HINT_REGS
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
- TRACE_IRQS_IRETQ
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp restore_regs_and_return_to_kernel
.Lparanoid_exit_no_swapgs:
- TRACE_IRQS_IRETQ_DEBUG
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
jmp restore_regs_and_return_to_kernel
@@ -1335,7 +922,6 @@ SYM_CODE_START_LOCAL(error_entry)
*/
SWAPGS
FENCE_SWAPGS_USER_ENTRY
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
jmp .Lerror_entry_done
.Lbstep_iret:
@@ -1362,14 +948,13 @@ SYM_CODE_START_LOCAL(error_entry)
jmp .Lerror_entry_from_usermode_after_swapgs
SYM_CODE_END(error_entry)
-SYM_CODE_START_LOCAL(error_exit)
+SYM_CODE_START_LOCAL(error_return)
UNWIND_HINT_REGS
- DISABLE_INTERRUPTS(CLBR_ANY)
- TRACE_IRQS_OFF
+ DEBUG_ENTRY_ASSERT_IRQS_OFF
testb $3, CS(%rsp)
- jz retint_kernel
- jmp .Lretint_user
-SYM_CODE_END(error_exit)
+ jz restore_regs_and_return_to_kernel
+ jmp swapgs_restore_regs_and_return_to_usermode
+SYM_CODE_END(error_return)
/*
* Runs on exception stack. Xen PV does not go through this path at all,
@@ -1379,7 +964,7 @@ SYM_CODE_END(error_exit)
* %r14: Used to save/restore the CR3 of the interrupted context
* when PAGE_TABLE_ISOLATION is in use. Do not clobber.
*/
-SYM_CODE_START(nmi)
+SYM_CODE_START(asm_exc_nmi)
UNWIND_HINT_IRET_REGS
/*
@@ -1464,7 +1049,7 @@ SYM_CODE_START(nmi)
movq %rsp, %rdi
movq $-1, %rsi
- call do_nmi
+ call exc_nmi
/*
* Return back to user mode. We must *not* do the normal exit
@@ -1521,7 +1106,7 @@ SYM_CODE_START(nmi)
* end_repeat_nmi, then we are a nested NMI. We must not
* modify the "iret" frame because it's being written by
* the outer NMI. That's okay; the outer NMI handler is
- * about to about to call do_nmi anyway, so we can just
+ * about to call exc_nmi() anyway, so we can just
* resume the outer NMI.
*/
@@ -1640,7 +1225,7 @@ repeat_nmi:
* RSP is pointing to "outermost RIP". gsbase is unknown, but, if
* we're repeating an NMI, gsbase has the same value that it had on
* the first iteration. paranoid_entry will load the kernel
- * gsbase if needed before we call do_nmi. "NMI executing"
+ * gsbase if needed before we call exc_nmi(). "NMI executing"
* is zero.
*/
movq $1, 10*8(%rsp) /* Set "NMI executing". */
@@ -1674,10 +1259,9 @@ end_repeat_nmi:
call paranoid_entry
UNWIND_HINT_REGS
- /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
movq $-1, %rsi
- call do_nmi
+ call exc_nmi
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
@@ -1714,7 +1298,7 @@ nmi_restore:
* about espfix64 on the way back to kernel mode.
*/
iretq
-SYM_CODE_END(nmi)
+SYM_CODE_END(asm_exc_nmi)
#ifndef CONFIG_IA32_EMULATION
/*
@@ -1728,6 +1312,7 @@ SYM_CODE_START(ignore_sysret)
SYM_CODE_END(ignore_sysret)
#endif
+.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
@@ -1739,3 +1324,4 @@ SYM_CODE_START(rewind_stack_do_exit)
call do_exit
SYM_CODE_END(rewind_stack_do_exit)
+.popsection
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index f1d3ccae5dd5..0f974ae01e62 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -46,12 +46,14 @@
* ebp user stack
* 0(%ebp) arg6
*/
-SYM_FUNC_START(entry_SYSENTER_compat)
+SYM_CODE_START(entry_SYSENTER_compat)
+ UNWIND_HINT_EMPTY
/* Interrupts are off on entry. */
SWAPGS
- /* We are about to clobber %rsp anyway, clobbering here is OK */
- SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+ pushq %rax
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ popq %rax
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -104,6 +106,9 @@ SYM_FUNC_START(entry_SYSENTER_compat)
xorl %r14d, %r14d /* nospec r14 */
pushq $0 /* pt_regs->r15 = 0 */
xorl %r15d, %r15d /* nospec r15 */
+
+ UNWIND_HINT_REGS
+
cld
/*
@@ -129,17 +134,11 @@ SYM_FUNC_START(entry_SYSENTER_compat)
jnz .Lsysenter_fix_flags
.Lsysenter_flags_fixed:
- /*
- * User mode is traced as though IRQs are on, and SYSENTER
- * turned them off.
- */
- TRACE_IRQS_OFF
-
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
- ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
- "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+ ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
+ "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
jmp sysret32_from_system_call
.Lsysenter_fix_flags:
@@ -147,7 +146,7 @@ SYM_FUNC_START(entry_SYSENTER_compat)
popfq
jmp .Lsysenter_flags_fixed
SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
-SYM_FUNC_END(entry_SYSENTER_compat)
+SYM_CODE_END(entry_SYSENTER_compat)
/*
* 32-bit SYSCALL entry.
@@ -197,6 +196,7 @@ SYM_FUNC_END(entry_SYSENTER_compat)
* 0(%esp) arg6
*/
SYM_CODE_START(entry_SYSCALL_compat)
+ UNWIND_HINT_EMPTY
/* Interrupts are off on entry. */
swapgs
@@ -247,17 +247,13 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
pushq $0 /* pt_regs->r15 = 0 */
xorl %r15d, %r15d /* nospec r15 */
- /*
- * User mode is traced as though IRQs are on, and SYSENTER
- * turned them off.
- */
- TRACE_IRQS_OFF
+ UNWIND_HINT_REGS
movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
- ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
- "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+ ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
+ "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
/* Opportunistic SYSRET */
sysret32_from_system_call:
@@ -266,7 +262,7 @@ sysret32_from_system_call:
* stack. So let's erase the thread stack right now.
*/
STACKLEAK_ERASE
- TRACE_IRQS_ON /* User mode traces as IRQs on. */
+
movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
@@ -340,6 +336,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
* ebp arg6
*/
SYM_CODE_START(entry_INT80_compat)
+ UNWIND_HINT_EMPTY
/*
* Interrupts are off on entry.
*/
@@ -361,8 +358,11 @@ SYM_CODE_START(entry_INT80_compat)
/* Need to switch before accessing the thread stack. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
/* In the Xen PV case we already run on the thread stack. */
- ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+ ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+
+ movq %rsp, %rdi
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
pushq 6*8(%rdi) /* regs->ss */
@@ -401,19 +401,12 @@ SYM_CODE_START(entry_INT80_compat)
xorl %r14d, %r14d /* nospec r14 */
pushq %r15 /* pt_regs->r15 */
xorl %r15d, %r15d /* nospec r15 */
- cld
- /*
- * User mode is traced as though IRQs are on, and the interrupt
- * gate turned them off.
- */
- TRACE_IRQS_OFF
+ UNWIND_HINT_REGS
+
+ cld
movq %rsp, %rdi
call do_int80_syscall_32
-.Lsyscall_32_done:
-
- /* Go back to user mode. */
- TRACE_IRQS_ON
jmp swapgs_restore_regs_and_return_to_usermode
SYM_CODE_END(entry_INT80_compat)
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index dbe4493b534e..ccd32877a3c4 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -3,7 +3,6 @@
* Save registers before calling assembly functions. This avoids
* disturbance of register allocation in some inline assembly constructs.
* Copyright 2001,2002 by Andi Kleen, SuSE Labs.
- * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
*/
#include <linux/linkage.h>
#include "calling.h"
@@ -37,15 +36,6 @@ SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
-#ifdef CONFIG_TRACE_IRQFLAGS
- THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
- THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
-#endif
-
#ifdef CONFIG_PREEMPTION
THUNK preempt_schedule_thunk, preempt_schedule
THUNK preempt_schedule_notrace_thunk, preempt_schedule_notrace
@@ -53,9 +43,7 @@ SYM_FUNC_END(\name)
EXPORT_SYMBOL(preempt_schedule_notrace_thunk)
#endif
-#if defined(CONFIG_TRACE_IRQFLAGS) \
- || defined(CONFIG_DEBUG_LOCK_ALLOC) \
- || defined(CONFIG_PREEMPTION)
+#ifdef CONFIG_PREEMPTION
SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
popq %r11
popq %r10
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 54e03ab26ff3..04e65f0698f6 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -10,8 +10,11 @@ ARCH_REL_TYPE_ABS += R_386_GLOB_DAT|R_386_JMP_SLOT|R_386_RELATIVE
include $(srctree)/lib/vdso/Makefile
KBUILD_CFLAGS += $(DISABLE_LTO)
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
@@ -29,6 +32,9 @@ vobjs32-y += vdso32/vclock_gettime.o
# files to link into kernel
obj-y += vma.o
+KASAN_SANITIZE_vma.o := y
+UBSAN_SANITIZE_vma.o := y
+KCSAN_SANITIZE_vma.o := y
OBJECT_FILES_NON_STANDARD_vma.o := n
# vDSO images to build
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 43428cc514c8..ea7c1f0b79df 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -144,7 +144,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -154,7 +154,7 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
zap_page_range(vma, vma->vm_start, size);
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
}
#else
@@ -268,7 +268,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
unsigned long text_start;
int ret = 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
addr = get_unmapped_area(NULL, addr,
@@ -311,7 +311,7 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
up_fail:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
@@ -373,7 +373,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/*
* Check if we have already mapped vdso blob - fail to prevent
 * abusing from userspace install_special_mapping, which may
@@ -384,11 +384,11 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
vma_is_special_mapping(vma, &vvar_mapping)) {
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return -EEXIST;
}
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return map_vdso(image, addr);
}
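All of the locking changes in this file follow the mmap_sem to
mmap_lock API conversion. The new calls are thin wrappers; roughly, and
assuming the plain rwsem-based implementation:

	/* sketch of what linux/mmap_lock.h provides */
	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);
	}

	static inline int mmap_write_lock_killable(struct mm_struct *mm)
	{
		return down_write_killable(&mm->mmap_lock);
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);
	}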
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index ea2a3d151294..4103665c6e03 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2178,10 +2178,10 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
* userspace with CR4.PCE clear while another task is still
* doing on_each_cpu_mask() to propagate CR4.PCE.
*
- * For now, this can't happen because all callers hold mmap_sem
+ * For now, this can't happen because all callers hold mmap_lock
* for write. If this changes, we'll need a different solution.
*/
- lockdep_assert_held_write(&mm->mmap_sem);
+ mmap_assert_write_locked(mm);
if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
on_each_cpu_mask(mm_cpumask(mm), cr4_update_pce, NULL, 1);
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index e2137070386a..a54c6a401581 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -15,6 +15,7 @@
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
+#include <asm/idtentry.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
@@ -152,15 +153,11 @@ static inline bool hv_reenlightenment_available(void)
ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT;
}
-__visible void __irq_entry hyperv_reenlightenment_intr(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
- entering_ack_irq();
-
+ ack_APIC_irq();
inc_irq_stat(irq_hv_reenlightenment_count);
-
schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
-
- exiting_irq();
}
void set_hv_tscchange_cb(void (*cb)(void))
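The entering_ack_irq()/exiting_irq() pair can be dropped because the
DEFINE_IDTENTRY_SYSVEC() wrapper now provides the irq_enter()/irq_exit()
bracket itself. A hedged sketch of what the generated outer function
looks like; the exact sequence is an assumption based on the other
IDTENTRY wrappers in this series:

	/* sketch of the code DEFINE_IDTENTRY_SYSVEC(func) emits around __func() */
	__visible noinstr void func(struct pt_regs *regs)
	{
		bool rcu_exit = idtentry_enter_cond_rcu(regs);

		instrumentation_begin();
		irq_enter_rcu();
		kvm_set_cpu_l1tf_flush_l1d();
		__func(regs);		/* the body written after the macro */
		irq_exit_rcu();
		instrumentation_end();
		idtentry_exit_cond_rcu(regs, rcu_exit);
	}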
diff --git a/arch/x86/include/asm/acrn.h b/arch/x86/include/asm/acrn.h
deleted file mode 100644
index 4adb13f08af7..000000000000
--- a/arch/x86/include/asm/acrn.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_ACRN_H
-#define _ASM_X86_ACRN_H
-
-extern void acrn_hv_callback_vector(void);
-#ifdef CONFIG_TRACING
-#define trace_acrn_hv_callback_vector acrn_hv_callback_vector
-#endif
-
-extern void acrn_hv_vector_handler(struct pt_regs *regs);
-#endif /* _ASM_X86_ACRN_H */
diff --git a/arch/x86/include/asm/agp.h b/arch/x86/include/asm/agp.h
index 8e25bf4f323a..62da760d6d5a 100644
--- a/arch/x86/include/asm/agp.h
+++ b/arch/x86/include/asm/agp.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
/*
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index c7df20e78b09..455066a06f60 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -57,6 +57,7 @@ struct threshold_bank {
/* initialized to the number of CPUs on the node sharing this bank */
refcount_t cpus;
+ unsigned int shared;
};
struct amd_northbridge {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 19e94af9cc5d..2cc44e957c31 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -519,39 +519,6 @@ static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
static inline void apic_smt_update(void) { }
#endif
-extern void irq_enter(void);
-extern void irq_exit(void);
-
-static inline void entering_irq(void)
-{
- irq_enter();
- kvm_set_cpu_l1tf_flush_l1d();
-}
-
-static inline void entering_ack_irq(void)
-{
- entering_irq();
- ack_APIC_irq();
-}
-
-static inline void ipi_entering_ack_irq(void)
-{
- irq_enter();
- ack_APIC_irq();
- kvm_set_cpu_l1tf_flush_l1d();
-}
-
-static inline void exiting_irq(void)
-{
- irq_exit();
-}
-
-static inline void exiting_ack_irq(void)
-{
- ack_APIC_irq();
- irq_exit();
-}
-
extern void ioapic_zap_locks(void);
#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 9bf2620ce817..5a42f9206138 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/ftrace.h>
#include <linux/uaccess.h>
+#include <linux/pgtable.h>
#include <asm/string.h>
#include <asm/page.h>
#include <asm/checksum.h>
#include <asm-generic/asm-prototypes.h>
-#include <asm/pgtable.h>
#include <asm/special_insns.h>
#include <asm/preempt.h>
#include <asm/asm.h>
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 115127c7ad28..bf35e476a776 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -28,7 +28,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
* Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
* it's non-inlined function that increases binary size and stack usage.
*/
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -40,7 +40,7 @@ static __always_inline int arch_atomic_read(const atomic_t *v)
*/
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -166,6 +166,7 @@ static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic_add_return arch_atomic_add_return
/**
* arch_atomic_sub_return - subtract integer and return
@@ -178,34 +179,39 @@ static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
return arch_atomic_add_return(-i, v);
}
+#define arch_atomic_sub_return arch_atomic_sub_return
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-static inline int arch_atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic_xchg arch_atomic_xchg
-static inline void arch_atomic_and(int i, atomic_t *v)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "andl %1,%0"
: "+m" (v->counter)
@@ -213,7 +219,7 @@ static inline void arch_atomic_and(int i, atomic_t *v)
: "memory");
}
-static inline int arch_atomic_fetch_and(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
@@ -221,8 +227,9 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_and arch_atomic_fetch_and
-static inline void arch_atomic_or(int i, atomic_t *v)
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "orl %1,%0"
: "+m" (v->counter)
@@ -230,7 +237,7 @@ static inline void arch_atomic_or(int i, atomic_t *v)
: "memory");
}
-static inline int arch_atomic_fetch_or(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
@@ -238,8 +245,9 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_or arch_atomic_fetch_or
-static inline void arch_atomic_xor(int i, atomic_t *v)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "xorl %1,%0"
: "+m" (v->counter)
@@ -247,7 +255,7 @@ static inline void arch_atomic_xor(int i, atomic_t *v)
: "memory");
}
-static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
int val = arch_atomic_read(v);
@@ -255,6 +263,7 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
return val;
}
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
@@ -262,6 +271,6 @@ static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#include <asm-generic/atomic-instrumented.h>
+#define ARCH_ATOMIC
#endif /* _ASM_X86_ATOMIC_H */
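Defining every arch_atomic_* operation as a macro of its own name is
what the ARCH_ATOMIC scheme keys on: the generic fallback headers only
generate an operation when the architecture did not provide it. A
simplified sketch of that pattern:

	/* sketch: the fallback is compiled out when the arch op exists */
	#ifndef arch_atomic_inc
	static __always_inline void
	arch_atomic_inc(atomic_t *v)
	{
		arch_atomic_add(1, v);
	}
	#define arch_atomic_inc arch_atomic_inc
	#endif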
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 52cfaecb13f9..5efd01b548d1 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -75,6 +75,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
return arch_cmpxchg64(&v->counter, o, n);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
/**
* arch_atomic64_xchg - xchg atomic64 variable
@@ -94,6 +95,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
: "memory");
return o;
}
+#define arch_atomic64_xchg arch_atomic64_xchg
/**
* arch_atomic64_set - set atomic64 variable
@@ -138,6 +140,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_add_return arch_atomic64_add_return
/*
* Other variants with different arithmetic operators:
@@ -149,6 +152,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
ASM_NO_INPUT_CLOBBER("memory"));
return i;
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
@@ -242,6 +246,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
"S" (v) : "memory");
return (int)a;
}
+#define arch_atomic64_add_unless arch_atomic64_add_unless
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
@@ -281,6 +286,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -299,6 +305,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -317,6 +324,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
@@ -327,6 +335,7 @@ static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
return old;
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v))
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 95c6ceac66b9..809bd010a751 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -19,7 +19,7 @@
*/
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- return READ_ONCE((v)->counter);
+ return __READ_ONCE((v)->counter);
}
/**
@@ -31,7 +31,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*/
static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- WRITE_ONCE(v->counter, i);
+ __WRITE_ONCE(v->counter, i);
}
/**
@@ -159,37 +159,43 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
+#define arch_atomic64_add_return arch_atomic64_add_return
static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
+#define arch_atomic64_sub_return arch_atomic64_sub_return
static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
return try_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
+#define arch_atomic64_xchg arch_atomic64_xchg
static inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
@@ -207,6 +213,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
static inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
@@ -224,6 +231,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
@@ -241,5 +249,6 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 0367efdc5b7a..35460fef39b8 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -201,8 +201,12 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
-static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
+ /*
+ * Because this is a plain access, we need to disable KCSAN here to
+ * avoid double instrumentation via instrumented bitops.
+ */
return ((1UL << (nr & (BITS_PER_LONG-1))) &
(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
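The double instrumentation mentioned in the comment comes from the
wrapper layer that callers normally go through; a sketch of it, as in
the asm-generic instrumented bitops (details assumed):

	/* sketch: the instrumented test_bit() wrapper callers actually use */
	static inline bool test_bit(long nr, const volatile unsigned long *addr)
	{
		instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
		return arch_test_bit(nr, addr);
	}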
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index facba9bc30ca..fb34ff641e0a 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -70,14 +70,17 @@ do { \
#define HAVE_ARCH_BUG
#define BUG() \
do { \
+ instrumentation_begin(); \
_BUG_FLAGS(ASM_UD2, 0); \
unreachable(); \
} while (0)
#define __WARN_FLAGS(flags) \
do { \
+ instrumentation_begin(); \
_BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
annotate_reachable(); \
+ instrumentation_end(); \
} while (0)
#include <asm-generic/bug.h>
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63feaf2a5f93..b192d917a6d0 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H
+#include <linux/mm.h>
+
/* Caches aren't brain-dead on the intel. */
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 02c0078d3787..8902fdb7de13 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -11,15 +11,11 @@
#ifdef CONFIG_X86_64
/* Macro to enforce the same ordering and stack sizes */
-#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
+#define ESTACKS_MEMBERS(guardsize) \
char DF_stack_guard[guardsize]; \
char DF_stack[EXCEPTION_STKSZ]; \
char NMI_stack_guard[guardsize]; \
char NMI_stack[EXCEPTION_STKSZ]; \
- char DB2_stack_guard[guardsize]; \
- char DB2_stack[db2_holesize]; \
- char DB1_stack_guard[guardsize]; \
- char DB1_stack[EXCEPTION_STKSZ]; \
char DB_stack_guard[guardsize]; \
char DB_stack[EXCEPTION_STKSZ]; \
char MCE_stack_guard[guardsize]; \
@@ -28,12 +24,12 @@
/* The exception stacks' physical storage. No guard pages required */
struct exception_stacks {
- ESTACKS_MEMBERS(0, 0)
+ ESTACKS_MEMBERS(0)
};
/* The effective cpu entry area mapping with guard pages. */
struct cea_exception_stacks {
- ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
+ ESTACKS_MEMBERS(PAGE_SIZE)
};
/*
@@ -42,8 +38,6 @@ struct cea_exception_stacks {
enum exception_stack_ordering {
ESTACK_DF,
ESTACK_NMI,
- ESTACK_DB2,
- ESTACK_DB1,
ESTACK_DB,
ESTACK_MCE,
N_EXCEPTION_STACKS
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index db189945e9b0..02dabc9e77b0 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -362,6 +362,7 @@
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
+#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
@@ -407,5 +408,6 @@
#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 1a8609a15856..e89558a3fe4a 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -18,7 +18,7 @@ DECLARE_PER_CPU(unsigned long, cpu_dr7);
native_set_debugreg(register, value)
#endif
-static inline unsigned long native_get_debugreg(int regno)
+static __always_inline unsigned long native_get_debugreg(int regno)
{
unsigned long val = 0; /* Damn you, gcc! */
@@ -47,7 +47,7 @@ static inline unsigned long native_get_debugreg(int regno)
return val;
}
-static inline void native_set_debugreg(int regno, unsigned long value)
+static __always_inline void native_set_debugreg(int regno, unsigned long value)
{
switch (regno) {
case 0:
@@ -85,7 +85,7 @@ static inline void hw_breakpoint_disable(void)
set_debugreg(0UL, 3);
}
-static inline int hw_breakpoint_active(void)
+static __always_inline bool hw_breakpoint_active(void)
{
return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
}
@@ -94,24 +94,38 @@ extern void aout_dump_debugregs(struct user *dump);
extern void hw_breakpoint_restore(void);
-#ifdef CONFIG_X86_64
-DECLARE_PER_CPU(int, debug_stack_usage);
-static inline void debug_stack_usage_inc(void)
+static __always_inline unsigned long local_db_save(void)
{
- __this_cpu_inc(debug_stack_usage);
+ unsigned long dr7;
+
+ if (static_cpu_has(X86_FEATURE_HYPERVISOR) && !hw_breakpoint_active())
+ return 0;
+
+ get_debugreg(dr7, 7);
+ dr7 &= ~0x400; /* architecturally set bit */
+ if (dr7)
+ set_debugreg(0, 7);
+ /*
+ * Ensure the compiler doesn't lower the above statements into
+ * the critical section; disabling breakpoints late would not
+ * be good.
+ */
+ barrier();
+
+ return dr7;
}
-static inline void debug_stack_usage_dec(void)
+
+static __always_inline void local_db_restore(unsigned long dr7)
{
- __this_cpu_dec(debug_stack_usage);
+ /*
+ * Ensure the compiler doesn't raise this statement into
+ * the critical section; enabling breakpoints early would
+ * not be good.
+ */
+ barrier();
+ if (dr7)
+ set_debugreg(dr7, 7);
}
-void debug_stack_set_zero(void);
-void debug_stack_reset(void);
-#else /* !X86_64 */
-static inline void debug_stack_set_zero(void) { }
-static inline void debug_stack_reset(void) { }
-static inline void debug_stack_usage_inc(void) { }
-static inline void debug_stack_usage_dec(void) { }
-#endif /* X86_64 */
#ifdef CONFIG_CPU_SUP_AMD
extern void set_dr_addr_mask(unsigned long mask, int dr);
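local_db_save()/local_db_restore() replace the old debug-IDT and
debug_stack_usage machinery: instead of switching IDTs, a region that
must not recurse through #DB simply clears DR7 around itself. A minimal
usage sketch:

	/* sketch: keep a #DB-recursion-unsafe region breakpoint-free */
	unsigned long dr7 = local_db_save();

	/* ... code which must not be hit by a data breakpoint ... */

	local_db_restore(dr7);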
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 68a99d2a5f33..1ced11d31932 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -40,11 +40,6 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
desc->l = 0;
}
-extern struct desc_ptr idt_descr;
-extern gate_desc idt_table[];
-extern const struct desc_ptr debug_idt_descr;
-extern gate_desc debug_idt_table[];
-
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
@@ -214,7 +209,7 @@ static inline void native_load_gdt(const struct desc_ptr *dtr)
asm volatile("lgdt %0"::"m" (*dtr));
}
-static inline void native_load_idt(const struct desc_ptr *dtr)
+static __always_inline void native_load_idt(const struct desc_ptr *dtr)
{
asm volatile("lidt %0"::"m" (*dtr));
}
@@ -386,64 +381,23 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
desc->limit1 = (limit >> 16) & 0xf;
}
-void update_intr_gate(unsigned int n, const void *addr);
void alloc_intr_gate(unsigned int n, const void *addr);
extern unsigned long system_vectors[];
-#ifdef CONFIG_X86_64
-DECLARE_PER_CPU(u32, debug_idt_ctr);
-static inline bool is_debug_idt_enabled(void)
-{
- if (this_cpu_read(debug_idt_ctr))
- return true;
-
- return false;
-}
-
-static inline void load_debug_idt(void)
-{
- load_idt((const struct desc_ptr *)&debug_idt_descr);
-}
-#else
-static inline bool is_debug_idt_enabled(void)
-{
- return false;
-}
-
-static inline void load_debug_idt(void)
-{
-}
-#endif
-
-/*
- * The load_current_idt() must be called with interrupts disabled
- * to avoid races. That way the IDT will always be set back to the expected
- * descriptor. It's also called when a CPU is being initialized, and
- * that doesn't need to disable interrupts, as nothing should be
- * bothering the CPU then.
- */
-static inline void load_current_idt(void)
-{
- if (is_debug_idt_enabled())
- load_debug_idt();
- else
- load_idt((const struct desc_ptr *)&idt_descr);
-}
-
+extern void load_current_idt(void);
extern void idt_setup_early_handler(void);
extern void idt_setup_early_traps(void);
extern void idt_setup_traps(void);
extern void idt_setup_apic_and_irq_gates(void);
+extern bool idt_is_f00f_address(unsigned long address);
#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
extern void idt_setup_ist_traps(void);
-extern void idt_setup_debugidt_traps(void);
#else
static inline void idt_setup_early_pf(void) { }
static inline void idt_setup_ist_traps(void) { }
-static inline void idt_setup_debugidt_traps(void) { }
#endif
extern void idt_invalidate(void *addr);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 89dcc7aa7e2c..e7d2ccfdd507 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,13 +3,13 @@
#define _ASM_X86_EFI_H
#include <asm/fpu/api.h>
-#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>
#include <asm/nospec-branch.h>
#include <asm/mmu_context.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
+#include <linux/pgtable.h>
extern unsigned long efi_fw_vendor, efi_config_table;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 69c0f892e310..452beed7892b 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -281,9 +281,29 @@ extern u32 elf_hwcap2;
/*
* An executable for which elf_read_implies_exec() returns TRUE will
* have the READ_IMPLIES_EXEC personality flag set automatically.
+ *
+ * The decision process for determining the results is:
+ *
+ *                  CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
+ * ELF:                  |            |                  |                |
+ * ----------------------|------------|------------------|----------------|
+ * missing PT_GNU_STACK  | exec-all   | exec-all         | exec-none      |
+ * PT_GNU_STACK == RWX   | exec-stack | exec-stack       | exec-stack     |
+ * PT_GNU_STACK == RW    | exec-none  | exec-none        | exec-none      |
+ *
+ * exec-all : all PROT_READ user mappings are executable, except when
+ * backed by files on a noexec-filesystem.
+ * exec-none : only PROT_EXEC user mappings are executable.
+ * exec-stack: only the stack and PROT_EXEC user mappings are executable.
+ *
+ * *this column has no architectural effect: NX markings are ignored by
+ * hardware, but may have behavioral effects when "wants X" collides with
+ * "cannot be X" constraints in memory permission flags, as in
+ * https://lkml.kernel.org/r/20190418055759.GA3155@mellanox.com
+ *
*/
#define elf_read_implies_exec(ex, executable_stack) \
- (executable_stack != EXSTACK_DISABLE_X)
+ (mmap_is_ia32() && executable_stack == EXSTACK_DEFAULT)
struct task_struct;
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
deleted file mode 100644
index 416422762845..000000000000
--- a/arch/x86/include/asm/entry_arch.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file is designed to contain the BUILD_INTERRUPT specifications for
- * all of the extra named interrupt vectors used by the architecture.
- * Usually this is the Inter Process Interrupts (IPIs)
- */
-
-/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
- */
-#ifdef CONFIG_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-BUILD_INTERRUPT(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR)
-BUILD_INTERRUPT(reboot_interrupt, REBOOT_VECTOR)
-#endif
-
-#ifdef CONFIG_HAVE_KVM
-BUILD_INTERRUPT(kvm_posted_intr_ipi, POSTED_INTR_VECTOR)
-BUILD_INTERRUPT(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR)
-BUILD_INTERRUPT(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR)
-#endif
-
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
-
-#ifdef CONFIG_IRQ_WORK
-BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)
-#endif
-
-#ifdef CONFIG_X86_THERMAL_VECTOR
-BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
-#endif
-
-#ifdef CONFIG_X86_MCE_THRESHOLD
-BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
-#endif
-
-#ifdef CONFIG_X86_MCE_AMD
-BUILD_INTERRUPT(deferred_error_interrupt, DEFERRED_ERROR_VECTOR)
-#endif
-#endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 4154bc5f6a4e..74c12437401e 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -28,28 +28,6 @@
#include <asm/irq.h>
#include <asm/sections.h>
-/* Interrupt handlers registered during init_IRQ */
-extern asmlinkage void apic_timer_interrupt(void);
-extern asmlinkage void x86_platform_ipi(void);
-extern asmlinkage void kvm_posted_intr_ipi(void);
-extern asmlinkage void kvm_posted_intr_wakeup_ipi(void);
-extern asmlinkage void kvm_posted_intr_nested_ipi(void);
-extern asmlinkage void error_interrupt(void);
-extern asmlinkage void irq_work_interrupt(void);
-extern asmlinkage void uv_bau_message_intr1(void);
-
-extern asmlinkage void spurious_interrupt(void);
-extern asmlinkage void thermal_interrupt(void);
-extern asmlinkage void reschedule_interrupt(void);
-
-extern asmlinkage void irq_move_cleanup_interrupt(void);
-extern asmlinkage void reboot_interrupt(void);
-extern asmlinkage void threshold_interrupt(void);
-extern asmlinkage void deferred_error_interrupt(void);
-
-extern asmlinkage void call_function_interrupt(void);
-extern asmlinkage void call_function_single_interrupt(void);
-
#ifdef CONFIG_X86_LOCAL_APIC
struct irq_data;
struct pci_dev;
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
new file mode 100644
index 000000000000..cf51c50eb356
--- /dev/null
+++ b/arch/x86/include/asm/idtentry.h
@@ -0,0 +1,652 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_IDTENTRY_H
+#define _ASM_X86_IDTENTRY_H
+
+/* Interrupts/Exceptions */
+#include <asm/trapnr.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/hardirq.h>
+
+#include <asm/irq_stack.h>
+
+void idtentry_enter_user(struct pt_regs *regs);
+void idtentry_exit_user(struct pt_regs *regs);
+
+bool idtentry_enter_cond_rcu(struct pt_regs *regs);
+void idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit);
+
+/**
+ * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
+ * No error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_asm_##func (may be unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Note: This is the C variant of DECLARE_IDTENTRY(). As the name says, it
+ * declares the entry points for use in C code. There is an ASM variant
+ * as well, which is used to emit the entry stubs in entry_32/64.S.
+ */
+#define DECLARE_IDTENTRY(vector, func) \
+ asmlinkage void asm_##func(void); \
+ asmlinkage void xen_asm_##func(void); \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_IDTENTRY - Emit code for simple IDT entry points
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code with interrupts disabled.
+ *
+ * The macro is written so that it acts as a function definition. Append
+ * the body with a pair of curly brackets.
+ *
+ * idtentry_enter() contains common code which has to be invoked before
+ * arbitrary code in the body. idtentry_exit() contains common code
+ * which has to run before returning to the low level assembly code.
+ */
+#define DEFINE_IDTENTRY(func) \
+static __always_inline void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ __##func (regs); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs)
+
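
For illustration, a handler built on DEFINE_IDTENTRY() might look like the sketch below; the handler name and body are hypothetical and only show where user-supplied code slots into the expansion above:

	DEFINE_IDTENTRY(exc_example_trap)
	{
		/*
		 * Body runs between instrumentation_begin()/end() with
		 * interrupts disabled; the enter/exit glue above handles
		 * the RCU and context tracking transitions.
		 */
		pr_err("example trap at ip %lx\n", regs->ip);
	}
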
+/* Special case for 32bit IRET 'trap' */
+#define DECLARE_IDTENTRY_SW DECLARE_IDTENTRY
+#define DEFINE_IDTENTRY_SW DEFINE_IDTENTRY
+
+/**
+ * DECLARE_IDTENTRY_ERRORCODE - Declare functions for simple IDT entry points
+ * Error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_asm_##func (may be unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Same as DECLARE_IDTENTRY, but has an extra error_code argument for the
+ * C handler.
+ */
+#define DECLARE_IDTENTRY_ERRORCODE(vector, func) \
+ asmlinkage void asm_##func(void); \
+ asmlinkage void xen_asm_##func(void); \
+ __visible void func(struct pt_regs *regs, unsigned long error_code)
+
+/**
+ * DEFINE_IDTENTRY_ERRORCODE - Emit code for simple IDT entry points
+ * Error code pushed by hardware
+ * @func: Function name of the entry point
+ *
+ * Same as DEFINE_IDTENTRY, but has an extra error_code argument
+ */
+#define DEFINE_IDTENTRY_ERRORCODE(func) \
+static __always_inline void __##func(struct pt_regs *regs, \
+ unsigned long error_code); \
+ \
+__visible noinstr void func(struct pt_regs *regs, \
+ unsigned long error_code) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ __##func (regs, error_code); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs, \
+ unsigned long error_code)
+
+/**
+ * DECLARE_IDTENTRY_RAW - Declare functions for raw IDT entry points
+ * No error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY().
+ */
+#define DECLARE_IDTENTRY_RAW(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_RAW - Emit code for raw IDT entry points
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code with interrupts disabled.
+ *
+ * The macro is written so that it acts as a function definition. Append
+ * the body with a pair of curly brackets.
+ *
+ * Contrary to DEFINE_IDTENTRY(), this does not invoke the
+ * idtentry_enter/exit() helpers before and after the body invocation. This
+ * needs to be done in the body itself if applicable. Use this if extra work
+ * is required before the enter/exit() helpers are invoked.
+ */
+#define DEFINE_IDTENTRY_RAW(func) \
+__visible noinstr void func(struct pt_regs *regs)
+
+/**
+ * DECLARE_IDTENTRY_RAW_ERRORCODE - Declare functions for raw IDT entry points
+ * Error code pushed by hardware
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_ERRORCODE()
+ */
+#define DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func) \
+ DECLARE_IDTENTRY_ERRORCODE(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_RAW_ERRORCODE - Emit code for raw IDT entry points
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code with interrupts disabled.
+ *
+ * The macro is written so that it acts as a function definition. Append
+ * the body with a pair of curly brackets.
+ *
+ * Contrary to DEFINE_IDTENTRY_ERRORCODE(), this does not invoke the
+ * idtentry_enter/exit() helpers before and after the body invocation. This
+ * needs to be done in the body itself if applicable. Use this if extra work
+ * is required before the enter/exit() helpers are invoked.
+ */
+#define DEFINE_IDTENTRY_RAW_ERRORCODE(func) \
+__visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
+
+/**
+ * DECLARE_IDTENTRY_IRQ - Declare functions for device interrupt IDT entry
+ * points (common/spurious)
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_ERRORCODE()
+ */
+#define DECLARE_IDTENTRY_IRQ(vector, func) \
+ DECLARE_IDTENTRY_ERRORCODE(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points
+ * @func: Function name of the entry point
+ *
+ * The vector number is pushed by the low level entry stub and handed
+ * to the function as the error_code argument, which needs to be truncated
+ * to a u8 because the push is sign-extending.
+ *
+ * On 64-bit, idtentry_enter/exit() are invoked in the ASM entry code before
+ * and after switching to the interrupt stack. On 32-bit this happens in C.
+ *
+ * irq_enter/exit_rcu() are invoked before the function body, and the
+ * KVM L1D flush request is set.
+ */
+#define DEFINE_IDTENTRY_IRQ(func) \
+static __always_inline void __##func(struct pt_regs *regs, u8 vector); \
+ \
+__visible noinstr void func(struct pt_regs *regs, \
+ unsigned long error_code) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ irq_enter_rcu(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ __##func (regs, (u8)error_code); \
+ irq_exit_rcu(); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+
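
As a rough sketch (the handler name is hypothetical), a device-interrupt handler built on this macro receives the already-truncated vector:

	DEFINE_IDTENTRY_IRQ(common_example_interrupt)
	{
		/* 'vector' is the u8 recovered from the sign-extended push */
		pr_debug("device interrupt on vector %u\n", vector);
	}
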
+/**
+ * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_asm_##func (may be unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Maps to DECLARE_IDTENTRY().
+ */
+#define DECLARE_IDTENTRY_SYSVEC(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_SYSVEC - Emit code for system vector IDT entry points
+ * @func: Function name of the entry point
+ *
+ * idtentry_enter/exit() and irq_enter/exit_rcu() are invoked before the
+ * function body, and the KVM L1D flush request is set.
+ *
+ * Runs the function on the interrupt stack if the entry hit kernel mode.
+ */
+#define DEFINE_IDTENTRY_SYSVEC(func) \
+static void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ irq_enter_rcu(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ run_on_irqstack_cond(__##func, regs, regs); \
+ irq_exit_rcu(); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static noinline void __##func(struct pt_regs *regs)
+
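
A minimal usage sketch of DEFINE_IDTENTRY_SYSVEC (the handler name is hypothetical; assumes <asm/apic.h> for ack_APIC_irq()):

	DEFINE_IDTENTRY_SYSVEC(sysvec_example_ipi)
	{
		/*
		 * The wrapper already did irq_enter_rcu() and, if needed,
		 * switched to the irq stack before calling this body.
		 */
		ack_APIC_irq();
	}
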
+/**
+ * DEFINE_IDTENTRY_SYSVEC_SIMPLE - Emit code for simple system vector IDT
+ * entry points
+ * @func: Function name of the entry point
+ *
+ * Runs the function on the interrupted stack. No switch to IRQ stack and
+ * only the minimal __irq_enter/exit() handling.
+ *
+ * Only use this for 'empty' vectors like the reschedule IPI and the KVM
+ * posted interrupt vectors.
+ */
+#define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func) \
+static __always_inline void __##func(struct pt_regs *regs); \
+ \
+__visible noinstr void func(struct pt_regs *regs) \
+{ \
+ bool rcu_exit = idtentry_enter_cond_rcu(regs); \
+ \
+ instrumentation_begin(); \
+ __irq_enter_raw(); \
+ kvm_set_cpu_l1tf_flush_l1d(); \
+ __##func (regs); \
+ __irq_exit_raw(); \
+ instrumentation_end(); \
+ idtentry_exit_cond_rcu(regs, rcu_exit); \
+} \
+ \
+static __always_inline void __##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_IDTENTRY_XENCB - Declare functions for XEN HV callback entry point
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares three functions:
+ * - The ASM entry point: asm_##func
+ * - The XEN PV trap entry point: xen_asm_##func (may be unused)
+ * - The C handler called from the ASM entry point
+ *
+ * Maps to DECLARE_IDTENTRY(). Distinct entry point to handle the 32/64-bit
+ * difference.
+ */
+#define DECLARE_IDTENTRY_XENCB(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+#ifdef CONFIG_X86_64
+/**
+ * DECLARE_IDTENTRY_IST - Declare functions for IST handling IDT entry points
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_RAW, but also declares the NOIST C handler,
+ * which is called from the ASM entry point on user mode entry.
+ */
+#define DECLARE_IDTENTRY_IST(vector, func) \
+ DECLARE_IDTENTRY_RAW(vector, func); \
+ __visible void noist_##func(struct pt_regs *regs)
+
+/**
+ * DEFINE_IDTENTRY_IST - Emit code for IST entry points
+ * @func: Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW
+ */
+#define DEFINE_IDTENTRY_IST(func) \
+ DEFINE_IDTENTRY_RAW(func)
+
+/**
+ * DEFINE_IDTENTRY_NOIST - Emit code for NOIST entry points which
+ * belong to an IST entry point (MCE, DB)
+ * @func: Function name of the entry point. Must be the same as
+ * the function name of the corresponding IST variant
+ *
+ * Maps to DEFINE_IDTENTRY_RAW().
+ */
+#define DEFINE_IDTENTRY_NOIST(func) \
+ DEFINE_IDTENTRY_RAW(noist_##func)
+
+/**
+ * DECLARE_IDTENTRY_DF - Declare functions for double fault
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Maps to DECLARE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DECLARE_IDTENTRY_DF(vector, func) \
+ DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func)
+
+/**
+ * DEFINE_IDTENTRY_DF - Emit code for double fault
+ * @func: Function name of the entry point
+ *
+ * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
+ */
+#define DEFINE_IDTENTRY_DF(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+
+#else /* CONFIG_X86_64 */
+
+/* Maps to a regular IDTENTRY on 32bit for now */
+# define DECLARE_IDTENTRY_IST DECLARE_IDTENTRY
+# define DEFINE_IDTENTRY_IST DEFINE_IDTENTRY
+
+/**
+ * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Declares two functions:
+ * - The ASM entry point: asm_##func
+ * - The C handler called from the C shim
+ */
+#define DECLARE_IDTENTRY_DF(vector, func) \
+ asmlinkage void asm_##func(void); \
+ __visible void func(struct pt_regs *regs, \
+ unsigned long error_code, \
+ unsigned long address)
+
+/**
+ * DEFINE_IDTENTRY_DF - Emit code for double fault on 32bit
+ * @func: Function name of the entry point
+ *
+ * This is called through the doublefault shim which already provides
+ * cr2 in the address argument.
+ */
+#define DEFINE_IDTENTRY_DF(func) \
+__visible noinstr void func(struct pt_regs *regs, \
+ unsigned long error_code, \
+ unsigned long address)
+
+#endif /* !CONFIG_X86_64 */
+
+/* C-Code mapping */
+#define DECLARE_IDTENTRY_MCE DECLARE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_MCE DEFINE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_MCE_USER DEFINE_IDTENTRY_NOIST
+
+#define DECLARE_IDTENTRY_NMI DECLARE_IDTENTRY_RAW
+#define DEFINE_IDTENTRY_NMI DEFINE_IDTENTRY_RAW
+
+#define DECLARE_IDTENTRY_DEBUG DECLARE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_DEBUG DEFINE_IDTENTRY_IST
+#define DEFINE_IDTENTRY_DEBUG_USER DEFINE_IDTENTRY_NOIST
+
+/**
+ * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points
+ * @vector: Vector number (ignored for C)
+ * @func: Function name of the entry point
+ *
+ * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM
+ * indirection magic.
+ */
+#define DECLARE_IDTENTRY_XEN(vector, func) \
+ asmlinkage void xen_asm_exc_xen##func(void); \
+ asmlinkage void asm_exc_xen##func(void)
+
+#else /* !__ASSEMBLY__ */
+
+/*
+ * The ASM variants for DECLARE_IDTENTRY*() which emit the ASM entry stubs.
+ */
+#define DECLARE_IDTENTRY(vector, func) \
+ idtentry vector asm_##func func has_error_code=0
+
+#define DECLARE_IDTENTRY_ERRORCODE(vector, func) \
+ idtentry vector asm_##func func has_error_code=1
+
+/* Special case for 32bit IRET 'trap'. Do not emit ASM code */
+#define DECLARE_IDTENTRY_SW(vector, func)
+
+#define DECLARE_IDTENTRY_RAW(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+#define DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func) \
+ DECLARE_IDTENTRY_ERRORCODE(vector, func)
+
+/* Entries for common/spurious (device) interrupts */
+#define DECLARE_IDTENTRY_IRQ(vector, func) \
+ idtentry_irq vector func
+
+/* System vector entries */
+#define DECLARE_IDTENTRY_SYSVEC(vector, func) \
+ idtentry_sysvec vector func
+
+#ifdef CONFIG_X86_64
+# define DECLARE_IDTENTRY_MCE(vector, func) \
+ idtentry_mce_db vector asm_##func func
+
+# define DECLARE_IDTENTRY_DEBUG(vector, func) \
+ idtentry_mce_db vector asm_##func func
+
+# define DECLARE_IDTENTRY_DF(vector, func) \
+ idtentry_df vector asm_##func func
+
+# define DECLARE_IDTENTRY_XENCB(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+#else
+# define DECLARE_IDTENTRY_MCE(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+# define DECLARE_IDTENTRY_DEBUG(vector, func) \
+ DECLARE_IDTENTRY(vector, func)
+
+/* No ASM emitted for DF as this goes through a C shim */
+# define DECLARE_IDTENTRY_DF(vector, func)
+
+/* No ASM emitted for XEN hypervisor callback */
+# define DECLARE_IDTENTRY_XENCB(vector, func)
+
+#endif
+
+/* No ASM code emitted for NMI */
+#define DECLARE_IDTENTRY_NMI(vector, func)
+
+/* XEN NMI and DB wrapper */
+#define DECLARE_IDTENTRY_XEN(vector, func) \
+ idtentry vector asm_exc_xen##func exc_##func has_error_code=0
+
+/*
+ * ASM code to emit the common vector entry stubs where each stub is
+ * packed into 8 bytes.
+ *
+ * Note that the 'pushq imm8' is emitted via '.byte 0x6a, vector' because
+ * GCC treats the local vector variable as unsigned int and would expand
+ * all vectors above 0x7F to a 5-byte push. The original code did an
+ * adjustment of the vector number to be in the signed byte range to avoid
+ * this. While clever, it's mind-bogglingly counterintuitive and requires
+ * an odd conversion back to a real vector number in the C entry points.
+ * Using .byte achieves the same thing, and the only fixup needed in the C
+ * entry point is to mask off the bits above bit 7 because the push is
+ * sign-extending.
+ */
+ .align 8
+SYM_CODE_START(irq_entries_start)
+ vector=FIRST_EXTERNAL_VECTOR
+ pos = .
+ .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ UNWIND_HINT_IRET_REGS
+ .byte 0x6a, vector
+ jmp asm_common_interrupt
+ nop
+ /* Ensure that the above is 8 bytes max */
+ . = pos + 8
+ pos=pos+8
+ vector=vector+1
+ .endr
+SYM_CODE_END(irq_entries_start)
+
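
As a worked example of the fixup described above (the vector value is chosen for illustration): '.byte 0x6a, 0x80' pushes the sign-extended value 0xffffffffffffff80, and the C entry point recovers the vector by truncation:

	unsigned long pushed = (unsigned long)(long)(s8)0x80;
						/* 0xffffffffffffff80 */
	u8 vector = (u8)pushed;			/* 0x80, the real vector */
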
+#ifdef CONFIG_X86_LOCAL_APIC
+ .align 8
+SYM_CODE_START(spurious_entries_start)
+ vector=FIRST_SYSTEM_VECTOR
+ pos = .
+ .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+ UNWIND_HINT_IRET_REGS
+ .byte 0x6a, vector
+ jmp asm_spurious_interrupt
+ nop
+ /* Ensure that the above is 8 bytes max */
+ . = pos + 8
+ pos=pos+8
+ vector=vector+1
+ .endr
+SYM_CODE_END(spurious_entries_start)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The actual entry points. Note that DECLARE_IDTENTRY*() serves two
+ * purposes:
+ * - provide the function declarations when included from C code
+ * - emit the ASM stubs when included from entry_32/64.S
+ *
+ * This avoids duplicate defines and ensures that everything is consistent.
+ */
+
+/*
+ * Dummy trap number so the low level ASM macro vector number checks do not
+ * match, which results in emitting plain IDTENTRY stubs without bells and
+ * whistles.
+ */
+#define X86_TRAP_OTHER 0xFFFF
+
+/* Simple exception entry points. No hardware error code */
+DECLARE_IDTENTRY(X86_TRAP_DE, exc_divide_error);
+DECLARE_IDTENTRY(X86_TRAP_OF, exc_overflow);
+DECLARE_IDTENTRY(X86_TRAP_BR, exc_bounds);
+DECLARE_IDTENTRY(X86_TRAP_NM, exc_device_not_available);
+DECLARE_IDTENTRY(X86_TRAP_OLD_MF, exc_coproc_segment_overrun);
+DECLARE_IDTENTRY(X86_TRAP_SPURIOUS, exc_spurious_interrupt_bug);
+DECLARE_IDTENTRY(X86_TRAP_MF, exc_coprocessor_error);
+DECLARE_IDTENTRY(X86_TRAP_XF, exc_simd_coprocessor_error);
+
+/* 32bit software IRET trap. Do not emit ASM code */
+DECLARE_IDTENTRY_SW(X86_TRAP_IRET, iret_error);
+
+/* Simple exception entries with error code pushed by hardware */
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_TS, exc_invalid_tss);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_NP, exc_segment_not_present);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_SS, exc_stack_segment);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_GP, exc_general_protection);
+DECLARE_IDTENTRY_ERRORCODE(X86_TRAP_AC, exc_alignment_check);
+
+/* Raw exception entries which need extra work */
+DECLARE_IDTENTRY_RAW(X86_TRAP_UD, exc_invalid_op);
+DECLARE_IDTENTRY_RAW(X86_TRAP_BP, exc_int3);
+DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF, exc_page_fault);
+
+#ifdef CONFIG_X86_MCE
+DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
+#endif
+
+/* NMI */
+DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
+DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi);
+
+/* #DB */
+DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug);
+DECLARE_IDTENTRY_XEN(X86_TRAP_DB, debug);
+
+/* #DF */
+DECLARE_IDTENTRY_DF(X86_TRAP_DF, exc_double_fault);
+
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+#endif
+
+/* Device interrupts common/spurious */
+DECLARE_IDTENTRY_IRQ(X86_TRAP_OTHER, common_interrupt);
+#ifdef CONFIG_X86_LOCAL_APIC
+DECLARE_IDTENTRY_IRQ(X86_TRAP_OTHER, spurious_interrupt);
+#endif
+
+/* System vector entry points */
+#ifdef CONFIG_X86_LOCAL_APIC
+DECLARE_IDTENTRY_SYSVEC(ERROR_APIC_VECTOR, sysvec_error_interrupt);
+DECLARE_IDTENTRY_SYSVEC(SPURIOUS_APIC_VECTOR, sysvec_spurious_apic_interrupt);
+DECLARE_IDTENTRY_SYSVEC(LOCAL_TIMER_VECTOR, sysvec_apic_timer_interrupt);
+DECLARE_IDTENTRY_SYSVEC(X86_PLATFORM_IPI_VECTOR, sysvec_x86_platform_ipi);
+#endif
+
+#ifdef CONFIG_SMP
+DECLARE_IDTENTRY(RESCHEDULE_VECTOR, sysvec_reschedule_ipi);
+DECLARE_IDTENTRY_SYSVEC(IRQ_MOVE_CLEANUP_VECTOR, sysvec_irq_move_cleanup);
+DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR, sysvec_reboot);
+DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR, sysvec_call_function_single);
+DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR, sysvec_call_function);
+#endif
+
+#ifdef CONFIG_X86_LOCAL_APIC
+# ifdef CONFIG_X86_UV
+DECLARE_IDTENTRY_SYSVEC(UV_BAU_MESSAGE, sysvec_uv_bau_message);
+# endif
+
+# ifdef CONFIG_X86_MCE_THRESHOLD
+DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR, sysvec_threshold);
+# endif
+
+# ifdef CONFIG_X86_MCE_AMD
+DECLARE_IDTENTRY_SYSVEC(DEFERRED_ERROR_VECTOR, sysvec_deferred_error);
+# endif
+
+# ifdef CONFIG_X86_THERMAL_VECTOR
+DECLARE_IDTENTRY_SYSVEC(THERMAL_APIC_VECTOR, sysvec_thermal);
+# endif
+
+# ifdef CONFIG_IRQ_WORK
+DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
+# endif
+#endif
+
+#ifdef CONFIG_HAVE_KVM
+DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi);
+DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi);
+DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi);
+#endif
+
+#if IS_ENABLED(CONFIG_HYPERV)
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_STIMER0_VECTOR, sysvec_hyperv_stimer0);
+#endif
+
+#if IS_ENABLED(CONFIG_ACRN_GUEST)
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_acrn_hv_callback);
+#endif
+
+#ifdef CONFIG_XEN_PVHVM
+DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_xen_hvm_callback);
+#endif
+
+#undef X86_TRAP_OTHER
+
+#endif
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 8f1e94f29a16..a338a6deb950 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -89,6 +89,8 @@
#define INTEL_FAM6_COMETLAKE 0xA5
#define INTEL_FAM6_COMETLAKE_L 0xA6
+#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F
+
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index 2a7b3211ee7a..bacf68c4d70e 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -10,7 +10,6 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
void __iomem *
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 72fba0eeeb30..528c8a71fe7f 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -11,6 +11,13 @@
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>
+/*
+ * The irq entry code is in the noinstr section and the start/end of
+ * __irqentry_text is emitted via labels. Make the build fail if
+ * something moves a C function into the __irq_entry section.
+ */
+#define __irq_entry __invalid_section
+
static inline int irq_canonicalize(int irq)
{
return ((irq == 2) ? 9 : irq);
@@ -26,17 +33,14 @@ extern void fixup_irqs(void);
#ifdef CONFIG_HAVE_KVM
extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
-extern __visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs);
-extern __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs);
-extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs);
#endif
extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);
-extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+extern void __handle_irq(struct irq_desc *desc, struct pt_regs *regs);
-extern __visible void do_IRQ(struct pt_regs *regs);
+extern __visible void do_IRQ(struct pt_regs *regs, unsigned long vector);
extern void init_ISA_irqs(void);
@@ -46,7 +50,6 @@ extern void __init init_IRQ(void);
void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
bool exclude_self);
-extern __visible void smp_x86_platform_ipi(struct pt_regs *regs);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
deleted file mode 100644
index 187ce59aea28..000000000000
--- a/arch/x86/include/asm/irq_regs.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack, stored in the per-cpu area.
- *
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-#ifndef _ASM_X86_IRQ_REGS_H
-#define _ASM_X86_IRQ_REGS_H
-
-#include <asm/percpu.h>
-
-#define ARCH_HAS_OWN_IRQ_REGS
-
-DECLARE_PER_CPU(struct pt_regs *, irq_regs);
-
-static inline struct pt_regs *get_irq_regs(void)
-{
- return __this_cpu_read(irq_regs);
-}
-
-static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
-{
- struct pt_regs *old_regs;
-
- old_regs = get_irq_regs();
- __this_cpu_write(irq_regs, new_regs);
-
- return old_regs;
-}
-
-#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
new file mode 100644
index 000000000000..4ae66f097101
--- /dev/null
+++ b/arch/x86/include/asm/irq_stack.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_IRQ_STACK_H
+#define _ASM_X86_IRQ_STACK_H
+
+#include <linux/ptrace.h>
+
+#include <asm/processor.h>
+
+#ifdef CONFIG_X86_64
+static __always_inline bool irqstack_active(void)
+{
+ return __this_cpu_read(irq_count) != -1;
+}
+
+void asm_call_on_stack(void *sp, void *func, void *arg);
+
+static __always_inline void __run_on_irqstack(void *func, void *arg)
+{
+ void *tos = __this_cpu_read(hardirq_stack_ptr);
+
+ __this_cpu_add(irq_count, 1);
+ asm_call_on_stack(tos - 8, func, arg);
+ __this_cpu_sub(irq_count, 1);
+}
+
+#else /* CONFIG_X86_64 */
+static inline bool irqstack_active(void) { return false; }
+static inline void __run_on_irqstack(void *func, void *arg) { }
+#endif /* !CONFIG_X86_64 */
+
+static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
+ if (!regs)
+ return !irqstack_active();
+ return !user_mode(regs) && !irqstack_active();
+}
+
+static __always_inline void run_on_irqstack_cond(void *func, void *arg,
+ struct pt_regs *regs)
+{
+ void (*__func)(void *arg) = func;
+
+ lockdep_assert_irqs_disabled();
+
+ if (irq_needs_irq_stack(regs))
+ __run_on_irqstack(__func, arg);
+ else
+ __func(arg);
+}
+
+#endif
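
A minimal usage sketch of the helpers above (the function names are hypothetical): the caller hands its handler plus the regs pointer to run_on_irqstack_cond(), which performs the stack switch only when required:

	static void __handle_example(struct pt_regs *regs)
	{
		/* Runs on the hardirq stack when entered from kernel mode */
	}

	static void handle_example(struct pt_regs *regs)
	{
		/* Interrupts must be disabled, as asserted by the helper */
		run_on_irqstack_cond(__handle_example, regs, regs);
	}
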
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index 80b35e3adf03..800ffce0db29 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -10,7 +10,6 @@ static inline bool arch_irq_work_has_interrupt(void)
return boot_cpu_has(X86_FEATURE_APIC);
}
extern void arch_irq_work_raise(void);
-extern __visible void smp_irq_work_interrupt(struct pt_regs *regs);
#else
static inline bool arch_irq_work_has_interrupt(void)
{
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 8a0e56e1dcc9..02a0cf547d7b 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -17,7 +17,7 @@
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
-extern inline unsigned long native_save_fl(void)
+extern __always_inline unsigned long native_save_fl(void)
{
unsigned long flags;
@@ -44,12 +44,12 @@ extern inline void native_restore_fl(unsigned long flags)
:"memory", "cc");
}
-static inline void native_irq_disable(void)
+static __always_inline void native_irq_disable(void)
{
asm volatile("cli": : :"memory");
}
-static inline void native_irq_enable(void)
+static __always_inline void native_irq_enable(void)
{
asm volatile("sti": : :"memory");
}
@@ -74,22 +74,22 @@ static inline __cpuidle void native_halt(void)
#ifndef __ASSEMBLY__
#include <linux/types.h>
-static inline notrace unsigned long arch_local_save_flags(void)
+static __always_inline unsigned long arch_local_save_flags(void)
{
return native_save_fl();
}
-static inline notrace void arch_local_irq_restore(unsigned long flags)
+static __always_inline void arch_local_irq_restore(unsigned long flags)
{
native_restore_fl(flags);
}
-static inline notrace void arch_local_irq_disable(void)
+static __always_inline void arch_local_irq_disable(void)
{
native_irq_disable();
}
-static inline notrace void arch_local_irq_enable(void)
+static __always_inline void arch_local_irq_enable(void)
{
native_irq_enable();
}
@@ -115,7 +115,7 @@ static inline __cpuidle void halt(void)
/*
* For spinlocks, etc:
*/
-static inline notrace unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long arch_local_irq_save(void)
{
unsigned long flags = arch_local_save_flags();
arch_local_irq_disable();
@@ -159,12 +159,12 @@ static inline notrace unsigned long arch_local_irq_save(void)
#endif /* CONFIG_PARAVIRT_XXL */
#ifndef __ASSEMBLY__
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
}
-static inline int arch_irqs_disabled(void)
+static __always_inline int arch_irqs_disabled(void)
{
unsigned long flags = arch_local_save_flags();
@@ -172,38 +172,4 @@ static inline int arch_irqs_disabled(void)
}
#endif /* !__ASSEMBLY__ */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_TRACE_IRQFLAGS
-# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
-# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
-#else
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_X86_64
-# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
-# define LOCKDEP_SYS_EXIT_IRQ \
- TRACE_IRQS_ON; \
- sti; \
- call lockdep_sys_exit_thunk; \
- cli; \
- TRACE_IRQS_OFF;
-# else
-# define LOCKDEP_SYS_EXIT \
- pushl %eax; \
- pushl %ecx; \
- pushl %edx; \
- call lockdep_sys_exit; \
- popl %edx; \
- popl %ecx; \
- popl %eax;
-# define LOCKDEP_SYS_EXIT_IRQ
-# endif
-#else
-# define LOCKDEP_SYS_EXIT
-# define LOCKDEP_SYS_EXIT_IRQ
-#endif
-#endif /* __ASSEMBLY__ */
-
#endif
diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
index db7ba2feb947..0648190467ba 100644
--- a/arch/x86/include/asm/kaslr.h
+++ b/arch/x86/include/asm/kaslr.h
@@ -6,8 +6,10 @@ unsigned long kaslr_get_random_long(const char *purpose);
#ifdef CONFIG_RANDOMIZE_MEMORY
void kernel_randomize_memory(void);
+void init_trampoline_kaslr(void);
#else
static inline void kernel_randomize_memory(void) { }
+static inline void init_trampoline_kaslr(void) {}
#endif /* CONFIG_RANDOMIZE_MEMORY */
#endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1da5858501ca..f8998e97457f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1306,7 +1306,6 @@ struct kvm_arch_async_pf {
extern u64 __read_mostly host_efer;
extern struct kvm_x86_ops kvm_x86_ops;
-extern struct kmem_cache *x86_fpu_cache;
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
@@ -1671,7 +1670,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
unsigned long *vcpu_bitmap);
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 57fd1966c4ea..49d3a9edb06f 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -141,7 +141,7 @@ static inline void kvm_disable_steal_time(void)
return;
}
-static inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
return false;
}
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index f9cea081c05b..cf503824529c 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -127,6 +127,17 @@
#define MSR_AMD64_SMCA_MCx_DEADDR(x) (MSR_AMD64_SMCA_MC0_DEADDR + 0x10*(x))
#define MSR_AMD64_SMCA_MCx_MISCy(x, y) ((MSR_AMD64_SMCA_MC0_MISC1 + y) + (0x10*(x)))
+#define XEC(x, mask) (((x) >> 16) & mask)
+
+/* mce.kflags flag bits for logging etc. */
+#define MCE_HANDLED_CEC BIT_ULL(0)
+#define MCE_HANDLED_UC BIT_ULL(1)
+#define MCE_HANDLED_EXTLOG BIT_ULL(2)
+#define MCE_HANDLED_NFIT BIT_ULL(3)
+#define MCE_HANDLED_EDAC BIT_ULL(4)
+#define MCE_HANDLED_MCELOG BIT_ULL(5)
+#define MCE_IN_KERNEL_RECOV BIT_ULL(6)
+
/*
* This structure contains all data related to the MCE log. Also
* carries a signature to make it easier to find from external
@@ -142,14 +153,16 @@ struct mce_log_buffer {
struct mce entry[];
};
+/* Highest last */
enum mce_notifier_prios {
- MCE_PRIO_FIRST = INT_MAX,
- MCE_PRIO_UC = INT_MAX - 1,
- MCE_PRIO_EXTLOG = INT_MAX - 2,
- MCE_PRIO_NFIT = INT_MAX - 3,
- MCE_PRIO_EDAC = INT_MAX - 4,
- MCE_PRIO_MCELOG = 1,
- MCE_PRIO_LOWEST = 0,
+ MCE_PRIO_LOWEST,
+ MCE_PRIO_MCELOG,
+ MCE_PRIO_EDAC,
+ MCE_PRIO_NFIT,
+ MCE_PRIO_EXTLOG,
+ MCE_PRIO_UC,
+ MCE_PRIO_EARLY,
+ MCE_PRIO_CEC
};
struct notifier_block;
@@ -238,7 +251,7 @@ extern void mce_disable_bank(int bank);
/*
* Exception handler
*/
-void do_machine_check(struct pt_regs *, long);
+void do_machine_check(struct pt_regs *pt_regs);
/*
* Threshold handler
@@ -347,5 +360,4 @@ umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return
#endif
static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
-
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index bdeae9291e5c..0a301ad0b02f 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -45,7 +45,7 @@ typedef struct {
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
/*
* One bit per protection key says whether userspace can
- * use it or not. protected by mmap_sem.
+ * use it or not. protected by mmap_lock.
*/
u16 pkey_allocation_map;
s16 execute_only_pkey;
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index d30805ed323e..60b944dd2df1 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -54,20 +54,8 @@ typedef int (*hyperv_fill_flush_list_func)(
vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
#define hv_get_raw_timer() rdtsc_ordered()
-void hyperv_callback_vector(void);
-void hyperv_reenlightenment_vector(void);
-#ifdef CONFIG_TRACING
-#define trace_hyperv_callback_vector hyperv_callback_vector
-#endif
void hyperv_vector_handler(struct pt_regs *regs);
-/*
- * Routines for stimer0 Direct Mode handling.
- * On x86/x64, there are no percpu actions to take.
- */
-void hv_stimer0_vector_handler(struct pt_regs *regs);
-void hv_stimer0_callback_vector(void);
-
static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}
@@ -226,7 +214,6 @@ void hyperv_setup_mmu_ops(void);
void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);
-void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ef452b817f44..e8370e64a155 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -128,6 +128,10 @@
#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+/* SRBDS support */
+#define MSR_IA32_MCU_OPT_CTRL 0x00000123
+#define RNGDS_MITG_DIS BIT(0)
+
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index d52d1aacdd97..e7752b4038ff 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -262,7 +262,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
* combination with microcode which triggers a CPU buffer flush when the
* instruction is executed.
*/
-static inline void mds_clear_cpu_buffers(void)
+static __always_inline void mds_clear_cpu_buffers(void)
{
static const u16 ds = __KERNEL_DS;
@@ -283,7 +283,7 @@ static inline void mds_clear_cpu_buffers(void)
*
* Clear CPU buffers if the corresponding static key is enabled
*/
-static inline void mds_user_clear_cpu_buffers(void)
+static __always_inline void mds_user_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_user_clear))
mds_clear_cpu_buffers();
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 5afb5e0fe903..e896ebef8c24 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -39,23 +39,23 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
* pte_offset_map_lock() on 32-bit PAE kernels was reading the pmd_t with
* a "*pmdp" dereference done by GCC. Problem is, in certain places
* where pte_offset_map_lock() is called, concurrent page faults are
- * allowed, if the mmap_sem is hold for reading. An example is mincore
+ * allowed if the mmap_lock is held for reading. An example is mincore
* vs page faults vs MADV_DONTNEED. On the page fault side
* pmd_populate() rightfully does a set_64bit(), but if we're reading the
* pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
* because GCC will not read the 64-bit value of the pmd atomically.
*
* To fix this all places running pte_offset_map_lock() while holding the
- * mmap_sem in read mode, shall read the pmdp pointer using this
+ * mmap_lock in read mode shall read the pmdp pointer using this
* function to know if the pmd is null or not, and in turn to know if
* they can run pte_offset_map_lock() or pmd_trans_huge() or other pmd
* operations.
*
- * Without THP if the mmap_sem is held for reading, the pmd can only
+ * Without THP if the mmap_lock is held for reading, the pmd can only
* transition from null to not null while pmd_read_atomic() runs. So
* we can always return atomic pmd values with this function.
*
- * With THP if the mmap_sem is held for reading, the pmd can become
+ * With THP if the mmap_lock is held for reading, the pmd can become
* trans_huge or none or point to a pte (and in turn become "stable")
* at any time under pmd_read_atomic(). We could read it truly
* atomically here with an atomic64_read() for the THP enabled case (and
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b8f46bbe69f4..76aa21e8128d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -802,7 +802,7 @@ static inline int pmd_present(pmd_t pmd)
#ifdef CONFIG_NUMA_BALANCING
/*
* These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h
+ * comment in include/linux/pgtable.h
*/
static inline int pte_protnone(pte_t pte)
{
@@ -837,17 +837,6 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-static inline unsigned long pmd_index(unsigned long address)
-{
- return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
-}
-
-/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
@@ -856,25 +845,6 @@ static inline unsigned long pmd_index(unsigned long address)
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this function returns the index of the entry in the pte page which would
- * control the given virtual address
- *
- * Also define macro so we can test if pte_index is defined for arch.
- */
-#define pte_index pte_index
-static inline unsigned long pte_index(unsigned long address)
-{
- return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-}
-
-static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
-{
- return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
-}
-
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
@@ -907,12 +877,6 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
*/
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
-}
-
#define pud_leaf pud_large
static inline int pud_large(pud_t pud)
{
@@ -932,11 +896,6 @@ static inline int pud_large(pud_t pud)
}
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
-static inline unsigned long pud_index(unsigned long address)
-{
- return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
-}
-
#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
@@ -959,12 +918,6 @@ static inline unsigned long p4d_page_vaddr(p4d_t p4d)
*/
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
-/* Find an entry in the third-level page table.. */
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
- return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
-}
-
static inline int p4d_bad(p4d_t p4d)
{
unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
@@ -1037,30 +990,6 @@ static inline int pgd_none(pgd_t pgd)
#endif /* __ASSEMBLY__ */
-/*
- * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
- *
- * this macro returns the index of the entry in the pgd page which would
- * control the given virtual address
- */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-
-/*
- * pgd_offset() returns a (pgd_t *)
- * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
- */
-#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
-/*
- * a shortcut to get a pgd_t in a given mm
- */
-#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
-/*
- * a shortcut which implies the use of the kernel's pgd, instead
- * of a process's
- */
-#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-
-
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -1071,27 +1000,14 @@ void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
+
#ifdef CONFIG_X86_64
-/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
-static inline void __meminit init_trampoline_default(void)
-{
- /* Default trampoline pgd value */
- trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
-}
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
unsigned long end, pgprot_t prot);
-
-# ifdef CONFIG_RANDOMIZE_MEMORY
-void __meminit init_trampoline(void);
-# else
-# define init_trampoline init_trampoline_default
-# endif
-#else
-static inline void init_trampoline(void) { }
#endif
/* local pte updates need not use xchg for locking */
@@ -1546,7 +1462,6 @@ static inline bool arch_faults_on_old_pte(void)
return false;
}
-#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_H */
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 82dc0d8464fa..d7acae4120d5 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -32,30 +32,12 @@ extern pmd_t initial_pg_pmd[];
void paging_init(void);
void sync_initial_page_table(void);
-/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok( ..)'
- */
-#undef TEST_ACCESS_OK
-
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
-#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
- pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte))
-#else
-#define pte_offset_map(dir, address) \
- ((pte_t *)page_address(pmd_page(*(dir))) + pte_index((address)))
-#define pte_unmap(pte) do { } while (0)
-#endif
-
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr) \
do { \
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 8d03ffd43794..1b68d24dc6a0 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -186,10 +186,6 @@ extern void sync_global_pgds(unsigned long start, unsigned long end);
/* PTE - Level 1 access. */
-/* x86-64 always has all page tables mapped. */
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte))/* NOP */
-
/*
* Encode and de-code a swap entry
*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 29ee0c088009..42cd333616c4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -823,7 +823,7 @@ static inline void prefetch(const void *x)
* Useful for spinlocks to avoid one state transition in the
* cache coherency protocol:
*/
-static inline void prefetchw(const void *x)
+static __always_inline void prefetchw(const void *x)
{
alternative_input(BASE_PREFETCH, "prefetchw %P1",
X86_FEATURE_3DNOWPREFETCH,
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6d6475fdd327..ebedeab48704 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -123,7 +123,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
* On x86_64, vm86 mode is mercifully nonexistent, and we don't need
* the extra check.
*/
-static inline int user_mode(struct pt_regs *regs)
+static __always_inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index ec2c0a094b5d..5948218f35c5 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -86,28 +86,35 @@ int set_direct_map_default_noflush(struct page *page);
extern int kernel_set_to_readonly;
#ifdef CONFIG_X86_64
-static inline int set_mce_nospec(unsigned long pfn)
+/*
+ * Prevent speculative access to the page by either unmapping
+ * it (if we do not require access to any part of the page) or
+ * marking it uncacheable (if we want to try to retrieve data
+ * from non-poisoned lines in the page).
+ */
+static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
unsigned long decoy_addr;
int rc;
/*
- * Mark the linear address as UC to make sure we don't log more
- * errors because of speculative access to the page.
* We would like to just call:
- * set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
+ * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
* but doing that would radically increase the odds of a
* speculative access to the poison page because we'd have
* the virtual address of the kernel 1:1 mapping sitting
* around in registers.
* Instead we get tricky. We create a non-canonical address
* that looks just like the one we want, but has bit 63 flipped.
- * This relies on set_memory_uc() properly sanitizing any __pa()
+ * This relies on set_memory_XX() properly sanitizing any __pa()
* results with __PHYSICAL_MASK or PTE_PFN_MASK.
*/
decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
- rc = set_memory_uc(decoy_addr, 1);
+ if (unmap)
+ rc = set_memory_np(decoy_addr, 1);
+ else
+ rc = set_memory_uc(decoy_addr, 1);
if (rc)
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
return rc;
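
A numeric sketch of the decoy trick (the PAGE_OFFSET and pfn values are hypothetical): flipping bit 63 produces a non-canonical alias which the CPU will not speculatively dereference, while the __pa() sanitizing inside set_memory_XX() still resolves the intended page:

	/* Assume PAGE_OFFSET == 0xffff888000000000 and pfn == 0x1234 */
	unsigned long decoy = (0x1234UL << PAGE_SHIFT) +
			      (0xffff888000000000UL ^ BIT(63));
	/*
	 * decoy == 0x7fff888001234000: non-canonical, but masking with
	 * __PHYSICAL_MASK recovers physical address 0x1234000.
	 */
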
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ed8ec011a9fd..84b645cc8bc9 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -75,7 +75,17 @@ extern char _text[];
static inline bool kaslr_enabled(void)
{
- return !!(boot_params.hdr.loadflags & KASLR_FLAG);
+ return IS_ENABLED(CONFIG_RANDOMIZE_MEMORY) &&
+ !!(boot_params.hdr.loadflags & KASLR_FLAG);
+}
+
+/*
+ * Apply no randomization if KASLR was disabled at boot or if KASAN
+ * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
+ */
+static inline bool kaslr_memory_enabled(void)
+{
+ return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}
static inline unsigned long kaslr_offset(void)
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 6d37b8fcfc77..eb8e781c4353 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -7,6 +7,7 @@
#include <asm/nops.h>
#include <asm/processor-flags.h>
+#include <linux/irqflags.h>
#include <linux/jump_label.h>
/*
@@ -27,14 +28,14 @@ static inline unsigned long native_read_cr0(void)
return val;
}
-static inline unsigned long native_read_cr2(void)
+static __always_inline unsigned long native_read_cr2(void)
{
unsigned long val;
asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
-static inline void native_write_cr2(unsigned long val)
+static __always_inline void native_write_cr2(unsigned long val)
{
asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
@@ -129,7 +130,16 @@ static inline void native_wbinvd(void)
asm volatile("wbinvd": : :"memory");
}
-extern asmlinkage void native_load_gs_index(unsigned);
+extern asmlinkage void asm_load_gs_index(unsigned int selector);
+
+static inline void native_load_gs_index(unsigned int selector)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ asm_load_gs_index(selector);
+ local_irq_restore(flags);
+}
static inline unsigned long __read_cr4(void)
{
@@ -150,12 +160,12 @@ static inline void write_cr0(unsigned long x)
native_write_cr0(x);
}
-static inline unsigned long read_cr2(void)
+static __always_inline unsigned long read_cr2(void)
{
return native_read_cr2();
}
-static inline void write_cr2(unsigned long x)
+static __always_inline void write_cr2(unsigned long x)
{
native_write_cr2(x);
}
@@ -186,7 +196,7 @@ static inline void wbinvd(void)
#ifdef CONFIG_X86_64
-static inline void load_gs_index(unsigned selector)
+static inline void load_gs_index(unsigned int selector)
{
native_load_gs_index(selector);
}
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 14db05086bbf..5ae5a68e469d 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -87,7 +87,7 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack, char *log_lvl);
+ unsigned long *stack, const char *log_lvl);
/* The form of the top of the frame on the stack */
struct stack_frame {
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 67315fa3956a..6593b42cb379 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -64,7 +64,7 @@ extern void text_poke_finish(void);
#define DISP32_SIZE 4
-static inline int text_opcode_size(u8 opcode)
+static __always_inline int text_opcode_size(u8 opcode)
{
int size = 0;
@@ -118,12 +118,14 @@ extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;
#ifndef CONFIG_UML_X86
-static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+static __always_inline
+void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
}
-static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+static __always_inline
+void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
/*
* The int3 handler in entry_64.S adds a gap between the
@@ -138,7 +140,8 @@ static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
*(unsigned long *)regs->sp = val;
}
-static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+static __always_inline
+void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
int3_emulate_jmp(regs, func);
diff --git a/arch/x86/include/asm/trace/common.h b/arch/x86/include/asm/trace/common.h
index 57c8da027d99..f0f9bcdb74d9 100644
--- a/arch/x86/include/asm/trace/common.h
+++ b/arch/x86/include/asm/trace/common.h
@@ -5,12 +5,8 @@
DECLARE_STATIC_KEY_FALSE(trace_pagefault_key);
#define trace_pagefault_enabled() \
static_branch_unlikely(&trace_pagefault_key)
-DECLARE_STATIC_KEY_FALSE(trace_resched_ipi_key);
-#define trace_resched_ipi_enabled() \
- static_branch_unlikely(&trace_resched_ipi_key)
#else
static inline bool trace_pagefault_enabled(void) { return false; }
-static inline bool trace_resched_ipi_enabled(void) { return false; }
#endif
#endif
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 33b9d0f0aafe..88e7f0f3bf62 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -10,9 +10,6 @@
#ifdef CONFIG_X86_LOCAL_APIC
-extern int trace_resched_ipi_reg(void);
-extern void trace_resched_ipi_unreg(void);
-
DECLARE_EVENT_CLASS(x86_irq_vector,
TP_PROTO(int vector),
@@ -37,18 +34,6 @@ DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
TP_PROTO(int vector), \
TP_ARGS(vector), NULL, NULL);
-#define DEFINE_RESCHED_IPI_EVENT(name) \
-DEFINE_EVENT_FN(x86_irq_vector, name##_entry, \
- TP_PROTO(int vector), \
- TP_ARGS(vector), \
- trace_resched_ipi_reg, \
- trace_resched_ipi_unreg); \
-DEFINE_EVENT_FN(x86_irq_vector, name##_exit, \
- TP_PROTO(int vector), \
- TP_ARGS(vector), \
- trace_resched_ipi_reg, \
- trace_resched_ipi_unreg);
-
/*
* local_timer - called when entering/exiting a local timer interrupt
* vector handler
@@ -99,7 +84,7 @@ TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
/*
* reschedule - called when entering/exiting a reschedule vector handler
*/
-DEFINE_RESCHED_IPI_EVENT(reschedule);
+DEFINE_IRQ_VECTOR_EVENT(reschedule);
/*
* call_function - called when entering/exiting a call function interrupt
diff --git a/arch/x86/include/asm/trapnr.h b/arch/x86/include/asm/trapnr.h
new file mode 100644
index 000000000000..082f45631fa9
--- /dev/null
+++ b/arch/x86/include/asm/trapnr.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_TRAPNR_H
+#define _ASM_X86_TRAPNR_H
+
+/* Interrupts/Exceptions */
+
+#define X86_TRAP_DE 0 /* Divide-by-zero */
+#define X86_TRAP_DB 1 /* Debug */
+#define X86_TRAP_NMI 2 /* Non-maskable Interrupt */
+#define X86_TRAP_BP 3 /* Breakpoint */
+#define X86_TRAP_OF 4 /* Overflow */
+#define X86_TRAP_BR 5 /* Bound Range Exceeded */
+#define X86_TRAP_UD 6 /* Invalid Opcode */
+#define X86_TRAP_NM 7 /* Device Not Available */
+#define X86_TRAP_DF 8 /* Double Fault */
+#define X86_TRAP_OLD_MF 9 /* Coprocessor Segment Overrun */
+#define X86_TRAP_TS 10 /* Invalid TSS */
+#define X86_TRAP_NP 11 /* Segment Not Present */
+#define X86_TRAP_SS 12 /* Stack Segment Fault */
+#define X86_TRAP_GP 13 /* General Protection Fault */
+#define X86_TRAP_PF 14 /* Page Fault */
+#define X86_TRAP_SPURIOUS 15 /* Spurious Interrupt */
+#define X86_TRAP_MF 16 /* x87 Floating-Point Exception */
+#define X86_TRAP_AC 17 /* Alignment Check */
+#define X86_TRAP_MC 18 /* Machine Check */
+#define X86_TRAP_XF 19 /* SIMD Floating-Point Exception */
+#define X86_TRAP_VE 20 /* Virtualization Exception */
+#define X86_TRAP_CP 21 /* Control Protection Exception */
+#define X86_TRAP_IRET 32 /* IRET Exception */
+
+#endif
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 2ae904bf25e4..714b1a30e7b0 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -6,85 +6,9 @@
#include <linux/kprobes.h>
#include <asm/debugreg.h>
+#include <asm/idtentry.h>
#include <asm/siginfo.h> /* TRAP_TRACE, ... */
-#define dotraplinkage __visible
-
-asmlinkage void divide_error(void);
-asmlinkage void debug(void);
-asmlinkage void nmi(void);
-asmlinkage void int3(void);
-asmlinkage void overflow(void);
-asmlinkage void bounds(void);
-asmlinkage void invalid_op(void);
-asmlinkage void device_not_available(void);
-#ifdef CONFIG_X86_64
-asmlinkage void double_fault(void);
-#endif
-asmlinkage void coprocessor_segment_overrun(void);
-asmlinkage void invalid_TSS(void);
-asmlinkage void segment_not_present(void);
-asmlinkage void stack_segment(void);
-asmlinkage void general_protection(void);
-asmlinkage void page_fault(void);
-asmlinkage void async_page_fault(void);
-asmlinkage void spurious_interrupt_bug(void);
-asmlinkage void coprocessor_error(void);
-asmlinkage void alignment_check(void);
-#ifdef CONFIG_X86_MCE
-asmlinkage void machine_check(void);
-#endif /* CONFIG_X86_MCE */
-asmlinkage void simd_coprocessor_error(void);
-
-#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
-asmlinkage void xen_divide_error(void);
-asmlinkage void xen_xennmi(void);
-asmlinkage void xen_xendebug(void);
-asmlinkage void xen_int3(void);
-asmlinkage void xen_overflow(void);
-asmlinkage void xen_bounds(void);
-asmlinkage void xen_invalid_op(void);
-asmlinkage void xen_device_not_available(void);
-asmlinkage void xen_double_fault(void);
-asmlinkage void xen_coprocessor_segment_overrun(void);
-asmlinkage void xen_invalid_TSS(void);
-asmlinkage void xen_segment_not_present(void);
-asmlinkage void xen_stack_segment(void);
-asmlinkage void xen_general_protection(void);
-asmlinkage void xen_page_fault(void);
-asmlinkage void xen_spurious_interrupt_bug(void);
-asmlinkage void xen_coprocessor_error(void);
-asmlinkage void xen_alignment_check(void);
-#ifdef CONFIG_X86_MCE
-asmlinkage void xen_machine_check(void);
-#endif /* CONFIG_X86_MCE */
-asmlinkage void xen_simd_coprocessor_error(void);
-#endif
-
-dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code);
-dotraplinkage void do_debug(struct pt_regs *regs, long error_code);
-dotraplinkage void do_nmi(struct pt_regs *regs, long error_code);
-dotraplinkage void do_int3(struct pt_regs *regs, long error_code);
-dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
-dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
-dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
-dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
-dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
-dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
-dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
-dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code);
-dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code);
-dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
-dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code);
-dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code);
-dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code);
-dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code);
-#ifdef CONFIG_X86_32
-dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code);
-#endif
-dotraplinkage void do_mce(struct pt_regs *regs, long error_code);
-
#ifdef CONFIG_X86_64
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
asmlinkage __visible notrace
@@ -92,6 +16,11 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
void __init trap_init(void);
#endif
+#ifdef CONFIG_X86_F00F_BUG
+/* For handling the F00F bug */
+void handle_invalid_op(struct pt_regs *regs);
+#endif
+
static inline int get_si_code(unsigned long condition)
{
if (condition & DR_STEP)
@@ -105,16 +34,6 @@ static inline int get_si_code(unsigned long condition)
extern int panic_on_unrecovered_nmi;
void math_emulate(struct math_emu_info *);
-#ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
-asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
-asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
-#endif
-
-void smp_apic_timer_interrupt(struct pt_regs *regs);
-void smp_spurious_interrupt(struct pt_regs *regs);
-void smp_error_interrupt(struct pt_regs *regs);
-asmlinkage void smp_irq_move_cleanup_interrupt(void);
#ifdef CONFIG_VMAP_STACK
void __noreturn handle_stack_overflow(const char *message,
@@ -122,31 +41,6 @@ void __noreturn handle_stack_overflow(const char *message,
unsigned long fault_address);
#endif
-/* Interrupts/Exceptions */
-enum {
- X86_TRAP_DE = 0, /* 0, Divide-by-zero */
- X86_TRAP_DB, /* 1, Debug */
- X86_TRAP_NMI, /* 2, Non-maskable Interrupt */
- X86_TRAP_BP, /* 3, Breakpoint */
- X86_TRAP_OF, /* 4, Overflow */
- X86_TRAP_BR, /* 5, Bound Range Exceeded */
- X86_TRAP_UD, /* 6, Invalid Opcode */
- X86_TRAP_NM, /* 7, Device Not Available */
- X86_TRAP_DF, /* 8, Double Fault */
- X86_TRAP_OLD_MF, /* 9, Coprocessor Segment Overrun */
- X86_TRAP_TS, /* 10, Invalid TSS */
- X86_TRAP_NP, /* 11, Segment Not Present */
- X86_TRAP_SS, /* 12, Stack Segment Fault */
- X86_TRAP_GP, /* 13, General Protection Fault */
- X86_TRAP_PF, /* 14, Page Fault */
- X86_TRAP_SPURIOUS, /* 15, Spurious Interrupt */
- X86_TRAP_MF, /* 16, x87 Floating-Point Exception */
- X86_TRAP_AC, /* 17, Alignment Check */
- X86_TRAP_MC, /* 18, Machine Check */
- X86_TRAP_XF, /* 19, SIMD Floating-Point Exception */
- X86_TRAP_IRET = 32, /* 32, IRET Exception */
-};
-
/*
* Page fault error code bits:
*
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index d8f283b9a569..18dfa07d3ef0 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -504,12 +504,12 @@ do { \
* We want the unsafe accessors to always be inlined and use
* the error labels - thus the macro games.
*/
-#define unsafe_copy_loop(dst, src, len, type, label) \
- while (len >= sizeof(type)) { \
- unsafe_put_user(*(type *)src,(type __user *)dst,label); \
- dst += sizeof(type); \
- src += sizeof(type); \
- len -= sizeof(type); \
+#define unsafe_copy_loop(dst, src, len, type, label) \
+ while (len >= sizeof(type)) { \
+ unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
}
#define unsafe_copy_to_user(_dst,_src,_len,label) \
@@ -523,5 +523,21 @@ do { \
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
} while (0)
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ int __kr_err; \
+ \
+ __get_user_size(*((type *)(dst)), (__force type __user *)(src), \
+ sizeof(type), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+ __put_user_size(*((type *)(src)), (__force type __user *)(dst), \
+ sizeof(type), err_label)
+
#endif /* _ASM_X86_UACCESS_H */
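Aside (illustrative, not part of the patch): the nofault helpers above are
consumed by the generic kernel-access code in mm/maccess.c. A minimal,
hypothetical sketch of the calling convention, assuming the caller has
already done pagefault_disable() as the real users do:

	static long read_kernel_long_nofault(const void *addr, long *val)
	{
		__get_kernel_nofault(val, addr, long, efault);
		return 0;
	efault:
		return -EFAULT;
	}

The err_label convention lets a run of accesses share one unwind path
instead of checking a return code after every read.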
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 13687bf0e0a9..f1188bd47658 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -12,6 +12,8 @@
#define _ASM_X86_UV_UV_BAU_H
#include <linux/bitmap.h>
+#include <asm/idtentry.h>
+
#define BITSPERBYTE 8
/*
@@ -799,12 +801,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
bitmap_zero(&dstp->bits, nbits);
}
-extern void uv_bau_message_intr1(void);
-#ifdef CONFIG_TRACING
-#define trace_uv_bau_message_intr1 uv_bau_message_intr1
-#endif
-extern void uv_bau_timeout_intr1(void);
-
struct atomic_short {
short counter;
};
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index 9a6dc9b4ec99..fb81fea99093 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -271,6 +271,24 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
return __vdso_data;
}
+static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return true;
+}
+#define vdso_clocksource_ok arch_vdso_clocksource_ok
+
+/*
+ * Clocksource read value validation to handle PV and HyperV clocksources
+ * which can be invalidated asynchronously and indicate invalidation by
+ * returning U64_MAX, which can be effectively tested by checking for a
+ * negative value after casting it to s64.
+ */
+static inline bool arch_vdso_cycles_ok(u64 cycles)
+{
+ return (s64)cycles >= 0;
+}
+#define vdso_cycles_ok arch_vdso_cycles_ok
+
/*
* x86 specific delta calculation.
*
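Aside (hypothetical standalone demo, not from this series): the (s64) cast
works because an asynchronously invalidated clocksource returns U64_MAX;
one signed comparison rejects that sentinel along with anything else that
has bit 63 set, a range a real TSC cannot plausibly reach:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t good = 123456789ULL;	/* normal cycle count */
		uint64_t bad = UINT64_MAX;	/* invalidated read */

		assert((int64_t)good >= 0);	/* accepted */
		assert((int64_t)bad < 0);	/* rejected */
		return 0;
	}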
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index d50c7b747d8b..ba4c1b15908b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -38,11 +38,11 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/pgtable.h>
#include <trace/events/xen.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 790ce08e41f2..5941e18edd5a 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -11,7 +11,6 @@
#include <asm/extable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 955c2a2e1cf9..db9adc081c5a 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -35,6 +35,7 @@ struct mce {
__u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
__u64 ppin; /* Protected Processor Inventory Number */
__u32 microcode; /* Microcode revision */
+ __u64 kflags; /* Internal kernel use */
};
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 8ef4369a4f06..e77261db2391 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,6 +28,10 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
+# With some compiler versions the generated code results in boot hangs, caused
+# by several compilation units. To be safe, disable all instrumentation.
+KCSAN_SANITIZE := n
+
OBJECT_FILES_NON_STANDARD_test_nx.o := y
OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 683ed9e12e6b..7bdc0239a943 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -20,11 +20,11 @@
#include <linux/pci.h>
#include <linux/efi-bgrt.h>
#include <linux/serial_core.h>
+#include <linux/pgtable.h>
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
-#include <asm/pgtable.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/io.h>
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index ed3b04483972..cc1fea76aab0 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -10,9 +10,9 @@
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
+#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/realmode.h>
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index cd617979b7fc..8fd39ff74a49 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -18,7 +18,6 @@
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/cacheflush.h>
@@ -1012,28 +1011,29 @@ struct bp_patching_desc {
static struct bp_patching_desc *bp_desc;
-static inline struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
+static __always_inline
+struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
- struct bp_patching_desc *desc = READ_ONCE(*descp); /* rcu_dereference */
+ struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
- if (!desc || !atomic_inc_not_zero(&desc->refs))
+ if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
return NULL;
return desc;
}
-static inline void put_desc(struct bp_patching_desc *desc)
+static __always_inline void put_desc(struct bp_patching_desc *desc)
{
smp_mb__before_atomic();
- atomic_dec(&desc->refs);
+ arch_atomic_dec(&desc->refs);
}
-static inline void *text_poke_addr(struct text_poke_loc *tp)
+static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
{
return _stext + tp->rel_addr;
}
-static int notrace patch_cmp(const void *key, const void *elt)
+static __always_inline int patch_cmp(const void *key, const void *elt)
{
struct text_poke_loc *tp = (struct text_poke_loc *) elt;
@@ -1043,9 +1043,8 @@ static int notrace patch_cmp(const void *key, const void *elt)
return 1;
return 0;
}
-NOKPROBE_SYMBOL(patch_cmp);
-int notrace poke_int3_handler(struct pt_regs *regs)
+int noinstr poke_int3_handler(struct pt_regs *regs)
{
struct bp_patching_desc *desc;
struct text_poke_loc *tp;
@@ -1078,9 +1077,9 @@ int notrace poke_int3_handler(struct pt_regs *regs)
* Skip the binary search if there is a single member in the vector.
*/
if (unlikely(desc->nr_entries > 1)) {
- tp = bsearch(ip, desc->vec, desc->nr_entries,
- sizeof(struct text_poke_loc),
- patch_cmp);
+ tp = __inline_bsearch(ip, desc->vec, desc->nr_entries,
+ sizeof(struct text_poke_loc),
+ patch_cmp);
if (!tp)
goto out_put;
} else {
@@ -1119,7 +1118,6 @@ out_put:
put_desc(desc);
return ret;
}
-NOKPROBE_SYMBOL(poke_int3_handler);
#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
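Aside on the conversion above: poke_int3_handler() runs from the INT3
exception before the normal entry bookkeeping is done, so nothing it calls
may be traced or instrumented. That rules out the out-of-line bsearch()
with its indirect comparator call; __inline_bsearch() with an
__always_inline comparator compiles to straight-line code inside the
noinstr function. A rough sketch of such an always-inlined binary search
(illustrative, not the lib/bsearch.c implementation):

	static __always_inline void *
	bsearch_sketch(const void *key, const void *base, size_t num,
		       size_t size, int (*cmp)(const void *, const void *))
	{
		while (num > 0) {
			size_t mid = num / 2;
			const char *p = (const char *)base + mid * size;
			int r = cmp(key, p);

			if (!r)
				return (void *)p;
			if (r > 0) {
				base = p + size;
				num -= mid + 1;
			} else {
				num = mid;
			}
		}
		return NULL;
	}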
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 16133819415c..17cb5b933dcf 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -33,7 +33,6 @@
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -159,7 +158,7 @@ static void dump_leak(void)
return;
dump = 1;
- show_stack(NULL, NULL);
+ show_stack(NULL, NULL, KERN_ERR);
debug_dma_dump_mappings(NULL);
}
#endif
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index b6b3297851f3..18f6b7c4bd79 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -18,9 +18,11 @@
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
+#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
@@ -33,6 +35,7 @@ static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
{}
};
@@ -50,6 +53,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
@@ -65,6 +69,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4b1d31be50b4..e0e2f020ec02 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1088,23 +1088,14 @@ static void local_apic_timer_interrupt(void)
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
-__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- /*
- * NOTE! We'd better ACK the irq immediately,
- * because timer handling can be slow.
- *
- * update_process_times() expects us to have done irq_enter().
- * Besides, if we don't timer interrupts ignore the global
- * interrupt lock, which is the WrongThing (tm) to do.
- */
- entering_ack_irq();
+ ack_APIC_irq();
trace_local_timer_entry(LOCAL_TIMER_VECTOR);
local_apic_timer_interrupt();
trace_local_timer_exit(LOCAL_TIMER_VECTOR);
- exiting_irq();
set_irq_regs(old_regs);
}
@@ -2060,7 +2051,7 @@ void __init init_apic_mappings(void)
unsigned int new_apicid;
if (apic_validate_deadline_timer())
- pr_debug("TSC deadline timer available\n");
+ pr_info("TSC deadline timer available\n");
if (x2apic_mode) {
boot_cpu_physical_apicid = read_apic_id();
@@ -2120,15 +2111,21 @@ void __init register_lapic_address(unsigned long address)
* Local APIC interrupts
*/
-/*
- * This interrupt should _never_ happen with our APIC/SMP architecture
+/**
+ * spurious_interrupt - Catch all for interrupts raised on unused vectors
+ * @regs: Pointer to pt_regs on stack
+ * @vector: The vector number
+ *
+ * This is invoked from ASM entry code to catch all interrupts which
+ * trigger on an entry which is routed to the common_spurious idtentry
+ * point.
+ *
+ * Also called from sysvec_spurious_apic_interrupt().
*/
-__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_IRQ(spurious_interrupt)
{
- u8 vector = ~regs->orig_ax;
u32 v;
- entering_irq();
trace_spurious_apic_entry(vector);
inc_irq_stat(irq_spurious_count);
@@ -2158,13 +2155,17 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
}
out:
trace_spurious_apic_exit(vector);
- exiting_irq();
+}
+
+DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
+{
+ __spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
}
/*
* This interrupt should never happen with our APIC/SMP architecture
*/
-__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
{
static const char * const error_interrupt_reason[] = {
"Send CS error", /* APIC Error Bit 0 */
@@ -2178,7 +2179,6 @@ __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
};
u32 v, i = 0;
- entering_irq();
trace_error_apic_entry(ERROR_APIC_VECTOR);
/* First tickle the hardware, only then report what went on. -- REW */
@@ -2202,7 +2202,6 @@ __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
apic_printk(APIC_DEBUG, KERN_CONT "\n");
trace_error_apic_exit(ERROR_APIC_VECTOR);
- exiting_irq();
}
/**
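Aside: every smp_*_interrupt() to DEFINE_IDTENTRY_SYSVEC conversion in
this file follows the same pattern. Roughly (names and details vary
between kernel versions; see asm/idtentry.h for the real macro), the
macro emits a noinstr entry stub and keeps the handler body
instrumentable:

	/* Sketch of what DEFINE_IDTENTRY_SYSVEC(func) provides: */
	__visible noinstr void func(struct pt_regs *regs)
	{
		/* the state tracking entering_irq()/exiting_irq() did */
		idtentry_enter(regs);
		instrumentation_begin();
		irq_enter_rcu();
		__func(regs);		/* the body written in the C file */
		irq_exit_rcu();
		instrumentation_end();
		idtentry_exit(regs);
	}

That is why the open-coded entering_ack_irq()/exiting_irq() pairs
disappear and only the ack_APIC_irq() call remains in the handler body.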
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index cdf45b4700f2..35edd57f064a 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -12,11 +12,11 @@
*/
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/numachip/numachip.h>
#include <asm/numachip/numachip_csr.h>
-#include <asm/pgtable.h>
#include "local.h"
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 159bd0cb8548..5cbaca58af95 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -115,7 +115,8 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force)
* denote it as spurious which is no harm as this is a rare event
* and interrupt handlers have to cope with spurious interrupts
* anyway. If the vector is unused, then it is marked so it won't
- * trigger the 'No irq handler for vector' warning in do_IRQ().
+ * trigger the 'No irq handler for vector' warning in
+ * common_interrupt().
*
* This requires to hold vector lock to prevent concurrent updates to
* the affected vector.
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 67768e54438b..c48be6e1f676 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -861,13 +861,13 @@ static void free_moved_vector(struct apic_chip_data *apicd)
apicd->move_in_progress = 0;
}
-asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
{
struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
struct apic_chip_data *apicd;
struct hlist_node *tmp;
- entering_ack_irq();
+ ack_APIC_irq();
/* Prevent vectors vanishing under us */
raw_spin_lock(&vector_lock);
@@ -892,7 +892,6 @@ asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
}
raw_spin_unlock(&vector_lock);
- exiting_irq();
}
static void __send_cleanup_vector(struct apic_chip_data *apicd)
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index c2a47016f243..828be792231e 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -57,9 +57,6 @@ int main(void)
BLANK();
#undef ENTRY
- OFFSET(TSS_ist, tss_struct, x86_tss.ist);
- DEFINE(DB_STACK_OFFSET, offsetof(struct cea_exception_stacks, DB_stack) -
- offsetof(struct cea_exception_stacks, DB1_stack));
BLANK();
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 7dc4ad68eb41..dba6a83bc349 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -13,6 +13,9 @@ endif
KCOV_INSTRUMENT_common.o := n
KCOV_INSTRUMENT_perf_event.o := n
+# As above, instrumenting secondary CPU boot code causes boot hangs.
+KCSAN_SANITIZE_common.o := n
+
# Make sure load_percpu_segment has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_common.o := $(nostackp)
diff --git a/arch/x86/kernel/cpu/acrn.c b/arch/x86/kernel/cpu/acrn.c
index 676022e71791..1da9b1c9a2db 100644
--- a/arch/x86/kernel/cpu/acrn.c
+++ b/arch/x86/kernel/cpu/acrn.c
@@ -10,10 +10,10 @@
*/
#include <linux/interrupt.h>
-#include <asm/acrn.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hypervisor.h>
+#include <asm/idtentry.h>
#include <asm/irq_regs.h>
static uint32_t __init acrn_detect(void)
@@ -24,7 +24,7 @@ static uint32_t __init acrn_detect(void)
static void __init acrn_init_platform(void)
{
/* Setup the IDT for ACRN hypervisor callback */
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, acrn_hv_callback_vector);
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_acrn_hv_callback);
}
static bool acrn_x2apic_available(void)
@@ -39,7 +39,7 @@ static bool acrn_x2apic_available(void)
static void (*acrn_intr_handler)(void);
-__visible void __irq_entry acrn_hv_vector_handler(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -50,13 +50,12 @@ __visible void __irq_entry acrn_hv_vector_handler(struct pt_regs *regs)
* will block the interrupt whose vector is lower than
* HYPERVISOR_CALLBACK_VECTOR.
*/
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
if (acrn_intr_handler)
acrn_intr_handler();
- exiting_irq();
set_irq_regs(old_regs);
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ed54b3b21c39..0b71970d2d3d 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -15,6 +15,7 @@
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
+#include <linux/pgtable.h>
#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
@@ -26,7 +27,6 @@
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
-#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
@@ -41,6 +41,7 @@ static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_print_mitigation(void);
static void __init taa_select_mitigation(void);
+static void __init srbds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
@@ -108,6 +109,7 @@ void __init check_bugs(void)
l1tf_select_mitigation();
mds_select_mitigation();
taa_select_mitigation();
+ srbds_select_mitigation();
/*
* As MDS and TAA mitigations are inter-related, print MDS
@@ -398,6 +400,97 @@ static int __init tsx_async_abort_parse_cmdline(char *str)
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "SRBDS: " fmt
+
+enum srbds_mitigations {
+ SRBDS_MITIGATION_OFF,
+ SRBDS_MITIGATION_UCODE_NEEDED,
+ SRBDS_MITIGATION_FULL,
+ SRBDS_MITIGATION_TSX_OFF,
+ SRBDS_MITIGATION_HYPERVISOR,
+};
+
+static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
+
+static const char * const srbds_strings[] = {
+ [SRBDS_MITIGATION_OFF] = "Vulnerable",
+ [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
+ [SRBDS_MITIGATION_FULL] = "Mitigation: Microcode",
+ [SRBDS_MITIGATION_TSX_OFF] = "Mitigation: TSX disabled",
+ [SRBDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status",
+};
+
+static bool srbds_off;
+
+void update_srbds_msr(void)
+{
+ u64 mcu_ctrl;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+
+ if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
+ return;
+
+ rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+
+ switch (srbds_mitigation) {
+ case SRBDS_MITIGATION_OFF:
+ case SRBDS_MITIGATION_TSX_OFF:
+ mcu_ctrl |= RNGDS_MITG_DIS;
+ break;
+ case SRBDS_MITIGATION_FULL:
+ mcu_ctrl &= ~RNGDS_MITG_DIS;
+ break;
+ default:
+ break;
+ }
+
+ wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
+}
+
+static void __init srbds_select_mitigation(void)
+{
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return;
+
+ /*
+ * Check to see if this is one of the MDS_NO systems supporting
+ * TSX that are only exposed to SRBDS when TSX is enabled.
+ */
+ ia32_cap = x86_read_arch_cap_msr();
+ if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
+ srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
+ else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
+ else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
+ srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
+ else if (cpu_mitigations_off() || srbds_off)
+ srbds_mitigation = SRBDS_MITIGATION_OFF;
+
+ update_srbds_msr();
+ pr_info("%s\n", srbds_strings[srbds_mitigation]);
+}
+
+static int __init srbds_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!boot_cpu_has_bug(X86_BUG_SRBDS))
+ return 0;
+
+ srbds_off = !strcmp(str, "off");
+ return 0;
+}
+early_param("srbds", srbds_parse_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V1 : " fmt
enum spectre_v1_mitigation {
@@ -495,7 +588,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline);
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
SPECTRE_V2_NONE;
-static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
@@ -641,15 +736,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
break;
}
- /*
- * At this point, an STIBP mode other than "off" has been set.
- * If STIBP support is not being forced, check if STIBP always-on
- * is preferred.
- */
- if (mode != SPECTRE_V2_USER_STRICT &&
- boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
- mode = SPECTRE_V2_USER_STRICT_PREFERRED;
-
/* Initialize Indirect Branch Prediction Barrier */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
@@ -672,23 +758,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
static_key_enabled(&switch_mm_always_ibpb) ?
"always-on" : "conditional");
+
+ spectre_v2_user_ibpb = mode;
}
- /* If enhanced IBRS is enabled no STIBP required */
- if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ /*
+ * If enhanced IBRS is enabled or SMT impossible, STIBP is not
+ * required.
+ */
+ if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return;
/*
- * If SMT is not possible or STIBP is not available clear the STIBP
- * mode.
+ * At this point, an STIBP mode other than "off" has been set.
+ * If STIBP support is not being forced, check if STIBP always-on
+ * is preferred.
+ */
+ if (mode != SPECTRE_V2_USER_STRICT &&
+ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+ mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+ /*
+ * If STIBP is not available, clear the STIBP mode.
*/
- if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
mode = SPECTRE_V2_USER_NONE;
+
+ spectre_v2_user_stibp = mode;
+
set_mode:
- spectre_v2_user = mode;
- /* Only print the STIBP mode when SMT possible */
- if (smt_possible)
- pr_info("%s\n", spectre_v2_user_strings[mode]);
+ pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
@@ -921,7 +1020,7 @@ void cpu_bugs_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
@@ -1164,14 +1263,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
case PR_SPEC_ENABLE:
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;
/*
* Indirect branch speculation is always disabled in strict
- * mode.
+ * mode. It also cannot be enabled if it was force-disabled by a
+ * previous prctl call.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
+ task_spec_ib_force_disable(task))
return -EPERM;
task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
@@ -1182,10 +1286,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
* Indirect branch speculation is always allowed when
* mitigation is force disabled.
*/
- if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
- if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
- spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return 0;
task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
@@ -1216,7 +1322,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
- if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@@ -1247,22 +1354,24 @@ static int ib_prctl_get(struct task_struct *task)
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return PR_SPEC_NOT_AFFECTED;
- switch (spectre_v2_user) {
- case SPECTRE_V2_USER_NONE:
+ if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
+ spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_PRCTL:
- case SPECTRE_V2_USER_SECCOMP:
+ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
+ return PR_SPEC_DISABLE;
+ else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
+ spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
- case SPECTRE_V2_USER_STRICT:
- case SPECTRE_V2_USER_STRICT_PREFERRED:
- return PR_SPEC_DISABLE;
- default:
+ } else
return PR_SPEC_NOT_AFFECTED;
- }
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
@@ -1501,7 +1610,7 @@ static char *stibp_state(void)
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
return "";
- switch (spectre_v2_user) {
+ switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
return ", STIBP: disabled";
case SPECTRE_V2_USER_STRICT:
@@ -1528,6 +1637,11 @@ static char *ibpb_state(void)
return "";
}
+static ssize_t srbds_show_state(char *buf)
+{
+ return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
+}
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
@@ -1572,6 +1686,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_ITLB_MULTIHIT:
return itlb_multihit_show_state(buf);
+ case X86_BUG_SRBDS:
+ return srbds_show_state(buf);
+
default:
break;
}
@@ -1618,4 +1735,9 @@ ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr
{
return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}
+
+ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
+}
#endif
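Aside: the split into spectre_v2_user_ibpb and spectre_v2_user_stibp is
user-visible through the speculation prctl interface, which now reports
the combined state. A hypothetical userspace probe (not part of this
patch):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_GET_SPECULATION_CTRL
	#define PR_GET_SPECULATION_CTRL	52
	#endif
	#ifndef PR_SPEC_INDIRECT_BRANCH
	#define PR_SPEC_INDIRECT_BRANCH	1
	#endif

	int main(void)
	{
		int ret = prctl(PR_GET_SPECULATION_CTRL,
				PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

		if (ret < 0)
			perror("prctl");
		else
			printf("indirect branch speculation: 0x%x\n", ret);
		return 0;
	}

The new SRBDS state is likewise exported to userspace via
/sys/devices/system/cpu/vulnerabilities/srbds through cpu_show_srbds()
above.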
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 74682b8d09b0..043d93cdcaad 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
+#include <linux/pgtable.h>
#include <asm/stackprotector.h>
#include <asm/perf_event.h>
@@ -35,7 +36,6 @@
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
-#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
@@ -1073,9 +1073,30 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
{}
};
-static bool __init cpu_matches(unsigned long which)
+#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
+ X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
+ INTEL_FAM6_##model, steppings, \
+ X86_FEATURE_ANY, issues)
+
+#define SRBDS BIT(0)
+
+static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS),
+ {}
+};
+
+static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
- const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+ const struct x86_cpu_id *m = x86_match_cpu(table);
return m && !!(m->driver_data & which);
}
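Aside: cpu_matches() now takes the match table as a parameter so one
helper serves both the vulnerability whitelist and the new SRBDS
blacklist. The pattern packs per-entry flag bits into ->driver_data; a
hypothetical table to show the shape (flags are illustrative only):

	#define NO_FOO	BIT(0)
	#define NO_BAR	BIT(1)

	static const struct x86_cpu_id example_whitelist[] __initconst = {
		X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, NO_FOO | NO_BAR),
		{}
	};

	/* cpu_matches(example_whitelist, NO_FOO) is true on a match. */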
@@ -1095,31 +1116,34 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
u64 ia32_cap = x86_read_arch_cap_msr();
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
- if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
+ !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
- if (cpu_matches(NO_SPECULATION))
+ if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
return;
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- if (!cpu_matches(NO_SPECTRE_V2))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
- if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
+ !(ia32_cap & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
if (ia32_cap & ARCH_CAP_IBRS_ALL)
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
+ !(ia32_cap & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
- if (cpu_matches(MSBDS_ONLY))
+ if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
}
- if (!cpu_matches(NO_SWAPGS))
+ if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
setup_force_cpu_bug(X86_BUG_SWAPGS);
/*
@@ -1137,7 +1161,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
(ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
setup_force_cpu_bug(X86_BUG_TAA);
- if (cpu_matches(NO_MELTDOWN))
+ /*
+ * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
+ * in the vulnerability blacklist.
+ */
+ if ((cpu_has(c, X86_FEATURE_RDRAND) ||
+ cpu_has(c, X86_FEATURE_RDSEED)) &&
+ cpu_matches(cpu_vuln_blacklist, SRBDS))
+ setup_force_cpu_bug(X86_BUG_SRBDS);
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
@@ -1146,7 +1179,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- if (cpu_matches(NO_L1TF))
+ if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
return;
setup_force_cpu_bug(X86_BUG_L1TF);
@@ -1574,6 +1607,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
mtrr_ap_init();
validate_apic_and_package_id(c);
x86_spec_ctrl_setup_ap();
+ update_srbds_msr();
}
static __init int setup_noclflush(char *arg)
@@ -1672,25 +1706,6 @@ void syscall_init(void)
X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}
-DEFINE_PER_CPU(int, debug_stack_usage);
-DEFINE_PER_CPU(u32, debug_idt_ctr);
-
-void debug_stack_set_zero(void)
-{
- this_cpu_inc(debug_idt_ctr);
- load_current_idt();
-}
-NOKPROBE_SYMBOL(debug_stack_set_zero);
-
-void debug_stack_reset(void)
-{
- if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
- return;
- if (this_cpu_dec_return(debug_idt_ctr) == 0)
- load_current_idt();
-}
-NOKPROBE_SYMBOL(debug_stack_reset);
-
#else /* CONFIG_X86_64 */
DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 37fdefd14f28..fb538fccd24c 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -77,6 +77,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
unsigned int aperfmperf_get_khz(int cpu);
extern void x86_spec_ctrl_setup_ap(void);
+extern void update_srbds_msr(void);
extern u64 x86_read_arch_cap_msr(void);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 166d7c355896..c25a67a34bd3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
+#include <linux/pgtable.h>
#include <linux/string.h>
#include <linux/bitops.h>
@@ -11,7 +12,6 @@
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
-#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
@@ -1142,9 +1142,12 @@ void switch_to_sld(unsigned long tifn)
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
{}
};
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index 52de616a8065..99be063fcb1b 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -192,7 +192,12 @@ EXPORT_SYMBOL_GPL(smca_banks);
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];
static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
-static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */
+
+/*
+ * A list of the banks enabled on each logical CPU. Controls which of the
+ * bank descriptors are initialized later in mce_threshold_create_device().
+ */
+static DEFINE_PER_CPU(unsigned int, bank_map);
/* Map of banks that have more than MCA_MISC0 available. */
static DEFINE_PER_CPU(u32, smca_misc_banks_map);
@@ -381,6 +386,10 @@ static void threshold_restart_bank(void *_tr)
struct thresh_restart *tr = _tr;
u32 hi, lo;
+ /* sysfs write might race against an offline operation */
+ if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
+ return;
+
rdmsr(tr->b->address, lo, hi);
if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
@@ -568,14 +577,19 @@ bool amd_filter_mce(struct mce *m)
{
enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
struct cpuinfo_x86 *c = &boot_cpu_data;
- u8 xec = (m->status >> 16) & 0x3F;
/* See Family 17h Models 10h-2Fh Erratum #1114. */
if (c->x86 == 0x17 &&
c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
- bank_type == SMCA_IF && xec == 10)
+ bank_type == SMCA_IF && XEC(m->status, 0x3f) == 10)
return true;
+ /* NB GART TLB error reporting is disabled by default. */
+ if (c->x86 < 0x17) {
+ if (m->bank == 4 && XEC(m->status, 0x1f) == 0x5)
+ return true;
+ }
+
return false;
}
@@ -907,14 +921,13 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
mce_log(&m);
}
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
{
- entering_irq();
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
inc_irq_stat(irq_deferred_error_count);
deferred_error_int_vector();
trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
- exiting_ack_irq();
+ ack_APIC_irq();
}
/*
@@ -1016,13 +1029,22 @@ static void log_and_reset_block(struct threshold_block *block)
static void amd_threshold_interrupt(void)
{
struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
+ struct threshold_bank **bp = this_cpu_read(threshold_banks);
unsigned int bank, cpu = smp_processor_id();
+ /*
+ * Validate that the threshold bank has been initialized already. The
+ * handler is installed at boot time, but on a hotplug event the
+ * interrupt might fire before the data has been initialized.
+ */
+ if (!bp)
+ return;
+
for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
- first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
+ first_block = bp[bank]->blocks;
if (!first_block)
continue;
@@ -1071,7 +1093,8 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
memset(&tr, 0, sizeof(tr));
tr.b = b;
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
+ if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1))
+ return -ENODEV;
return size;
}
@@ -1095,7 +1118,8 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
b->threshold_limit = new;
tr.b = b;
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
+ if (smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1))
+ return -ENODEV;
return size;
}
@@ -1104,7 +1128,9 @@ static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
u32 lo, hi;
- rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);
+ /* CPU might be offline by now */
+ if (rdmsr_on_cpu(b->cpu, b->address, &lo, &hi))
+ return -ENODEV;
return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
(THRESHOLD_MAX - b->threshold_limit)));
@@ -1209,10 +1235,10 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
u32 low, high;
int err;
- if ((bank >= per_cpu(mce_num_banks, cpu)) || (block >= NR_BLOCKS))
+ if ((bank >= this_cpu_read(mce_num_banks)) || (block >= NR_BLOCKS))
return 0;
- if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
+ if (rdmsr_safe(address, &low, &high))
return 0;
if (!(high & MASK_VALID_HI)) {
@@ -1247,6 +1273,7 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
INIT_LIST_HEAD(&b->miscj);
+ /* This is safe as @tb is not visible yet */
if (tb->blocks)
list_add(&b->miscj, &tb->blocks->miscj);
else
@@ -1267,13 +1294,12 @@ recurse:
if (b)
kobject_uevent(&b->kobj, KOBJ_ADD);
- return err;
+ return 0;
out_free:
if (b) {
- kobject_put(&b->kobj);
list_del(&b->miscj);
- kfree(b);
+ kobject_put(&b->kobj);
}
return err;
}
@@ -1302,9 +1328,10 @@ static int __threshold_add_blocks(struct threshold_bank *b)
return err;
}
-static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
+ unsigned int bank)
{
- struct device *dev = per_cpu(mce_device, cpu);
+ struct device *dev = this_cpu_read(mce_device);
struct amd_northbridge *nb = NULL;
struct threshold_bank *b = NULL;
const char *name = get_name(bank, NULL);
@@ -1324,7 +1351,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
- per_cpu(threshold_banks, cpu)[bank] = b;
+ bp[bank] = b;
refcount_inc(&b->cpus);
err = __threshold_add_blocks(b);
@@ -1339,6 +1366,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out;
}
+ /* Associate the bank with the per-CPU MCE device */
b->kobj = kobject_create_and_add(name, &dev->kobj);
if (!b->kobj) {
err = -EINVAL;
@@ -1346,6 +1374,7 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
}
if (is_shared_bank(bank)) {
+ b->shared = 1;
refcount_set(&b->cpus, 1);
/* nb is already initialized, see above */
@@ -1357,16 +1386,16 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
if (err)
- goto out_free;
-
- per_cpu(threshold_banks, cpu)[bank] = b;
+ goto out_kobj;
+ bp[bank] = b;
return 0;
- out_free:
+out_kobj:
+ kobject_put(b->kobj);
+out_free:
kfree(b);
-
- out:
+out:
return err;
}
@@ -1375,21 +1404,16 @@ static void threshold_block_release(struct kobject *kobj)
kfree(to_block(kobj));
}
-static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
+static void deallocate_threshold_blocks(struct threshold_bank *bank)
{
- struct threshold_block *pos = NULL;
- struct threshold_block *tmp = NULL;
- struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
+ struct threshold_block *pos, *tmp;
- if (!head)
- return;
-
- list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
+ list_for_each_entry_safe(pos, tmp, &bank->blocks->miscj, miscj) {
list_del(&pos->miscj);
kobject_put(&pos->kobj);
}
- kobject_put(&head->blocks->kobj);
+ kobject_put(&bank->blocks->kobj);
}
static void __threshold_remove_blocks(struct threshold_bank *b)
@@ -1403,122 +1427,102 @@ static void __threshold_remove_blocks(struct threshold_bank *b)
kobject_del(&pos->kobj);
}
-static void threshold_remove_bank(unsigned int cpu, int bank)
+static void threshold_remove_bank(struct threshold_bank *bank)
{
struct amd_northbridge *nb;
- struct threshold_bank *b;
- b = per_cpu(threshold_banks, cpu)[bank];
- if (!b)
- return;
+ if (!bank->blocks)
+ goto out_free;
- if (!b->blocks)
- goto free_out;
+ if (!bank->shared)
+ goto out_dealloc;
- if (is_shared_bank(bank)) {
- if (!refcount_dec_and_test(&b->cpus)) {
- __threshold_remove_blocks(b);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
- return;
- } else {
- /*
- * the last CPU on this node using the shared bank is
- * going away, remove that bank now.
- */
- nb = node_to_amd_nb(amd_get_nb_id(cpu));
- nb->bank4 = NULL;
- }
+ if (!refcount_dec_and_test(&bank->cpus)) {
+ __threshold_remove_blocks(bank);
+ return;
+ } else {
+ /*
+ * The last CPU on this node using the shared bank is going
+ * away, remove that bank now.
+ */
+ nb = node_to_amd_nb(amd_get_nb_id(smp_processor_id()));
+ nb->bank4 = NULL;
}
- deallocate_threshold_block(cpu, bank);
+out_dealloc:
+ deallocate_threshold_blocks(bank);
-free_out:
- kobject_del(b->kobj);
- kobject_put(b->kobj);
- kfree(b);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
+out_free:
+ kobject_put(bank->kobj);
+ kfree(bank);
}
int mce_threshold_remove_device(unsigned int cpu)
{
- unsigned int bank;
+ struct threshold_bank **bp = this_cpu_read(threshold_banks);
+ unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
- for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- continue;
- threshold_remove_bank(cpu, bank);
+ if (!bp)
+ return 0;
+
+ /*
+ * Clear the pointer before cleaning up, so that the thresholding
+ * interrupt can no longer dereference any of this data.
+ */
+ this_cpu_write(threshold_banks, NULL);
+
+ for (bank = 0; bank < numbanks; bank++) {
+ if (bp[bank]) {
+ threshold_remove_bank(bp[bank]);
+ bp[bank] = NULL;
+ }
}
- kfree(per_cpu(threshold_banks, cpu));
- per_cpu(threshold_banks, cpu) = NULL;
+ kfree(bp);
return 0;
}
-/* create dir/files for all valid threshold banks */
+/**
+ * mce_threshold_create_device - Create the per-CPU MCE threshold device
+ * @cpu: The plugged in CPU
+ *
+ * Create directories and files for all valid threshold banks.
+ *
+ * This is invoked from the CPU hotplug callback which was installed in
+ * mcheck_init_device(). The invocation happens in context of the hotplug
+ * thread running on @cpu. The callback is invoked on all CPUs which are
+ * online when the callback is installed or during a real hotplug event.
+ */
int mce_threshold_create_device(unsigned int cpu)
{
- unsigned int bank;
+ unsigned int numbanks, bank;
struct threshold_bank **bp;
- int err = 0;
+ int err;
- bp = per_cpu(threshold_banks, cpu);
+ if (!mce_flags.amd_threshold)
+ return 0;
+
+ bp = this_cpu_read(threshold_banks);
if (bp)
return 0;
- bp = kcalloc(per_cpu(mce_num_banks, cpu), sizeof(struct threshold_bank *),
- GFP_KERNEL);
+ numbanks = this_cpu_read(mce_num_banks);
+ bp = kcalloc(numbanks, sizeof(*bp), GFP_KERNEL);
if (!bp)
return -ENOMEM;
- per_cpu(threshold_banks, cpu) = bp;
-
- for (bank = 0; bank < per_cpu(mce_num_banks, cpu); ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ for (bank = 0; bank < numbanks; ++bank) {
+ if (!(this_cpu_read(bank_map) & (1 << bank)))
continue;
- err = threshold_create_bank(cpu, bank);
+ err = threshold_create_bank(bp, cpu, bank);
if (err)
- goto err;
- }
- return err;
-err:
- mce_threshold_remove_device(cpu);
- return err;
-}
-
-static __init int threshold_init_device(void)
-{
- unsigned lcpu = 0;
-
- /* to hit CPUs online before the notifier is up */
- for_each_online_cpu(lcpu) {
- int err = mce_threshold_create_device(lcpu);
-
- if (err)
- return err;
+ goto out_err;
}
+ this_cpu_write(threshold_banks, bp);
if (thresholding_irq_en)
mce_threshold_vector = amd_threshold_interrupt;
-
return 0;
+out_err:
+ mce_threshold_remove_device(cpu);
+ return err;
}
-/*
- * there are 3 funcs which need to be _initcalled in a logic sequence:
- * 1. xen_late_init_mcelog
- * 2. mcheck_init_device
- * 3. threshold_init_device
- *
- * xen_late_init_mcelog must register xen_mce_chrdev_device before
- * native mce_chrdev_device registration if running under xen platform;
- *
- * mcheck_init_device should be inited before threshold_init_device to
- * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
- *
- * so we use following _initcalls
- * 1. device_initcall(xen_late_init_mcelog);
- * 2. device_initcall_sync(mcheck_init_device);
- * 3. late_initcall(threshold_init_device);
- *
- * when running under xen, the initcall order is 1,2,3;
- * on baremetal, we skip 1 and we do only 2 and 3.
- */
-late_initcall(threshold_init_device);
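Aside: the deleted late_initcall is not replaced by another initcall;
device creation now rides the MCE CPU hotplug callbacks, so each CPU sets
up its own banks in CPU-local context. Roughly (see mcheck_init_device()
for the real registration; names approximate):

	/*
	 * cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
	 *		     mce_cpu_online, mce_cpu_pre_down);
	 *
	 * mce_cpu_online()   -> ... -> mce_threshold_create_device(cpu)
	 * mce_cpu_pre_down() -> ... -> mce_threshold_remove_device(cpu)
	 */

This is also why the function can use this_cpu_read() throughout: it
always runs on the CPU it is setting up.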
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index e9265e2f28c9..ce9120c4f740 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -130,7 +130,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
/* Do initial initialization of a struct mce */
-void mce_setup(struct mce *m)
+noinstr void mce_setup(struct mce *m)
{
memset(m, 0, sizeof(struct mce));
m->cpu = m->extcpu = smp_processor_id();
@@ -140,12 +140,12 @@ void mce_setup(struct mce *m)
m->cpuid = cpuid_eax(1);
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
- rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+ m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
- rdmsrl(MSR_PPIN, m->ppin);
+ m->ppin = __rdmsr(MSR_PPIN);
else if (this_cpu_has(X86_FEATURE_AMD_PPIN))
- rdmsrl(MSR_AMD_PPIN, m->ppin);
+ m->ppin = __rdmsr(MSR_AMD_PPIN);
m->microcode = boot_cpu_data.microcode;
}
@@ -160,29 +160,17 @@ void mce_log(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_log);
-/*
- * We run the default notifier if we have only the UC, the first and the
- * default notifier registered. I.e., the mandatory NUM_DEFAULT_NOTIFIERS
- * notifiers registered on the chain.
- */
-#define NUM_DEFAULT_NOTIFIERS 3
-static atomic_t num_notifiers;
-
void mce_register_decode_chain(struct notifier_block *nb)
{
if (WARN_ON(nb->priority > MCE_PRIO_MCELOG && nb->priority < MCE_PRIO_EDAC))
return;
- atomic_inc(&num_notifiers);
-
blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_register_decode_chain);
void mce_unregister_decode_chain(struct notifier_block *nb)
{
- atomic_dec(&num_notifiers);
-
blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
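Aside: with the notifier-counting heuristic gone, whether the default
notifier prints a record is now driven purely by m->kflags (see
mce_default_notifier below). A hypothetical decode-chain consumer, for
shape only; MCE_HANDLED_EDAC is the flag this series provides for such
users:

	static int example_mce_decode(struct notifier_block *nb,
				      unsigned long val, void *data)
	{
		struct mce *m = data;

		if (!m)
			return NOTIFY_DONE;

		/* Decode, then mark handled so the default notifier stays quiet. */
		m->kflags |= MCE_HANDLED_EDAC;
		return NOTIFY_OK;
	}

	static struct notifier_block example_mce_dec = {
		.notifier_call	= example_mce_decode,
		.priority	= MCE_PRIO_EDAC,
	};

	/* mce_register_decode_chain(&example_mce_dec); */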
@@ -265,6 +253,7 @@ static void __print_mce(struct mce *m)
}
pr_cont("\n");
+
/*
* Note this output is parsed by external tools and old fields
* should not be changed.
@@ -531,6 +520,14 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
+static bool whole_page(struct mce *m)
+{
+ if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
+ return true;
+
+ return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
+}
+
bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
@@ -546,22 +543,7 @@ bool mce_is_correctable(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_correctable);
-static bool cec_add_mce(struct mce *m)
-{
- if (!m)
- return false;
-
- /* We eat only correctable DRAM errors with usable addresses. */
- if (mce_is_memory_error(m) &&
- mce_is_correctable(m) &&
- mce_usable_address(m))
- if (!cec_add_elem(m->addr >> PAGE_SHIFT))
- return true;
-
- return false;
-}
-
-static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
+static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct mce *m = (struct mce *)data;
@@ -569,9 +551,6 @@ static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
if (!m)
return NOTIFY_DONE;
- if (cec_add_mce(m))
- return NOTIFY_STOP;
-
/* Emit the trace record: */
trace_mce_record(m);
@@ -582,9 +561,9 @@ static int mce_first_notifier(struct notifier_block *nb, unsigned long val,
return NOTIFY_DONE;
}
-static struct notifier_block first_nb = {
- .notifier_call = mce_first_notifier,
- .priority = MCE_PRIO_FIRST,
+static struct notifier_block early_nb = {
+ .notifier_call = mce_early_notifier,
+ .priority = MCE_PRIO_EARLY,
};
static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
@@ -601,8 +580,10 @@ static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
return NOTIFY_DONE;
pfn = mce->addr >> PAGE_SHIFT;
- if (!memory_failure(pfn, 0))
- set_mce_nospec(pfn);
+ if (!memory_failure(pfn, 0)) {
+ set_mce_nospec(pfn, whole_page(mce));
+ mce->kflags |= MCE_HANDLED_UC;
+ }
return NOTIFY_OK;
}
@@ -620,10 +601,8 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
if (!m)
return NOTIFY_DONE;
- if (atomic_read(&num_notifiers) > NUM_DEFAULT_NOTIFIERS)
- return NOTIFY_DONE;
-
- __print_mce(m);
+ if (mca_cfg.print_all || !m->kflags)
+ __print_mce(m);
return NOTIFY_DONE;
}
@@ -1100,13 +1079,15 @@ static void mce_clear_state(unsigned long *toclear)
* kdump kernel establishing a new #MC handler where a broadcasted MCE
* might not get handled properly.
*/
-static bool __mc_check_crashing_cpu(int cpu)
+static noinstr bool mce_check_crashing_cpu(void)
{
+ unsigned int cpu = smp_processor_id();
+
if (cpu_is_offline(cpu) ||
(crashing_cpu != -1 && crashing_cpu != cpu)) {
u64 mcgstatus;
- mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+ mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
if (mcgstatus & MCG_STATUS_LMCES)
@@ -1114,7 +1095,7 @@ static bool __mc_check_crashing_cpu(int cpu)
}
if (mcgstatus & MCG_STATUS_RIPV) {
- mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
return true;
}
}
@@ -1200,11 +1181,12 @@ static void kill_me_maybe(struct callback_head *cb)
int flags = MF_ACTION_REQUIRED;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
- if (!(p->mce_status & MCG_STATUS_RIPV))
+
+ if (!p->mce_ripv)
flags |= MF_MUST_KILL;
if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags)) {
- set_mce_nospec(p->mce_addr >> PAGE_SHIFT);
+ set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
return;
}
@@ -1230,12 +1212,11 @@ static void kill_me_maybe(struct callback_head *cb)
* backing the user stack, tracing that reads the user stack will cause
* potentially infinite recursion.
*/
-void noinstr do_machine_check(struct pt_regs *regs, long error_code)
+void noinstr do_machine_check(struct pt_regs *regs)
{
DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
struct mca_config *cfg = &mca_cfg;
- int cpu = smp_processor_id();
struct mce m, *final;
char *msg = NULL;
int worst = 0;
@@ -1264,11 +1245,6 @@ void noinstr do_machine_check(struct pt_regs *regs, long error_code)
*/
int lmce = 1;
- if (__mc_check_crashing_cpu(cpu))
- return;
-
- nmi_enter();
-
this_cpu_inc(mce_exception_count);
mce_gather_info(&m, regs);
@@ -1356,7 +1332,7 @@ void noinstr do_machine_check(struct pt_regs *regs, long error_code)
sync_core();
if (worst != MCE_AR_SEVERITY && !kill_it)
- goto out_ist;
+ return;
/* Fault was in user mode and we need to take some action */
if ((m.cs & 3) == 3) {
@@ -1364,18 +1340,27 @@ void noinstr do_machine_check(struct pt_regs *regs, long error_code)
BUG_ON(!on_thread_stack() || !user_mode(regs));
current->mce_addr = m.addr;
- current->mce_status = m.mcgstatus;
+ current->mce_ripv = !!(m.mcgstatus & MCG_STATUS_RIPV);
+ current->mce_whole_page = whole_page(&m);
current->mce_kill_me.func = kill_me_maybe;
if (kill_it)
current->mce_kill_me.func = kill_me_now;
task_work_add(current, &current->mce_kill_me, true);
} else {
- if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
- mce_panic("Failed kernel mode recovery", &m, msg);
+ /*
+ * Handle an MCE which has happened in kernel space but from
+ * which the kernel can recover: ex_has_fault_handler() has
+ * already verified that the rIP at which the error happened is
+ * a rIP from which the kernel can recover (by jumping to
+ * recovery code specified in _ASM_EXTABLE_FAULT()) and the
+ * corresponding exception handler which would do that is the
+ * proper one.
+ */
+ if (m.kflags & MCE_IN_KERNEL_RECOV) {
+ if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
+ mce_panic("Failed kernel mode recovery", &m, msg);
+ }
}
-
-out_ist:
- nmi_exit();
}
EXPORT_SYMBOL_GPL(do_machine_check);
@@ -1765,6 +1750,7 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
mce_flags.succor = !!cpu_has(c, X86_FEATURE_SUCCOR);
mce_flags.smca = !!cpu_has(c, X86_FEATURE_SMCA);
+ mce_flags.amd_threshold = 1;
if (mce_flags.smca) {
msr_ops.ctl = smca_ctl_reg;
@@ -1902,21 +1888,84 @@ bool filter_mce(struct mce *m)
}
/* Handle unconfigured int18 (should never happen) */
-static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+static noinstr void unexpected_machine_check(struct pt_regs *regs)
{
+ instrumentation_begin();
pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
smp_processor_id());
+ instrumentation_end();
}
/* Call the installed machine check handler for this CPU setup. */
-void (*machine_check_vector)(struct pt_regs *, long error_code) =
- unexpected_machine_check;
+void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
-dotraplinkage notrace void do_mce(struct pt_regs *regs, long error_code)
+static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
- machine_check_vector(regs, error_code);
+ /*
+	 * Only required when coming from kernel mode. See
+ * mce_check_crashing_cpu() for details.
+ */
+ if (machine_check_vector == do_machine_check &&
+ mce_check_crashing_cpu())
+ return;
+
+ nmi_enter();
+ /*
+ * The call targets are marked noinstr, but objtool can't figure
+ * that out because it's an indirect call. Annotate it.
+ */
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ machine_check_vector(regs);
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
+ nmi_exit();
+}
+
+static __always_inline void exc_machine_check_user(struct pt_regs *regs)
+{
+ idtentry_enter_user(regs);
+ instrumentation_begin();
+ machine_check_vector(regs);
+ instrumentation_end();
+ idtentry_exit_user(regs);
}
-NOKPROBE_SYMBOL(do_mce);
+
+#ifdef CONFIG_X86_64
+/* MCE hit kernel mode */
+DEFINE_IDTENTRY_MCE(exc_machine_check)
+{
+ unsigned long dr7;
+
+ dr7 = local_db_save();
+ exc_machine_check_kernel(regs);
+ local_db_restore(dr7);
+}
+
+/* The user mode variant. */
+DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
+{
+ unsigned long dr7;
+
+ dr7 = local_db_save();
+ exc_machine_check_user(regs);
+ local_db_restore(dr7);
+}
+#else
+/* 32bit unified entry point */
+DEFINE_IDTENTRY_MCE(exc_machine_check)
+{
+ unsigned long dr7;
+
+ dr7 = local_db_save();
+ if (user_mode(regs))
+ exc_machine_check_user(regs);
+ else
+ exc_machine_check_kernel(regs);
+ local_db_restore(dr7);
+}
+#endif
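Each entry variant brackets the handler with local_db_save()/local_db_restore() so a data breakpoint cannot fire while #MC state is inspected. A self-contained sketch of the pair, with a plain variable standing in for the DR7 register:

    /* Stand-in for DR7; the real code uses get_debugreg()/set_debugreg(). */
    static unsigned long fake_dr7;

    static unsigned long local_db_save_sketch(void)
    {
        unsigned long dr7 = fake_dr7;

        if (dr7)
            fake_dr7 = 0;     /* disarm all breakpoints */
        return dr7;
    }

    static void local_db_restore_sketch(unsigned long dr7)
    {
        fake_dr7 = dr7;       /* re-arm whatever was saved */
    }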
/*
* Called for each booted CPU to set up machine checks.
@@ -1999,6 +2048,7 @@ void mce_disable_bank(int bank)
* mce=no_cmci Disables CMCI
* mce=no_lmce Disables LMCE
* mce=dont_log_ce Clears corrected events silently, no log created for CEs.
+ * mce=print_all Print all machine check logs to console
* mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
* mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
* monarchtimeout is how long to wait for other CPUs on machine
@@ -2027,6 +2077,8 @@ static int __init mcheck_enable(char *str)
cfg->lmce_disabled = 1;
else if (!strcmp(str, "dont_log_ce"))
cfg->dont_log_ce = true;
+ else if (!strcmp(str, "print_all"))
+ cfg->print_all = true;
else if (!strcmp(str, "ignore_ce"))
cfg->ignore_ce = true;
else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
@@ -2049,7 +2101,7 @@ __setup("mce", mcheck_enable);
int __init mcheck_init(void)
{
mcheck_intel_therm_init();
- mce_register_decode_chain(&first_nb);
+ mce_register_decode_chain(&early_nb);
mce_register_decode_chain(&mce_uc_nb);
mce_register_decode_chain(&mce_default_nb);
mcheck_vendor_init_severity();
@@ -2293,6 +2345,7 @@ static ssize_t store_int_with_restart(struct device *s,
static DEVICE_INT_ATTR(tolerant, 0644, mca_cfg.tolerant);
static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
+static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
static struct dev_ext_attribute dev_attr_check_interval = {
__ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
@@ -2317,6 +2370,7 @@ static struct device_attribute *mce_device_attrs[] = {
#endif
&dev_attr_monarch_timeout.attr,
&dev_attr_dont_log_ce.attr,
+ &dev_attr_print_all.attr,
&dev_attr_ignore_ce.attr,
&dev_attr_cmci_disabled.attr,
NULL
@@ -2489,6 +2543,13 @@ static __init void mce_init_banks(void)
}
}
+/*
+ * When running on XEN, this initcall is ordered against the XEN mcelog
+ * initcall:
+ *
+ * device_initcall(xen_late_init_mcelog);
+ * device_initcall_sync(mcheck_init_device);
+ */
static __init int mcheck_init_device(void)
{
int err;
@@ -2520,6 +2581,10 @@ static __init int mcheck_init_device(void)
if (err)
goto err_out_mem;
+ /*
+ * Invokes mce_cpu_online() on all CPUs which are online when
+ * the state is installed.
+ */
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
mce_cpu_online, mce_cpu_pre_down);
if (err < 0)
@@ -2609,7 +2674,6 @@ static int __init mcheck_late_init(void)
static_branch_inc(&mcsafe_key);
mcheck_debugfs_init();
- cec_init();
/*
* Flush out everything that has been logged during early boot, now that
diff --git a/arch/x86/kernel/cpu/mce/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index d089567a9ce8..43c466020ed5 100644
--- a/arch/x86/kernel/cpu/mce/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -39,6 +39,9 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
struct mce *mce = (struct mce *)data;
unsigned int entry;
+ if (mce->kflags & MCE_HANDLED_CEC)
+ return NOTIFY_DONE;
+
mutex_lock(&mce_chrdev_read_mutex);
entry = mcelog->next;
@@ -56,6 +59,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
memcpy(mcelog->entry + entry, mce, sizeof(struct mce));
mcelog->entry[entry].finished = 1;
+ mcelog->entry[entry].kflags = 0;
/* wake processes polling /dev/mcelog */
wake_up_interruptible(&mce_chrdev_wait);
@@ -63,6 +67,7 @@ static int dev_mce_log(struct notifier_block *nb, unsigned long val,
unlock:
mutex_unlock(&mce_chrdev_read_mutex);
+ mce->kflags |= MCE_HANDLED_MCELOG;
return NOTIFY_OK;
}
@@ -324,6 +329,7 @@ static const struct file_operations mce_chrdev_ops = {
.write = mce_chrdev_write,
.poll = mce_chrdev_poll,
.unlocked_ioctl = mce_chrdev_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.llseek = no_llseek,
};
@@ -343,7 +349,7 @@ static __init int dev_mcelog_init_device(void)
if (!mcelog)
return -ENOMEM;
- strncpy(mcelog->signature, MCE_LOG_SIGNATURE, sizeof(mcelog->signature));
+ memcpy(mcelog->signature, MCE_LOG_SIGNATURE, sizeof(mcelog->signature));
mcelog->len = mce_log_len;
mcelog->recordlen = sizeof(struct mce);
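strncpy() with a size equal to the destination triggers -Wstringop-truncation, and the signature field never needs a terminating NUL: it is a fixed-width tag that is read back in full. Assuming the 12-byte "MACHINECHECK" signature, the change amounts to:

    #include <string.h>

    struct log_hdr_sketch {
        char signature[12];    /* fixed-width tag, not a C string */
    };

    static void set_signature(struct log_hdr_sketch *h)
    {
        /* fills all 12 bytes; no NUL terminator wanted or needed */
        memcpy(h->signature, "MACHINECHECK", sizeof(h->signature));
    }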
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 3413b41b8d55..0593b192eb8f 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -146,9 +146,9 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
regs.cs = m->cs;
pregs = &regs;
}
- /* in mcheck exeception handler, irq will be disabled */
+ /* do_machine_check() expects interrupts disabled -- at least */
local_irq_save(flags);
- do_machine_check(pregs, 0);
+ do_machine_check(pregs);
local_irq_restore(flags);
m->finished = 0;
}
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 3b008172ad73..6473070b5da4 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -9,7 +9,7 @@
#include <asm/mce.h>
/* Pointer to the installed machine check handler for this CPU setup. */
-extern void (*machine_check_vector)(struct pt_regs *, long error_code);
+extern void (*machine_check_vector)(struct pt_regs *);
enum severity_level {
MCE_NO_SEVERITY,
@@ -119,6 +119,7 @@ struct mca_config {
bool dont_log_ce;
bool cmci_disabled;
bool ignore_ce;
+ bool print_all;
__u64 lmce_disabled : 1,
disabled : 1,
@@ -148,7 +149,7 @@ struct mce_vendor_flags {
* Recovery. It indicates support for data poisoning in HW and deferred
* error interrupts.
*/
- succor : 1,
+ succor : 1,
/*
* (AMD) SMCA: This bit indicates support for Scalable MCA which expands
@@ -156,9 +157,12 @@ struct mce_vendor_flags {
* banks. Also, to accommodate the new banks and registers, the MCA
* register space is moved to a new MSR range.
*/
- smca : 1,
+ smca : 1,
- __reserved_0 : 61;
+ /* AMD-style error thresholding banks present. */
+ amd_threshold : 1,
+
+ __reserved_0 : 60;
};
extern struct mce_vendor_flags mce_flags;
diff --git a/arch/x86/kernel/cpu/mce/p5.c b/arch/x86/kernel/cpu/mce/p5.c
index 5ee94aa1b766..19e90cae8e97 100644
--- a/arch/x86/kernel/cpu/mce/p5.c
+++ b/arch/x86/kernel/cpu/mce/p5.c
@@ -21,12 +21,11 @@
int mce_p5_enabled __read_mostly;
/* Machine check handler for Pentium class Intel CPUs: */
-static void pentium_machine_check(struct pt_regs *regs, long error_code)
+static noinstr void pentium_machine_check(struct pt_regs *regs)
{
u32 loaddr, hi, lotype;
- nmi_enter();
-
+ instrumentation_begin();
rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
@@ -39,8 +38,7 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
}
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
- nmi_exit();
+ instrumentation_end();
}
/* Set up machine check reporting for processors with Intel style MCE: */
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index 87bcdc6dc2f0..e1da619add19 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -213,8 +213,12 @@ static int error_context(struct mce *m)
{
if ((m->cs & 3) == 3)
return IN_USER;
- if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip))
+
+ if (mc_recoverable(m->mcgstatus) && ex_has_fault_handler(m->ip)) {
+ m->kflags |= MCE_IN_KERNEL_RECOV;
return IN_KERNEL_RECOV;
+ }
+
return IN_KERNEL;
}
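The context decision is a three-way split: user CPL, kernel with an exception-table fixup for the faulting IP, or plain kernel. Setting MCE_IN_KERNEL_RECOV here is what later gates the fixup_exception() path in do_machine_check(). A standalone sketch of the classification:

    #include <stdbool.h>

    enum ctx_sketch { CTX_USER, CTX_KERNEL_RECOV, CTX_KERNEL };

    static enum ctx_sketch classify(unsigned int cs, bool mcg_recoverable,
                                    bool ip_has_fixup)
    {
        if ((cs & 3) == 3)
            return CTX_USER;            /* CPL 3: came from user space */
        if (mcg_recoverable && ip_has_fixup)
            return CTX_KERNEL_RECOV;    /* exception table can fix it up */
        return CTX_KERNEL;
    }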
diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index f36dc0742085..a7cd2d203ced 100644
--- a/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -614,14 +614,13 @@ static void unexpected_thermal_interrupt(void)
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
- entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
inc_irq_stat(irq_thermal_count);
smp_thermal_vector();
trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
- exiting_ack_irq();
+ ack_APIC_irq();
}
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
diff --git a/arch/x86/kernel/cpu/mce/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c
index 28812cc15300..6a059a035021 100644
--- a/arch/x86/kernel/cpu/mce/threshold.c
+++ b/arch/x86/kernel/cpu/mce/threshold.c
@@ -21,12 +21,11 @@ static void default_threshold_interrupt(void)
void (*mce_threshold_vector)(void) = default_threshold_interrupt;
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_threshold)
{
- entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
inc_irq_stat(irq_threshold_count);
mce_threshold_vector();
trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
- exiting_ack_irq();
+ ack_APIC_irq();
}
diff --git a/arch/x86/kernel/cpu/mce/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c
index b3938c195365..9c9f0abd2d7f 100644
--- a/arch/x86/kernel/cpu/mce/winchip.c
+++ b/arch/x86/kernel/cpu/mce/winchip.c
@@ -17,14 +17,12 @@
#include "internal.h"
/* Machine check handler for WinChip C6: */
-static void winchip_machine_check(struct pt_regs *regs, long error_code)
+static noinstr void winchip_machine_check(struct pt_regs *regs)
{
- nmi_enter();
-
+ instrumentation_begin();
pr_emerg("CPU0: Machine Check Exception.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
- nmi_exit();
+ instrumentation_end();
}
/* Set up machine check reporting on the Winchip C6 series */
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index ebf34c7bc8bc..af94f05a5c66 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -23,6 +23,7 @@
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/desc.h>
+#include <asm/idtentry.h>
#include <asm/irq_regs.h>
#include <asm/i8259.h>
#include <asm/apic.h>
@@ -40,11 +41,10 @@ static void (*hv_stimer0_handler)(void);
static void (*hv_kexec_handler)(void);
static void (*hv_crash_handler)(struct pt_regs *regs);
-__visible void __irq_entry hyperv_vector_handler(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- entering_irq();
inc_irq_stat(irq_hv_callback_count);
if (vmbus_handler)
vmbus_handler();
@@ -52,7 +52,6 @@ __visible void __irq_entry hyperv_vector_handler(struct pt_regs *regs)
if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
ack_APIC_irq();
- exiting_irq();
set_irq_regs(old_regs);
}
@@ -73,19 +72,16 @@ EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
* Routines to do per-architecture handling of stimer0
* interrupts when in Direct Mode
*/
-
-__visible void __irq_entry hv_stimer0_vector_handler(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- entering_irq();
inc_irq_stat(hyperv_stimer0_count);
if (hv_stimer0_handler)
hv_stimer0_handler();
add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
ack_APIC_irq();
- exiting_irq();
set_irq_regs(old_regs);
}
@@ -331,17 +327,19 @@ static void __init ms_hyperv_init_platform(void)
x86_platform.apic_post_init = hyperv_init;
hyperv_setup_mmu_ops();
/* Setup the IDT for hypervisor callback */
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_hyperv_callback);
/* Setup the IDT for reenlightenment notifications */
- if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT)
+ if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT) {
alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
- hyperv_reenlightenment_vector);
+ asm_sysvec_hyperv_reenlightenment);
+ }
/* Setup the IDT for stimer0 */
- if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE)
+ if (ms_hyperv.misc_features & HV_STIMER_DIRECT_MODE_AVAILABLE) {
alloc_intr_gate(HYPERV_STIMER0_VECTOR,
- hv_stimer0_callback_vector);
+ asm_sysvec_hyperv_stimer0);
+ }
# ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 4bd28b388a1a..0daf2f1cf7a8 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -1326,9 +1326,9 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
* pseudo-locked region will still be here on return.
*
* The mutex has to be released temporarily to avoid a potential
- * deadlock with the mm->mmap_sem semaphore which is obtained in
- * the device_create() and debugfs_create_dir() callpath below
- * as well as before the mmap() callback is called.
+ * deadlock with the mm->mmap_lock which is obtained in the
+ * device_create() and debugfs_create_dir() callpath below as well as
+ * before the mmap() callback is called.
*/
mutex_unlock(&rdtgroup_mutex);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index d7cb5ab0d1f0..23b4b61319d3 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -3199,10 +3199,10 @@ int __init rdtgroup_init(void)
* during the debugfs directory creation also &sb->s_type->i_mutex_key
* (the lockdep class of inode->i_rwsem). Other filesystem
* interactions (eg. SyS_getdents) have the lock ordering:
- * &sb->s_type->i_mutex_key --> &mm->mmap_sem
- * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex
+ * &sb->s_type->i_mutex_key --> &mm->mmap_lock
+ * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
* is taken, thus creating dependency:
- * &mm->mmap_sem --> rdtgroup_mutex for the latter that can cause
+ * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause
* issues considering the other two lock dependencies.
* By creating the debugfs directory here we avoid a dependency
* that may cause deadlock (even though file operations cannot
diff --git a/arch/x86/kernel/crash_core_32.c b/arch/x86/kernel/crash_core_32.c
index c0159a7bca6d..8a89c109e20a 100644
--- a/arch/x86/kernel/crash_core_32.c
+++ b/arch/x86/kernel/crash_core_32.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/crash_core.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/x86/kernel/crash_core_64.c b/arch/x86/kernel/crash_core_64.c
index 845a57eb4eb7..7d255f882afe 100644
--- a/arch/x86/kernel/crash_core_64.c
+++ b/arch/x86/kernel/crash_core_64.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/crash_core.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
void arch_crash_save_vmcoreinfo(void)
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index 3793646f0fb5..759d392cbe9f 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -6,12 +6,10 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/traps.h>
-extern void double_fault(void);
#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
#define TSS(x) this_cpu_read(cpu_tss_rw.x86_tss.x)
@@ -22,7 +20,7 @@ static void set_df_gdt_entry(unsigned int cpu);
* Called by double_fault with CR0.TS and EFLAGS.NT cleared. The CPU thinks
* we're running the doublefault task. Cannot return.
*/
-asmlinkage notrace void __noreturn doublefault_shim(void)
+asmlinkage noinstr void __noreturn doublefault_shim(void)
{
unsigned long cr2;
struct pt_regs regs;
@@ -41,7 +39,7 @@ asmlinkage notrace void __noreturn doublefault_shim(void)
* Fill in pt_regs. A downside of doing this in C is that the unwinder
* won't see it (no ENCODE_FRAME_POINTER), so a nested stack dump
* won't successfully unwind to the source of the double fault.
- * The main dump from do_double_fault() is fine, though, since it
+ * The main dump from exc_double_fault() is fine, though, since it
* uses these regs directly.
*
* If anyone ever cares, this could be moved to asm.
@@ -71,7 +69,7 @@ asmlinkage notrace void __noreturn doublefault_shim(void)
regs.cx = TSS(cx);
regs.bx = TSS(bx);
- do_double_fault(&regs, 0, cr2);
+ exc_double_fault(&regs, 0, cr2);
/*
* x86_32 does not save the original CR3 anywhere on a task switch.
@@ -85,7 +83,6 @@ asmlinkage notrace void __noreturn doublefault_shim(void)
*/
panic("cannot return from double fault\n");
}
-NOKPROBE_SYMBOL(doublefault_shim);
DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
.tss = {
@@ -96,7 +93,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
.ldt = 0,
.io_bitmap_base = IO_BITMAP_OFFSET_INVALID,
- .ip = (unsigned long) double_fault,
+ .ip = (unsigned long) asm_exc_double_fault,
.flags = X86_EFLAGS_FIXED,
.es = __USER_DS,
.cs = __KERNEL_CS,
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index ae64ec7f752f..456511b2284e 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -65,7 +65,7 @@ bool in_entry_stack(unsigned long *stack, struct stack_info *info)
}
static void printk_stack_address(unsigned long address, int reliable,
- char *log_lvl)
+ const char *log_lvl)
{
touch_nmi_watchdog();
printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
@@ -160,7 +160,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *stack, char *log_lvl)
+ unsigned long *stack, const char *log_lvl)
{
struct unwind_state state;
struct stack_info stack_info = {0};
@@ -279,7 +279,8 @@ next:
}
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp,
+ const char *loglvl)
{
task = task ? : current;
@@ -290,7 +291,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (!sp && task == current)
sp = get_stack_pointer(current, NULL);
- show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
+ show_trace_log_lvl(task, NULL, sp, loglvl);
}
void show_stack_regs(struct pt_regs *regs)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 460ae7f66818..4a94d38cd141 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -22,15 +22,13 @@
static const char * const exception_stack_names[] = {
[ ESTACK_DF ] = "#DF",
[ ESTACK_NMI ] = "NMI",
- [ ESTACK_DB2 ] = "#DB2",
- [ ESTACK_DB1 ] = "#DB1",
[ ESTACK_DB ] = "#DB",
[ ESTACK_MCE ] = "#MC",
};
const char *stack_type_name(enum stack_type type)
{
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
if (type == STACK_TYPE_IRQ)
return "IRQ";
@@ -79,7 +77,6 @@ static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
EPAGERANGE(DF),
EPAGERANGE(NMI),
- EPAGERANGE(DB1),
EPAGERANGE(DB),
EPAGERANGE(MCE),
};
@@ -91,7 +88,7 @@ static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
struct pt_regs *regs;
unsigned int k;
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
/*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 4d13c57f370a..983cd53ed4c9 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -991,7 +991,15 @@ void __init e820__reserve_setup_data(void)
while (pa_data) {
data = early_memremap(pa_data, sizeof(*data));
e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
- e820__range_update_kexec(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+
+ /*
+ * SETUP_EFI is supplied by kexec and does not need to be
+ * reserved.
+ */
+ if (data->type != SETUP_EFI)
+ e820__range_update_kexec(pa_data,
+ sizeof(*data) + data->len,
+ E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
if (data->type == SETUP_INDIRECT &&
((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 93fbdff2974f..d3c531d3b244 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -8,6 +8,7 @@
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/errno.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
@@ -15,7 +16,6 @@
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/xhci-dbgp.h>
#include <asm/pci_x86.h>
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 12e7d4406c32..4fe7af58cfe1 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -29,7 +29,7 @@
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index aa5d28aeb31e..083a3da7bb73 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -12,7 +12,7 @@
#include <asm/frame.h>
.code64
- .section .entry.text, "ax"
+ .section .text, "ax"
#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 206a4b6144c2..cbb71c1b574f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -20,13 +20,13 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 4bbc770af632..16da4ac01597 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -13,8 +13,8 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/segment.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
@@ -29,15 +29,16 @@
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
+#define GET_CR2_INTO(reg) GET_CR2_INTO_AX ; _ASM_MOV %_ASM_AX, reg
#else
#define INTERRUPT_RETURN iretq
+#define GET_CR2_INTO(reg) _ASM_MOV %cr2, reg
#endif
-/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
+/*
+ * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
* because we need identity-mapped pages.
- *
*/
-
#define l4_index(x) (((x) >> 39) & 511)
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 4d8d53ed02c9..8cdf29ffd95f 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -32,6 +32,8 @@
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/user.h>
+#include <asm/desc.h>
+#include <asm/tlbflush.h>
/* Per cpu debug control register value */
DEFINE_PER_CPU(unsigned long, cpu_dr7);
@@ -97,6 +99,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
unsigned long *dr7;
int i;
+ lockdep_assert_irqs_disabled();
+
for (i = 0; i < HBP_NUM; i++) {
struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
@@ -115,6 +119,12 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
dr7 = this_cpu_ptr(&cpu_dr7);
*dr7 |= encode_dr7(i, info->len, info->type);
+ /*
+	 * Ensure cpu_dr7 is written before the DR7 register is set.
+	 * This ensures an NMI never sees cpu_dr7 as zero while DR7 is armed.
+ */
+ barrier();
+
set_debugreg(*dr7, 7);
if (info->mask)
set_dr_addr_mask(info->mask, i);
@@ -134,9 +144,11 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- unsigned long *dr7;
+ unsigned long dr7;
int i;
+ lockdep_assert_irqs_disabled();
+
for (i = 0; i < HBP_NUM; i++) {
struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
@@ -149,12 +161,20 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
return;
- dr7 = this_cpu_ptr(&cpu_dr7);
- *dr7 &= ~__encode_dr7(i, info->len, info->type);
+ dr7 = this_cpu_read(cpu_dr7);
+ dr7 &= ~__encode_dr7(i, info->len, info->type);
- set_debugreg(*dr7, 7);
+ set_debugreg(dr7, 7);
if (info->mask)
set_dr_addr_mask(0, i);
+
+ /*
+	 * Ensure the write to cpu_dr7 happens after the DR7 register is set.
+	 * This ensures an NMI never sees cpu_dr7 as zero while DR7 is armed.
+ */
+ barrier();
+
+ this_cpu_write(cpu_dr7, dr7);
}
static int arch_bp_generic_len(int x86_len)
@@ -227,10 +247,76 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
}
+/*
+ * Checks whether the range [addr, end] overlaps the area [base, base + size).
+ */
+static inline bool within_area(unsigned long addr, unsigned long end,
+ unsigned long base, unsigned long size)
+{
+ return end >= base && addr < (base + size);
+}
+
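Note the mixed interval convention: [addr, end] is closed while [base, base + size) is half-open, which the two comparisons encode exactly. A self-contained copy with worked cases:

    #include <assert.h>
    #include <stdbool.h>

    static bool within_area(unsigned long addr, unsigned long end,
                            unsigned long base, unsigned long size)
    {
        return end >= base && addr < (base + size);
    }

    int main(void)
    {
        assert(within_area(0x1000, 0x1fff, 0x1fff, 0x10));   /* one-byte overlap */
        assert(!within_area(0x1000, 0x1fff, 0x2000, 0x10));  /* adjacent, no overlap */
        return 0;
    }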
+/*
+ * Checks whether the range from addr to end, inclusive, overlaps the fixed
+ * mapped CPU entry area range or other ranges used for CPU entry.
+ */
+static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
+{
+ int cpu;
+
+	/* The CPU entry area is always used for CPU entry */
+ if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
+ CPU_ENTRY_AREA_TOTAL_SIZE))
+ return true;
+
+ for_each_possible_cpu(cpu) {
+ /* The original rw GDT is being used after load_direct_gdt() */
+ if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
+ GDT_SIZE))
+ return true;
+
+ /*
+ * cpu_tss_rw is not directly referenced by hardware, but
+ * cpu_tss_rw is also used in CPU entry code,
+ */
+ if (within_area(addr, end,
+ (unsigned long)&per_cpu(cpu_tss_rw, cpu),
+ sizeof(struct tss_struct)))
+ return true;
+
+ /*
+ * cpu_tlbstate.user_pcid_flush_mask is used for CPU entry.
+	 * A data breakpoint on it would cause an unwanted #DB.
+ * Protect the full cpu_tlbstate structure to be sure.
+ */
+ if (within_area(addr, end,
+ (unsigned long)&per_cpu(cpu_tlbstate, cpu),
+ sizeof(struct tlb_state)))
+ return true;
+ }
+
+ return false;
+}
+
static int arch_build_bp_info(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{
+ unsigned long bp_end;
+
+ bp_end = attr->bp_addr + attr->bp_len - 1;
+ if (bp_end < attr->bp_addr)
+ return -EINVAL;
+
+ /*
+ * Prevent any breakpoint of any type that overlaps the CPU
+ * entry area and data. This protects the IST stacks and also
+ * reduces the chance that we ever find out what happens if
+ * there's a data breakpoint on the GDT, IDT, or TSS.
+ */
+ if (within_cpu_entry(attr->bp_addr, bp_end))
+ return -EINVAL;
+
hw->address = attr->bp_addr;
hw->mask = 0;
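The first rejection catches a length that wraps the address space: bp_addr + bp_len - 1 overflows back below bp_addr. For example, assuming 64-bit longs:

    #include <assert.h>
    #include <stdbool.h>

    static bool bp_range_wraps(unsigned long bp_addr, unsigned long bp_len)
    {
        return bp_addr + bp_len - 1 < bp_addr;   /* unsigned wrap check */
    }

    int main(void)
    {
        assert(bp_range_wraps(~0UL, 2));    /* end wraps to 0: reject */
        assert(!bp_range_wraps(~0UL, 1));   /* end == bp_addr: fine */
        return 0;
    }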
@@ -439,7 +525,7 @@ static int hw_breakpoint_handler(struct die_args *args)
{
int i, cpu, rc = NOTIFY_STOP;
struct perf_event *bp;
- unsigned long dr7, dr6;
+ unsigned long dr6;
unsigned long *dr6_p;
/* The DR6 value is pointed by args->err */
@@ -454,9 +540,6 @@ static int hw_breakpoint_handler(struct die_args *args)
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
- get_debugreg(dr7, 7);
- /* Disable breakpoints during exception handling */
- set_debugreg(0UL, 7);
/*
* Assert that local interrupts are disabled
* Reset the DRn bits in the virtualized register value.
@@ -513,7 +596,6 @@ static int hw_breakpoint_handler(struct die_args *args)
(dr6 & (~DR_TRAP_BITS)))
rc = NOTIFY_DONE;
- set_debugreg(dr7, 7);
put_cpu();
return rc;
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 519649ddf100..f3c76252247d 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -15,11 +15,11 @@
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/pgtable.h>
#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 87ef69a72c52..0db21206f2f3 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -4,6 +4,8 @@
*/
#include <linux/interrupt.h>
+#include <asm/cpu_entry_area.h>
+#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/proto.h>
#include <asm/desc.h>
@@ -51,15 +53,23 @@ struct idt_data {
#define TSKG(_vector, _gdt) \
G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3)
+#define IDT_TABLE_SIZE (IDT_ENTRIES * sizeof(gate_desc))
+
+static bool idt_setup_done __initdata;
+
/*
* Early traps running on the DEFAULT_STACK because the other interrupt
* stacks work only after cpu_init().
*/
static const __initconst struct idt_data early_idts[] = {
- INTG(X86_TRAP_DB, debug),
- SYSG(X86_TRAP_BP, int3),
+ INTG(X86_TRAP_DB, asm_exc_debug),
+ SYSG(X86_TRAP_BP, asm_exc_int3),
+
#ifdef CONFIG_X86_32
- INTG(X86_TRAP_PF, page_fault),
+ /*
+ * Not possible on 64-bit. See idt_setup_early_pf() for details.
+ */
+ INTG(X86_TRAP_PF, asm_exc_page_fault),
#endif
};
@@ -70,33 +80,33 @@ static const __initconst struct idt_data early_idts[] = {
* set up TSS.
*/
static const __initconst struct idt_data def_idts[] = {
- INTG(X86_TRAP_DE, divide_error),
- INTG(X86_TRAP_NMI, nmi),
- INTG(X86_TRAP_BR, bounds),
- INTG(X86_TRAP_UD, invalid_op),
- INTG(X86_TRAP_NM, device_not_available),
- INTG(X86_TRAP_OLD_MF, coprocessor_segment_overrun),
- INTG(X86_TRAP_TS, invalid_TSS),
- INTG(X86_TRAP_NP, segment_not_present),
- INTG(X86_TRAP_SS, stack_segment),
- INTG(X86_TRAP_GP, general_protection),
- INTG(X86_TRAP_SPURIOUS, spurious_interrupt_bug),
- INTG(X86_TRAP_MF, coprocessor_error),
- INTG(X86_TRAP_AC, alignment_check),
- INTG(X86_TRAP_XF, simd_coprocessor_error),
+ INTG(X86_TRAP_DE, asm_exc_divide_error),
+ INTG(X86_TRAP_NMI, asm_exc_nmi),
+ INTG(X86_TRAP_BR, asm_exc_bounds),
+ INTG(X86_TRAP_UD, asm_exc_invalid_op),
+ INTG(X86_TRAP_NM, asm_exc_device_not_available),
+ INTG(X86_TRAP_OLD_MF, asm_exc_coproc_segment_overrun),
+ INTG(X86_TRAP_TS, asm_exc_invalid_tss),
+ INTG(X86_TRAP_NP, asm_exc_segment_not_present),
+ INTG(X86_TRAP_SS, asm_exc_stack_segment),
+ INTG(X86_TRAP_GP, asm_exc_general_protection),
+ INTG(X86_TRAP_SPURIOUS, asm_exc_spurious_interrupt_bug),
+ INTG(X86_TRAP_MF, asm_exc_coprocessor_error),
+ INTG(X86_TRAP_AC, asm_exc_alignment_check),
+ INTG(X86_TRAP_XF, asm_exc_simd_coprocessor_error),
#ifdef CONFIG_X86_32
TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS),
#else
- INTG(X86_TRAP_DF, double_fault),
+ INTG(X86_TRAP_DF, asm_exc_double_fault),
#endif
- INTG(X86_TRAP_DB, debug),
+ INTG(X86_TRAP_DB, asm_exc_debug),
#ifdef CONFIG_X86_MCE
- INTG(X86_TRAP_MC, &machine_check),
+ INTG(X86_TRAP_MC, asm_exc_machine_check),
#endif
- SYSG(X86_TRAP_OF, overflow),
+ SYSG(X86_TRAP_OF, asm_exc_overflow),
#if defined(CONFIG_IA32_EMULATION)
SYSG(IA32_SYSCALL_VECTOR, entry_INT80_compat),
#elif defined(CONFIG_X86_32)
@@ -109,95 +119,63 @@ static const __initconst struct idt_data def_idts[] = {
*/
static const __initconst struct idt_data apic_idts[] = {
#ifdef CONFIG_SMP
- INTG(RESCHEDULE_VECTOR, reschedule_interrupt),
- INTG(CALL_FUNCTION_VECTOR, call_function_interrupt),
- INTG(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt),
- INTG(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt),
- INTG(REBOOT_VECTOR, reboot_interrupt),
+ INTG(RESCHEDULE_VECTOR, asm_sysvec_reschedule_ipi),
+ INTG(CALL_FUNCTION_VECTOR, asm_sysvec_call_function),
+ INTG(CALL_FUNCTION_SINGLE_VECTOR, asm_sysvec_call_function_single),
+ INTG(IRQ_MOVE_CLEANUP_VECTOR, asm_sysvec_irq_move_cleanup),
+ INTG(REBOOT_VECTOR, asm_sysvec_reboot),
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
- INTG(THERMAL_APIC_VECTOR, thermal_interrupt),
+ INTG(THERMAL_APIC_VECTOR, asm_sysvec_thermal),
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
- INTG(THRESHOLD_APIC_VECTOR, threshold_interrupt),
+ INTG(THRESHOLD_APIC_VECTOR, asm_sysvec_threshold),
#endif
#ifdef CONFIG_X86_MCE_AMD
- INTG(DEFERRED_ERROR_VECTOR, deferred_error_interrupt),
+ INTG(DEFERRED_ERROR_VECTOR, asm_sysvec_deferred_error),
#endif
#ifdef CONFIG_X86_LOCAL_APIC
- INTG(LOCAL_TIMER_VECTOR, apic_timer_interrupt),
- INTG(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi),
+ INTG(LOCAL_TIMER_VECTOR, asm_sysvec_apic_timer_interrupt),
+ INTG(X86_PLATFORM_IPI_VECTOR, asm_sysvec_x86_platform_ipi),
# ifdef CONFIG_HAVE_KVM
- INTG(POSTED_INTR_VECTOR, kvm_posted_intr_ipi),
- INTG(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi),
- INTG(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi),
+ INTG(POSTED_INTR_VECTOR, asm_sysvec_kvm_posted_intr_ipi),
+ INTG(POSTED_INTR_WAKEUP_VECTOR, asm_sysvec_kvm_posted_intr_wakeup_ipi),
+ INTG(POSTED_INTR_NESTED_VECTOR, asm_sysvec_kvm_posted_intr_nested_ipi),
# endif
# ifdef CONFIG_IRQ_WORK
- INTG(IRQ_WORK_VECTOR, irq_work_interrupt),
+ INTG(IRQ_WORK_VECTOR, asm_sysvec_irq_work),
# endif
-#ifdef CONFIG_X86_UV
- INTG(UV_BAU_MESSAGE, uv_bau_message_intr1),
-#endif
- INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt),
- INTG(ERROR_APIC_VECTOR, error_interrupt),
+# ifdef CONFIG_X86_UV
+ INTG(UV_BAU_MESSAGE, asm_sysvec_uv_bau_message),
+# endif
+ INTG(SPURIOUS_APIC_VECTOR, asm_sysvec_spurious_apic_interrupt),
+ INTG(ERROR_APIC_VECTOR, asm_sysvec_error_interrupt),
#endif
};
-#ifdef CONFIG_X86_64
-/*
- * Early traps running on the DEFAULT_STACK because the other interrupt
- * stacks work only after cpu_init().
- */
-static const __initconst struct idt_data early_pf_idts[] = {
- INTG(X86_TRAP_PF, page_fault),
-};
-
-/*
- * Override for the debug_idt. Same as the default, but with interrupt
- * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
- */
-static const __initconst struct idt_data dbg_idts[] = {
- INTG(X86_TRAP_DB, debug),
-};
-#endif
-
-/* Must be page-aligned because the real IDT is used in a fixmap. */
-gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;
+/* Must be page-aligned because the real IDT is used in the cpu entry area */
+static gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;
struct desc_ptr idt_descr __ro_after_init = {
- .size = (IDT_ENTRIES * 2 * sizeof(unsigned long)) - 1,
+ .size = IDT_TABLE_SIZE - 1,
.address = (unsigned long) idt_table,
};
-#ifdef CONFIG_X86_64
-/* No need to be aligned, but done to keep all IDTs defined the same way. */
-gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
-
-/*
- * The exceptions which use Interrupt stacks. They are setup after
- * cpu_init() when the TSS has been initialized.
- */
-static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, debug, IST_INDEX_DB),
- ISTG(X86_TRAP_NMI, nmi, IST_INDEX_NMI),
- ISTG(X86_TRAP_DF, double_fault, IST_INDEX_DF),
-#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, &machine_check, IST_INDEX_MCE),
-#endif
-};
+void load_current_idt(void)
+{
+ lockdep_assert_irqs_disabled();
+ load_idt(&idt_descr);
+}
-/*
- * Override for the debug_idt. Same as the default, but with interrupt
- * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
- */
-const struct desc_ptr debug_idt_descr = {
- .size = IDT_ENTRIES * 16 - 1,
- .address = (unsigned long) debug_idt_table,
-};
+#ifdef CONFIG_X86_F00F_BUG
+bool idt_is_f00f_address(unsigned long address)
+{
+ return ((address - idt_descr.address) >> 3) == 6;
+}
#endif
static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
@@ -214,7 +192,7 @@ static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
#endif
}
-static void
+static __init void
idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
{
gate_desc desc;
@@ -227,7 +205,7 @@ idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sy
}
}
-static void set_intr_gate(unsigned int n, const void *addr)
+static __init void set_intr_gate(unsigned int n, const void *addr)
{
struct idt_data data;
@@ -266,6 +244,27 @@ void __init idt_setup_traps(void)
}
#ifdef CONFIG_X86_64
+/*
+ * Early traps running on the DEFAULT_STACK because the other interrupt
+ * stacks work only after cpu_init().
+ */
+static const __initconst struct idt_data early_pf_idts[] = {
+ INTG(X86_TRAP_PF, asm_exc_page_fault),
+};
+
+/*
+ * The exceptions which use Interrupt stacks. They are setup after
+ * cpu_init() when the TSS has been initialized.
+ */
+static const __initconst struct idt_data ist_idts[] = {
+ ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
+ ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
+ ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
+#ifdef CONFIG_X86_MCE
+ ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
+#endif
+};
+
/**
* idt_setup_early_pf - Initialize the idt table with early pagefault handler
*
@@ -273,8 +272,10 @@ void __init idt_setup_traps(void)
* cpu_init() is invoked and sets up TSS. The IST variant is installed
* after that.
*
- * FIXME: Why is 32bit and 64bit installing the PF handler at different
- * places in the early setup code?
+ * Note that X86_64 cannot install the real #PF handler in
+ * idt_setup_early_traps() because the memory initialization needs the #PF
+ * handler from the early_idt_handler_array to initialize the early page
+ * tables.
*/
void __init idt_setup_early_pf(void)
{
@@ -289,17 +290,20 @@ void __init idt_setup_ist_traps(void)
{
idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true);
}
+#endif
-/**
- * idt_setup_debugidt_traps - Initialize the debug idt table with debug traps
- */
-void __init idt_setup_debugidt_traps(void)
+static void __init idt_map_in_cea(void)
{
- memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
-
- idt_setup_from_table(debug_idt_table, dbg_idts, ARRAY_SIZE(dbg_idts), false);
+ /*
+ * Set the IDT descriptor to a fixed read-only location in the cpu
+ * entry area, so that the "sidt" instruction will not leak the
+ * location of the kernel, and to defend the IDT against arbitrary
+ * memory write vulnerabilities.
+ */
+ cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
+ PAGE_KERNEL_RO);
+ idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
}
-#endif
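With the descriptor rewritten to the fixed CPU-entry-area alias, an sidt executed from user space (possible on CPUs without UMIP) reveals only that constant address rather than the kernel image layout. A userspace probe showing what the instruction exposes, for x86-64 with GCC:

    #include <stdint.h>
    #include <stdio.h>

    struct __attribute__((packed)) idtr_sketch {
        uint16_t limit;
        uint64_t base;
    };

    int main(void)
    {
        struct idtr_sketch idtr;

        /* Faults if UMIP is enabled; otherwise returns the IDT base. */
        asm volatile("sidt %0" : "=m"(idtr));
        printf("IDT base: %#llx limit: %u\n",
               (unsigned long long)idtr.base, (unsigned)idtr.limit);
        return 0;
    }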
/**
* idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates
@@ -318,11 +322,23 @@ void __init idt_setup_apic_and_irq_gates(void)
#ifdef CONFIG_X86_LOCAL_APIC
for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
- set_bit(i, system_vectors);
+ /*
+		 * Don't set the unassigned system vectors in the
+ * system_vectors bitmap. Otherwise they show up in
+ * /proc/interrupts.
+ */
entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
set_intr_gate(i, entry);
}
#endif
+ /* Map IDT into CPU entry area and reload it. */
+ idt_map_in_cea();
+ load_idt(&idt_descr);
+
+ /* Make the IDT table read only */
+ set_memory_ro((unsigned long)&idt_table, 1);
+
+ idt_setup_done = true;
}
/**
@@ -352,16 +368,14 @@ void idt_invalidate(void *addr)
load_idt(&idt);
}
-void __init update_intr_gate(unsigned int n, const void *addr)
+void __init alloc_intr_gate(unsigned int n, const void *addr)
{
- if (WARN_ON_ONCE(!test_bit(n, system_vectors)))
+ if (WARN_ON(n < FIRST_SYSTEM_VECTOR))
return;
- set_intr_gate(n, addr);
-}
-void alloc_intr_gate(unsigned int n, const void *addr)
-{
- BUG_ON(n < FIRST_SYSTEM_VECTOR);
- if (!test_and_set_bit(n, system_vectors))
+ if (WARN_ON(idt_setup_done))
+ return;
+
+ if (!WARN_ON(test_and_set_bit(n, system_vectors)))
set_intr_gate(n, addr);
}
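Allocation is now init-only and warns instead of BUGs. A caller claims its vector once during platform setup, before idt_setup_apic_and_irq_gates() seals the table, as the Hyper-V hunk earlier in this patch does; a hypothetical caller in the same shape:

    /* Hypothetical platform hook; the feature test is a placeholder. */
    static void __init example_platform_setup(void)
    {
        if (example_platform_has_callback())
            alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
                            asm_sysvec_hyperv_callback);
    }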
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index c7965ff429c5..181060247e3c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -13,12 +13,14 @@
#include <linux/export.h>
#include <linux/irq.h>
+#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
+#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>
@@ -26,9 +28,6 @@
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
-DEFINE_PER_CPU(struct pt_regs *, irq_regs);
-EXPORT_PER_CPU_SYMBOL(irq_regs);
-
atomic_t irq_err_count;
/*
@@ -224,35 +223,35 @@ u64 arch_irq_stat(void)
return sum;
}
+static __always_inline void handle_irq(struct irq_desc *desc,
+ struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_X86_64))
+ run_on_irqstack_cond(desc->handle_irq, desc, regs);
+ else
+ __handle_irq(desc, regs);
+}
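On 64-bit the hard-IRQ handler now runs on the per-CPU IRQ stack unless entry already happened there; 32-bit keeps its own stack switching in __handle_irq(). A schematic of the conditional switch, with hypothetical helpers standing in for the real inline-asm thunk:

    /* on_irqstack() and call_on_irqstack() are hypothetical stand-ins. */
    static void run_on_irqstack_cond_sketch(void (*func)(void *), void *arg)
    {
        if (on_irqstack())                /* already on it: plain call */
            func(arg);
        else
            call_on_irqstack(func, arg);  /* switch RSP, call, switch back */
    }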
/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
+ * common_interrupt() handles all normal device IRQ's (the special SMP
+ * cross-CPU interrupts have their own entry points).
*/
-__visible void __irq_entry do_IRQ(struct pt_regs *regs)
+DEFINE_IDTENTRY_IRQ(common_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct irq_desc * desc;
- /* high bit used in ret_from_ code */
- unsigned vector = ~regs->orig_ax;
-
- entering_irq();
+ struct irq_desc *desc;
- /* entering_irq() tells RCU that we're not quiescent. Check it. */
+ /* entry code tells RCU that we're not quiescent. Check it. */
RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
desc = __this_cpu_read(vector_irq[vector]);
if (likely(!IS_ERR_OR_NULL(desc))) {
- if (IS_ENABLED(CONFIG_X86_32))
- handle_irq(desc, regs);
- else
- generic_handle_irq_desc(desc);
+ handle_irq(desc, regs);
} else {
ack_APIC_irq();
if (desc == VECTOR_UNUSED) {
- pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
+ pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
__func__, smp_processor_id(),
vector);
} else {
@@ -260,8 +259,6 @@ __visible void __irq_entry do_IRQ(struct pt_regs *regs)
}
}
- exiting_irq();
-
set_irq_regs(old_regs);
}
@@ -271,17 +268,16 @@ void (*x86_platform_ipi_callback)(void) = NULL;
/*
* Handler for X86_PLATFORM_IPI_VECTOR.
*/
-__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- entering_ack_irq();
+ ack_APIC_irq();
trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
inc_irq_stat(x86_platform_ipis);
if (x86_platform_ipi_callback)
x86_platform_ipi_callback();
trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
- exiting_irq();
set_irq_regs(old_regs);
}
#endif
@@ -302,41 +298,29 @@ EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
/*
* Handler for POSTED_INTERRUPT_VECTOR.
*/
-__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(kvm_posted_intr_ipis);
- exiting_irq();
- set_irq_regs(old_regs);
}
/*
* Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
*/
-__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(kvm_posted_intr_wakeup_ipis);
kvm_posted_intr_wakeup_handler();
- exiting_irq();
- set_irq_regs(old_regs);
}
/*
* Handler for POSTED_INTERRUPT_NESTED_VECTOR.
*/
-__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
- struct pt_regs *old_regs = set_irq_regs(regs);
-
- entering_ack_irq();
+ ack_APIC_irq();
inc_irq_stat(kvm_posted_intr_nested_ipis);
- exiting_irq();
- set_irq_regs(old_regs);
}
#endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index a759ca97cd01..0b79efc87be5 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -148,7 +148,7 @@ void do_softirq_own_stack(void)
call_on_stack(__do_softirq, isp);
}
-void handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
int overflow = check_stack_overflow();
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 6b32ab009c19..1b4fe93a86c5 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -20,6 +20,7 @@
#include <linux/sched/task_stack.h>
#include <asm/cpu_entry_area.h>
+#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -70,3 +71,8 @@ int irq_init_percpu_irqstack(unsigned int cpu)
return 0;
return map_irq_stack(cpu);
}
+
+void do_softirq_own_stack(void)
+{
+ run_on_irqstack_cond(__do_softirq, NULL, NULL);
+}
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 80bee7695a20..890d4778cd35 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -9,18 +9,18 @@
#include <linux/irq_work.h>
#include <linux/hardirq.h>
#include <asm/apic.h>
+#include <asm/idtentry.h>
#include <asm/trace/irq_vectors.h>
#include <linux/interrupt.h>
#ifdef CONFIG_X86_LOCAL_APIC
-__visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_irq_work)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
trace_irq_work_entry(IRQ_WORK_VECTOR);
inc_irq_stat(apic_irq_work_irqs);
irq_work_run();
trace_irq_work_exit(IRQ_WORK_VECTOR);
- exiting_irq();
}
void arch_irq_work_raise(void)
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 5aa523c2d573..dd73135d7cee 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -16,11 +16,11 @@
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/pgtable.h>
#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
-#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4d7022a740ab..3bafe1bd4dc7 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -41,11 +41,11 @@
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
@@ -1073,13 +1073,6 @@ NOKPROBE_SYMBOL(kprobe_fault_handler);
int __init arch_populate_kprobe_blacklist(void)
{
- int ret;
-
- ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
- (unsigned long)__irqentry_text_end);
- if (ret)
- return ret;
-
return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
(unsigned long)__entry_text_end);
}
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index ea13f6888284..321c19950285 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -16,11 +16,11 @@
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
+#include <linux/pgtable.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
@@ -286,9 +286,7 @@ static int can_optimize(unsigned long paddr)
* stack handling and registers setup.
*/
if (((paddr >= (unsigned long)__entry_text_start) &&
- (paddr < (unsigned long)__entry_text_end)) ||
- ((paddr >= (unsigned long)__irqentry_text_start) &&
- (paddr < (unsigned long)__irqentry_text_end)))
+ (paddr < (unsigned long)__entry_text_end)))
return 0;
/* Check there is enough space for a relative jump. */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d6f22a3a1f7d..df63786e7bfa 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
-#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
@@ -218,7 +217,7 @@ again:
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
-u32 kvm_read_and_reset_apf_flags(void)
+noinstr u32 kvm_read_and_reset_apf_flags(void)
{
u32 flags = 0;
@@ -230,11 +229,11 @@ u32 kvm_read_and_reset_apf_flags(void)
return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
-NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);
-bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
u32 reason = kvm_read_and_reset_apf_flags();
+ bool rcu_exit;
switch (reason) {
case KVM_PV_REASON_PAGE_NOT_PRESENT:
@@ -244,6 +243,9 @@ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
return false;
}
+ rcu_exit = idtentry_enter_cond_rcu(regs);
+ instrumentation_begin();
+
/*
* If the host managed to inject an async #PF into an interrupt
* disabled region, then die hard as this is not going to end well
@@ -258,13 +260,13 @@ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
/* Page is swapped out by the host. */
kvm_async_pf_task_wait_schedule(token);
} else {
- rcu_irq_enter();
kvm_async_pf_task_wake(token);
- rcu_irq_exit();
}
+
+ instrumentation_end();
+ idtentry_exit_cond_rcu(regs, rcu_exit);
return true;
}
-NOKPROBE_SYMBOL(__kvm_handle_async_pf);
static void __init paravirt_ops_setup(void)
{
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 84c3ba32f211..8748321c4486 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -8,7 +8,7 @@
*
* Lock order:
* contex.ldt_usr_sem
- * mmap_sem
+ * mmap_lock
* context.lock
*/
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 02bddfc122a4..64b00b0d7fe8 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -13,7 +13,6 @@
#include <linux/gfp.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index ad5cdd6a5f23..a29a44a98e5b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -19,7 +19,6 @@
#include <linux/efi.h>
#include <asm/init.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 23c95a53d20e..34b153cbd4ac 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -22,7 +22,6 @@
#include <asm/text-patching.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/unwind.h>
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index bdcc5146de96..2de365f15684 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -303,7 +303,7 @@ NOKPROBE_SYMBOL(unknown_nmi_error);
static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
-static void default_do_nmi(struct pt_regs *regs)
+static noinstr void default_do_nmi(struct pt_regs *regs)
{
unsigned char reason = 0;
int handled;
@@ -329,6 +329,9 @@ static void default_do_nmi(struct pt_regs *regs)
__this_cpu_write(last_nmi_rip, regs->ip);
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+
handled = nmi_handle(NMI_LOCAL, regs);
__this_cpu_add(nmi_stats.normal, handled);
if (handled) {
@@ -342,7 +345,7 @@ static void default_do_nmi(struct pt_regs *regs)
*/
if (handled > 1)
__this_cpu_write(swallow_nmi, true);
- return;
+ goto out;
}
/*
@@ -374,7 +377,7 @@ static void default_do_nmi(struct pt_regs *regs)
#endif
__this_cpu_add(nmi_stats.external, 1);
raw_spin_unlock(&nmi_reason_lock);
- return;
+ goto out;
}
raw_spin_unlock(&nmi_reason_lock);
@@ -412,8 +415,12 @@ static void default_do_nmi(struct pt_regs *regs)
__this_cpu_add(nmi_stats.swallow, 1);
else
unknown_nmi_error(reason, regs);
+
+out:
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
}
-NOKPROBE_SYMBOL(default_do_nmi);
/*
* NMIs can page fault or hit breakpoints which will cause it to lose
@@ -467,44 +474,9 @@ enum nmi_states {
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
static DEFINE_PER_CPU(unsigned long, nmi_cr2);
+static DEFINE_PER_CPU(unsigned long, nmi_dr7);
-#ifdef CONFIG_X86_64
-/*
- * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
- * some care, the inner breakpoint will clobber the outer breakpoint's
- * stack.
- *
- * If a breakpoint is being processed, and the debug stack is being
- * used, if an NMI comes in and also hits a breakpoint, the stack
- * pointer will be set to the same fixed address as the breakpoint that
- * was interrupted, causing that stack to be corrupted. To handle this
- * case, check if the stack that was interrupted is the debug stack, and
- * if so, change the IDT so that new breakpoints will use the current
- * stack and not switch to the fixed address. On return of the NMI,
- * switch back to the original IDT.
- */
-static DEFINE_PER_CPU(int, update_debug_stack);
-
-static bool notrace is_debug_stack(unsigned long addr)
-{
- struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
- unsigned long top = CEA_ESTACK_TOP(cs, DB);
- unsigned long bot = CEA_ESTACK_BOT(cs, DB1);
-
- if (__this_cpu_read(debug_stack_usage))
- return true;
- /*
- * Note, this covers the guard page between DB and DB1 as well to
- * avoid two checks. But by all means @addr can never point into
- * the guard page.
- */
- return addr >= bot && addr < top;
-}
-NOKPROBE_SYMBOL(is_debug_stack);
-#endif
-
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY_RAW(exc_nmi)
{
if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
return;
@@ -517,18 +489,7 @@ do_nmi(struct pt_regs *regs, long error_code)
this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:
-#ifdef CONFIG_X86_64
- /*
- * If we interrupted a breakpoint, it is possible that
- * the nmi handler will have breakpoints too. We need to
- * change the IDT such that breakpoints that happen here
- * continue to use the NMI stack.
- */
- if (unlikely(is_debug_stack(regs->sp))) {
- debug_stack_set_zero();
- this_cpu_write(update_debug_stack, 1);
- }
-#endif
+ this_cpu_write(nmi_dr7, local_db_save());
nmi_enter();
@@ -539,12 +500,7 @@ nmi_restart:
nmi_exit();
-#ifdef CONFIG_X86_64
- if (unlikely(this_cpu_read(update_debug_stack))) {
- debug_stack_reset();
- this_cpu_write(update_debug_stack, 0);
- }
-#endif
+ local_db_restore(this_cpu_read(nmi_dr7));
if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
write_cr2(this_cpu_read(nmi_cr2));
@@ -554,7 +510,6 @@ nmi_restart:
if (user_mode(regs))
mds_user_clear_cpu_buffers();
}
-NOKPROBE_SYMBOL(do_nmi);
void stop_nmi(void)
{
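The debug-stack/IDT switching removed above is replaced by disarming the breakpoints themselves; a minimal sketch of the save/restore pattern, using a hypothetical per-CPU slot:

    static DEFINE_PER_CPU(unsigned long, example_dr7);  /* hypothetical storage */

    static void nmi_like_body(struct pt_regs *regs)
    {
            /* Disarm all breakpoints so a #DB cannot recurse into us. */
            this_cpu_write(example_dr7, local_db_save());

            /* ... NMI work runs with DR7 cleared ... */

            /* Re-arm whatever the interrupted context had programmed. */
            local_db_restore(this_cpu_read(example_dr7));
    }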
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 5638e4ae2ea6..674a7d66d960 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -13,13 +13,13 @@
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
+#include <linux/pgtable.h>
#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8e3d0347b664..f362ce0d5ac0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -545,28 +545,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
lockdep_assert_irqs_disabled();
- /*
- * If TIF_SSBD is different, select the proper mitigation
- * method. Note that if SSBD mitigation is disabled or permanently
- * enabled this branch can't be taken because nothing can set
- * TIF_SSBD.
- */
- if (tif_diff & _TIF_SSBD) {
- if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ /* Handle change of TIF_SSBD depending on the mitigation method. */
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_ssb_virt_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ if (tif_diff & _TIF_SSBD)
amd_set_core_ssb_state(tifn);
- } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
- static_cpu_has(X86_FEATURE_AMD_SSBD)) {
- msr |= ssbd_tif_to_spec_ctrl(tifn);
- updmsr = true;
- }
+ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ updmsr |= !!(tif_diff & _TIF_SSBD);
+ msr |= ssbd_tif_to_spec_ctrl(tifn);
}
- /*
- * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
- * otherwise avoid the MSR write.
- */
+ /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
if (IS_ENABLED(CONFIG_SMP) &&
static_branch_unlikely(&switch_to_cond_stibp)) {
updmsr |= !!(tif_diff & _TIF_SPEC_IB);
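The restructured branches above compute the MSR image per feature and latch updmsr only when the relevant TIF bit actually flipped; a condensed sketch of that shape (helper names as used in the hunk, baseline value assumed):

    /* Sketch: write SPEC_CTRL only when a relevant TIF bit changed. */
    static void spec_ctrl_update_sketch(unsigned long tifp, unsigned long tifn)
    {
            unsigned long tif_diff = tifp ^ tifn;   /* bits that flipped */
            u64 msr = x86_spec_ctrl_base;           /* assumed baseline */
            bool updmsr = false;

            /* SSBD always contributes its bit; a write is needed only on a flip. */
            updmsr |= !!(tif_diff & _TIF_SSBD);
            msr |= ssbd_tif_to_spec_ctrl(tifn);

            /* Conditional STIBP is handled the same way. */
            updmsr |= !!(tif_diff & _TIF_SPEC_IB);
            msr |= stibp_tif_to_spec_ctrl(tifn);

            if (updmsr)
                    wrmsrl(MSR_IA32_SPEC_CTRL, msr);
    }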
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 538d4e8d6589..acfd6d2a0cbf 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -39,7 +39,6 @@
#include <linux/kdebug.h>
#include <linux/syscalls.h>
-#include <asm/pgtable.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0c169a5687e1..9a97415b2139 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -40,7 +40,6 @@
#include <linux/ftrace.h>
#include <linux/syscalls.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index f0e1ddbc2fd7..44130588987f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -28,7 +28,6 @@
#include <linux/nospec.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 3ca43be4f9cf..0ec7ced727fe 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -11,13 +11,13 @@
#include <linux/tboot.h>
#include <linux/delay.h>
#include <linux/frame.h>
+#include <linux/pgtable.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/reboot_fixups.h>
#include <asm/reboot.h>
@@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
+ { /* Handle problems with rebooting on Apple MacBook6,1 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBook6,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+ },
+ },
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index b8d4e9c3c070..eff4ce3b10da 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
+#include <asm/idtentry.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
@@ -130,13 +131,11 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
/*
 * This function calls the 'stop' function on all other CPUs in the system.
*/
-
-asmlinkage __visible void smp_reboot_interrupt(void)
+DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
cpu_emergency_vmxoff();
stop_this_cpu(NULL);
- irq_exit();
}
static int register_stop_handler(void)
@@ -221,47 +220,33 @@ static void native_stop_other_cpus(int wait)
/*
* Reschedule call back. KVM uses this interrupt to force a cpu out of
- * guest mode
+ * guest mode.
*/
-__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
{
ack_APIC_irq();
+ trace_reschedule_entry(RESCHEDULE_VECTOR);
inc_irq_stat(irq_resched_count);
- kvm_set_cpu_l1tf_flush_l1d();
-
- if (trace_resched_ipi_enabled()) {
- /*
- * scheduler_ipi() might call irq_enter() as well, but
- * nested calls are fine.
- */
- irq_enter();
- trace_reschedule_entry(RESCHEDULE_VECTOR);
- scheduler_ipi();
- trace_reschedule_exit(RESCHEDULE_VECTOR);
- irq_exit();
- return;
- }
scheduler_ipi();
+ trace_reschedule_exit(RESCHEDULE_VECTOR);
}
-__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
trace_call_function_entry(CALL_FUNCTION_VECTOR);
inc_irq_stat(irq_call_count);
generic_smp_call_function_interrupt();
trace_call_function_exit(CALL_FUNCTION_VECTOR);
- exiting_irq();
}
-__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
+DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single)
{
- ipi_entering_ack_irq();
+ ack_APIC_irq();
trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
inc_irq_stat(irq_call_count);
generic_smp_call_function_single_interrupt();
trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
- exiting_irq();
}
static int __init nonmi_ipi_setup(char *str)
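Each conversion above has the same shape: the open-coded irq_enter()/irq_exit() (or the entering/exiting helpers) moves into the DEFINE_IDTENTRY_SYSVEC wrapper, leaving only the ACK and the payload. A minimal sketch with a hypothetical vector:

    DEFINE_IDTENTRY_SYSVEC(sysvec_example)          /* hypothetical */
    {
            ack_APIC_irq();                 /* wrapper already did the irq entry */
            inc_irq_stat(irq_call_count);   /* accounting, as in the real handlers */
            /* ... payload ... */
    }                                       /* wrapper handles irq exit on return */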
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2467f3dd35d3..ffbd9a3d78d8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -55,6 +55,7 @@
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>
+#include <linux/pgtable.h>
#include <asm/acpi.h>
#include <asm/desc.h>
@@ -63,7 +64,6 @@
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
diff --git a/arch/x86/kernel/sys_ia32.c b/arch/x86/kernel/sys_ia32.c
index ab03fede1422..f8d65c99feb8 100644
--- a/arch/x86/kernel/sys_ia32.c
+++ b/arch/x86/kernel/sys_ia32.c
@@ -135,26 +135,30 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
typeof(ubuf->st_gid) gid = 0;
SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
- if (!access_ok(ubuf, sizeof(struct stat64)) ||
- __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
- __put_user(stat->ino, &ubuf->__st_ino) ||
- __put_user(stat->ino, &ubuf->st_ino) ||
- __put_user(stat->mode, &ubuf->st_mode) ||
- __put_user(stat->nlink, &ubuf->st_nlink) ||
- __put_user(uid, &ubuf->st_uid) ||
- __put_user(gid, &ubuf->st_gid) ||
- __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
- __put_user(stat->size, &ubuf->st_size) ||
- __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
- __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
- __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
- __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
- __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
- __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
- __put_user(stat->blksize, &ubuf->st_blksize) ||
- __put_user(stat->blocks, &ubuf->st_blocks))
+ if (!user_write_access_begin(ubuf, sizeof(struct stat64)))
return -EFAULT;
+ unsafe_put_user(huge_encode_dev(stat->dev), &ubuf->st_dev, Efault);
+ unsafe_put_user(stat->ino, &ubuf->__st_ino, Efault);
+ unsafe_put_user(stat->ino, &ubuf->st_ino, Efault);
+ unsafe_put_user(stat->mode, &ubuf->st_mode, Efault);
+ unsafe_put_user(stat->nlink, &ubuf->st_nlink, Efault);
+ unsafe_put_user(uid, &ubuf->st_uid, Efault);
+ unsafe_put_user(gid, &ubuf->st_gid, Efault);
+ unsafe_put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev, Efault);
+ unsafe_put_user(stat->size, &ubuf->st_size, Efault);
+ unsafe_put_user(stat->atime.tv_sec, &ubuf->st_atime, Efault);
+ unsafe_put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec, Efault);
+ unsafe_put_user(stat->mtime.tv_sec, &ubuf->st_mtime, Efault);
+ unsafe_put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec, Efault);
+ unsafe_put_user(stat->ctime.tv_sec, &ubuf->st_ctime, Efault);
+ unsafe_put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec, Efault);
+ unsafe_put_user(stat->blksize, &ubuf->st_blksize, Efault);
+ unsafe_put_user(stat->blocks, &ubuf->st_blocks, Efault);
+	user_write_access_end();
return 0;
+Efault:
+ user_write_access_end();
+ return -EFAULT;
}
COMPAT_SYSCALL_DEFINE2(ia32_stat64, const char __user *, filename,
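The conversion above trades one access_ok() plus a chain of ||'ed __put_user() calls for a single user_write_access_begin() window with label-based fault handling; a minimal sketch of the pattern, with a hypothetical two-field struct:

    struct example_u {                      /* hypothetical userspace layout */
            u32 a;
            u32 b;
    };

    static int fill_example(struct example_u __user *ubuf, u32 a, u32 b)
    {
            if (!user_write_access_begin(ubuf, sizeof(*ubuf)))
                    return -EFAULT;
            unsafe_put_user(a, &ubuf->a, Efault);   /* jumps to Efault on fault */
            unsafe_put_user(b, &ubuf->b, Efault);
            user_write_access_end();
            return 0;
    Efault:
            user_write_access_end();
            return -EFAULT;
    }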
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index b2942b2dbfcf..992fb1415c0f 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -23,7 +23,6 @@
#include <asm/realmode.h>
#include <asm/processor.h>
#include <asm/bootparam.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/swiotlb.h>
#include <asm/fixmap.h>
@@ -94,7 +93,7 @@ static struct mm_struct tboot_mm = {
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
- .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
+ MMAP_LOCK_INITIALIZER(init_mm)
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
};
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 371a6b348e44..e42faa792c07 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -25,10 +25,6 @@
#include <asm/hpet.h>
#include <asm/time.h>
-#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
-#endif
-
unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 496748ed266a..fcfc077afe2d 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -25,20 +25,3 @@ void trace_pagefault_unreg(void)
{
static_branch_dec(&trace_pagefault_key);
}
-
-#ifdef CONFIG_SMP
-
-DEFINE_STATIC_KEY_FALSE(trace_resched_ipi_key);
-
-int trace_resched_ipi_reg(void)
-{
- static_branch_inc(&trace_resched_ipi_key);
- return 0;
-}
-
-void trace_resched_ipi_unreg(void)
-{
- static_branch_dec(&trace_resched_ipi_key);
-}
-
-#endif
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4cc541051994..af75109485c2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -97,24 +97,6 @@ int is_valid_bugaddr(unsigned long addr)
return ud == INSN_UD0 || ud == INSN_UD2;
}
-int fixup_bug(struct pt_regs *regs, int trapnr)
-{
- if (trapnr != X86_TRAP_UD)
- return 0;
-
- switch (report_bug(regs->ip, regs)) {
- case BUG_TRAP_TYPE_NONE:
- case BUG_TRAP_TYPE_BUG:
- break;
-
- case BUG_TRAP_TYPE_WARN:
- regs->ip += LEN_UD2;
- return 1;
- }
-
- return 0;
-}
-
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
struct pt_regs *regs, long error_code)
@@ -145,7 +127,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
* process no chance to handle the signal and notice the
* kernel fault information, so that won't result in polluting
* the information about previously queued, but not yet
- * delivered, faults. See also do_general_protection below.
+ * delivered, faults. See also exc_general_protection below.
*/
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
@@ -190,41 +172,119 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
{
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- /*
- * WARN*()s end up here; fix them up before we call the
- * notifier chain.
- */
- if (!user_mode(regs) && fixup_bug(regs, trapnr))
- return;
-
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
cond_local_irq_enable(regs);
do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
+ cond_local_irq_disable(regs);
}
}
-#define IP ((void __user *)uprobe_get_trap_addr(regs))
-#define DO_ERROR(trapnr, signr, sicode, addr, str, name) \
-dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
-{ \
- do_error_trap(regs, error_code, str, trapnr, signr, sicode, addr); \
+/*
+ * POSIX requires the address of the faulting instruction to be provided
+ * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
+ *
+ * This address is usually regs->ip, but when an uprobe moved the code out
+ * of line then regs->ip points to the XOL code which would confuse
+ * anything which analyzes the fault address vs. the unmodified binary. If
+ * a trap happened in XOL code then uprobe maps regs->ip back to the
+ * original instruction address.
+ */
+static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
+{
+ return (void __user *)uprobe_get_trap_addr(regs);
}
-DO_ERROR(X86_TRAP_DE, SIGFPE, FPE_INTDIV, IP, "divide error", divide_error)
-DO_ERROR(X86_TRAP_OF, SIGSEGV, 0, NULL, "overflow", overflow)
-DO_ERROR(X86_TRAP_UD, SIGILL, ILL_ILLOPN, IP, "invalid opcode", invalid_op)
-DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, 0, NULL, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(X86_TRAP_TS, SIGSEGV, 0, NULL, "invalid TSS", invalid_TSS)
-DO_ERROR(X86_TRAP_NP, SIGBUS, 0, NULL, "segment not present", segment_not_present)
-DO_ERROR(X86_TRAP_SS, SIGBUS, 0, NULL, "stack segment", stack_segment)
-#undef IP
+DEFINE_IDTENTRY(exc_divide_error)
+{
+ do_error_trap(regs, 0, "divide_error", X86_TRAP_DE, SIGFPE,
+ FPE_INTDIV, error_get_trap_addr(regs));
+}
-dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_overflow)
{
- char *str = "alignment check";
+ do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
+}
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+#ifdef CONFIG_X86_F00F_BUG
+void handle_invalid_op(struct pt_regs *regs)
+#else
+static inline void handle_invalid_op(struct pt_regs *regs)
+#endif
+{
+ do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
+ ILL_ILLOPN, error_get_trap_addr(regs));
+}
+
+DEFINE_IDTENTRY_RAW(exc_invalid_op)
+{
+ bool rcu_exit;
+
+ /*
+ * Handle BUG/WARN like NMIs instead of like normal idtentries:
+ * if we bugged/warned in a bad RCU context, for example, the last
+ * thing we want is to BUG/WARN again in the idtentry code, ad
+ * infinitum.
+ */
+ if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) {
+ enum bug_trap_type type;
+
+ nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ type = report_bug(regs->ip, regs);
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
+ nmi_exit();
+
+ if (type == BUG_TRAP_TYPE_WARN) {
+ /* Skip the ud2. */
+ regs->ip += LEN_UD2;
+ return;
+ }
+
+ /*
+ * Else, if this was a BUG and report_bug returns or if this
+ * was just a normal #UD, we want to continue onward and
+ * crash.
+ */
+ }
+
+ rcu_exit = idtentry_enter_cond_rcu(regs);
+ instrumentation_begin();
+ handle_invalid_op(regs);
+ instrumentation_end();
+ idtentry_exit_cond_rcu(regs, rcu_exit);
+}
+
+DEFINE_IDTENTRY(exc_coproc_segment_overrun)
+{
+ do_error_trap(regs, 0, "coprocessor segment overrun",
+ X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
+{
+ do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
+ 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
+{
+ do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
+ SIGBUS, 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
+{
+ do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
+ 0, NULL);
+}
+
+DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
+{
+ char *str = "alignment check";
if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
return;
@@ -271,12 +331,19 @@ __visible void __noreturn handle_stack_overflow(const char *message,
* from the TSS. Returning is, in principle, okay, but changes to regs will
* be lost. If, for some reason, we need to return to a context with modified
* regs, the shim code could be adjusted to synchronize the registers.
+ *
+ * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
+ * to be read before doing anything else.
*/
-dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2)
+DEFINE_IDTENTRY_DF(exc_double_fault)
{
static const char str[] = "double fault";
struct task_struct *tsk = current;
+#ifdef CONFIG_VMAP_STACK
+ unsigned long address = read_cr2();
+#endif
+
#ifdef CONFIG_X86_ESPFIX64
extern unsigned char native_irq_return_iret[];
@@ -299,6 +366,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
regs->ip == (unsigned long)native_irq_return_iret)
{
struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
+ unsigned long *p = (unsigned long *)regs->sp;
/*
* regs->sp points to the failing IRET frame on the
@@ -306,7 +374,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
* in gpregs->ss through gpregs->ip.
*
*/
- memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+ gpregs->ip = p[0];
+ gpregs->cs = p[1];
+ gpregs->flags = p[2];
+ gpregs->sp = p[3];
+ gpregs->ss = p[4];
gpregs->orig_ax = 0; /* Missing (lost) #GP error code */
/*
@@ -320,7 +392,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
* which is what the stub expects, given that the faulting
* RIP will be the IRET instruction.
*/
- regs->ip = (unsigned long)general_protection;
+ regs->ip = (unsigned long)asm_exc_general_protection;
regs->sp = (unsigned long)&gpregs->orig_ax;
return;
@@ -328,6 +400,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
#endif
nmi_enter();
+ instrumentation_begin();
notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
tsk->thread.error_code = error_code;
@@ -371,27 +444,31 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
* stack even if the actual trigger for the double fault was
* something else.
*/
- if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
- handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
+ if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
+ handle_stack_overflow("kernel stack overflow (double-fault)",
+ regs, address);
+ }
#endif
pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
die("double fault", regs, error_code);
panic("Machine halted.");
+ instrumentation_end();
}
-dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_bounds)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- if (notify_die(DIE_TRAP, "bounds", regs, error_code,
+ if (notify_die(DIE_TRAP, "bounds", regs, 0,
X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
return;
cond_local_irq_enable(regs);
if (!user_mode(regs))
- die("bounds", regs, error_code);
+ die("bounds", regs, 0);
+
+ do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);
- do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
+ cond_local_irq_disable(regs);
}
enum kernel_gp_hint {
@@ -438,7 +515,7 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
#define GPFSTR "general protection fault"
-dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
enum kernel_gp_hint hint = GP_NO_HINT;
@@ -446,17 +523,17 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
unsigned long gp_addr;
int ret;
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
cond_local_irq_enable(regs);
if (static_cpu_has(X86_FEATURE_UMIP)) {
if (user_mode(regs) && fixup_umip_exception(regs))
- return;
+ goto exit;
}
if (v8086_mode(regs)) {
local_irq_enable();
handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+ local_irq_disable();
return;
}
@@ -468,12 +545,11 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
force_sig(SIGSEGV);
-
- return;
+ goto exit;
}
if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
- return;
+ goto exit;
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
@@ -485,11 +561,11 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
if (!preemptible() &&
kprobe_running() &&
kprobe_fault_handler(regs, X86_TRAP_GP))
- return;
+ goto exit;
ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
if (ret == NOTIFY_STOP)
- return;
+ goto exit;
if (error_code)
snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
@@ -511,47 +587,74 @@ dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code)
die_addr(desc, regs, error_code, gp_addr);
+exit:
+ cond_local_irq_disable(regs);
}
-NOKPROBE_SYMBOL(do_general_protection);
-dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
+static bool do_int3(struct pt_regs *regs)
{
- if (poke_int3_handler(regs))
- return;
-
- /*
- * Unlike any other non-IST entry, we can be called from pretty much
- * any location in the kernel through kprobes -- text_poke() will most
- * likely be handled by poke_int3_handler() above. This means this
- * handler is effectively NMI-like.
- */
- if (!user_mode(regs))
- nmi_enter();
+ int res;
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
- if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
- SIGTRAP) == NOTIFY_STOP)
- goto exit;
+ if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
+ SIGTRAP) == NOTIFY_STOP)
+ return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (kprobe_int3_handler(regs))
- goto exit;
+ return true;
#endif
+ res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);
- if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
- SIGTRAP) == NOTIFY_STOP)
- goto exit;
+ return res == NOTIFY_STOP;
+}
+
+static void do_int3_user(struct pt_regs *regs)
+{
+ if (do_int3(regs))
+ return;
cond_local_irq_enable(regs);
- do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, 0, NULL);
+ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
cond_local_irq_disable(regs);
+}
-exit:
- if (!user_mode(regs))
+DEFINE_IDTENTRY_RAW(exc_int3)
+{
+ /*
+	 * poke_int3_handler() is completely self-contained code; it does (and
+	 * must) *NOT* call out to anything, lest it hit upon yet another
+	 * INT3.
+ */
+ if (poke_int3_handler(regs))
+ return;
+
+ /*
+ * idtentry_enter_user() uses static_branch_{,un}likely() and therefore
+ * can trigger INT3, hence poke_int3_handler() must be done
+ * before. If the entry came from kernel mode, then use nmi_enter()
+ * because the INT3 could have been hit in any context including
+ * NMI.
+ */
+ if (user_mode(regs)) {
+ idtentry_enter_user(regs);
+ instrumentation_begin();
+ do_int3_user(regs);
+ instrumentation_end();
+ idtentry_exit_user(regs);
+ } else {
+ nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ if (!do_int3(regs))
+ die("int3", regs, 0);
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
nmi_exit();
+ }
}
-NOKPROBE_SYMBOL(do_int3);
#ifdef CONFIG_X86_64
/*
@@ -559,21 +662,20 @@ NOKPROBE_SYMBOL(do_int3);
* to switch to the normal thread stack if the interrupted code was in
* user mode. The actual stack switch is done in entry_64.S
*/
-asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
if (regs != eregs)
*regs = *eregs;
return regs;
}
-NOKPROBE_SYMBOL(sync_regs);
struct bad_iret_stack {
void *error_entry_ret;
struct pt_regs regs;
};
-asmlinkage __visible notrace
+asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
/*
@@ -584,19 +686,21 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
* just below the IRET frame) and we want to pretend that the
* exception came from the IRET target.
*/
- struct bad_iret_stack *new_stack =
- (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
+ struct bad_iret_stack tmp, *new_stack =
+ (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
- /* Copy the IRET target to the new stack. */
- memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+ /* Copy the IRET target to the temporary storage. */
+ memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
/* Copy the remainder of the stack from the current stack. */
- memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+ memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+
+ /* Update the entry stack */
+ memcpy(new_stack, &tmp, sizeof(tmp));
BUG_ON(!user_mode(&new_stack->regs));
return new_stack;
}
-NOKPROBE_SYMBOL(fixup_bad_iret);
#endif
static bool is_sysenter_singlestep(struct pt_regs *regs)
@@ -622,6 +726,43 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
+static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+{
+ /*
+ * Disable breakpoints during exception handling; recursive exceptions
+ * are exceedingly 'fun'.
+ *
+	 * Since this function is NOKPROBE, and that also applies to
+	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except
+	 * a HW_BREAKPOINT_W on our stack).
+	 *
+	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
+	 * includes the entry stack, is excluded for everything.
+ */
+ *dr7 = local_db_save();
+
+ /*
+ * The Intel SDM says:
+ *
+ * Certain debug exceptions may clear bits 0-3. The remaining
+ * contents of the DR6 register are never cleared by the
+ * processor. To avoid confusion in identifying debug
+ * exceptions, debug handlers should clear the register before
+ * returning to the interrupted task.
+ *
+ * Keep it simple: clear DR6 immediately.
+ */
+ get_debugreg(*dr6, 6);
+ set_debugreg(0, 6);
+ /* Filter out all the reserved bits which are preset to 1 */
+ *dr6 &= ~DR6_RESERVED;
+}
+
+static __always_inline void debug_exit(unsigned long dr7)
+{
+ local_db_restore(dr7);
+}
+
/*
* Our handling of the processor debug registers is non-trivial.
* We do not clear them on entry and exit from the kernel. Therefore
@@ -646,86 +787,54 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
*
* May run on IST stack.
*/
-dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
+static void handle_debug(struct pt_regs *regs, unsigned long dr6, bool user)
{
struct task_struct *tsk = current;
- int user_icebp = 0;
- unsigned long dr6;
+ bool user_icebp;
int si_code;
- nmi_enter();
-
- get_debugreg(dr6, 6);
- /*
- * The Intel SDM says:
- *
- * Certain debug exceptions may clear bits 0-3. The remaining
- * contents of the DR6 register are never cleared by the
- * processor. To avoid confusion in identifying debug
- * exceptions, debug handlers should clear the register before
- * returning to the interrupted task.
- *
- * Keep it simple: clear DR6 immediately.
- */
- set_debugreg(0, 6);
-
- /* Filter out all the reserved bits which are preset to 1 */
- dr6 &= ~DR6_RESERVED;
-
/*
* The SDM says "The processor clears the BTF flag when it
* generates a debug exception." Clear TIF_BLOCKSTEP to keep
* TIF_BLOCKSTEP in sync with the hardware BTF flag.
*/
- clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
+ clear_thread_flag(TIF_BLOCKSTEP);
- if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
- is_sysenter_singlestep(regs))) {
- dr6 &= ~DR_STEP;
- if (!dr6)
- goto exit;
- /*
- * else we might have gotten a single-step trap and hit a
- * watchpoint at the same time, in which case we should fall
- * through and handle the watchpoint.
- */
- }
+ /*
+ * If DR6 is zero, no point in trying to handle it. The kernel is
+ * not using INT1.
+ */
+ if (!user && !dr6)
+ return;
/*
* If dr6 has no reason to give us about the origin of this trap,
* then it's very likely the result of an icebp/int01 trap.
* User wants a sigtrap for that.
*/
- if (!dr6 && user_mode(regs))
- user_icebp = 1;
+ user_icebp = user && !dr6;
/* Store the virtualized DR6 value */
tsk->thread.debugreg6 = dr6;
#ifdef CONFIG_KPROBES
- if (kprobe_debug_handler(regs))
- goto exit;
+	if (kprobe_debug_handler(regs))
+		return;
#endif
- if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
- SIGTRAP) == NOTIFY_STOP)
- goto exit;
-
- /*
- * Let others (NMI) know that the debug stack is in use
- * as we may switch to the interrupt stack.
- */
- debug_stack_usage_inc();
+ if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, 0,
+ SIGTRAP) == NOTIFY_STOP) {
+ return;
+ }
/* It's safe to allow irq's after DR6 has been saved */
cond_local_irq_enable(regs);
if (v8086_mode(regs)) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
- X86_TRAP_DB);
- cond_local_irq_disable(regs);
- debug_stack_usage_dec();
- goto exit;
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, 0,
+ X86_TRAP_DB);
+ goto out;
}
if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
@@ -739,23 +848,91 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
regs->flags &= ~X86_EFLAGS_TF;
}
+
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
- send_sigtrap(regs, error_code, si_code);
+ send_sigtrap(regs, 0, si_code);
+
+out:
cond_local_irq_disable(regs);
- debug_stack_usage_dec();
+}
-exit:
+static __always_inline void exc_debug_kernel(struct pt_regs *regs,
+ unsigned long dr6)
+{
+ nmi_enter();
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+
+ /*
+ * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
+ * watchpoint at the same time then that will still be handled.
+ */
+ if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
+ dr6 &= ~DR_STEP;
+
+ handle_debug(regs, dr6, false);
+
+ if (regs->flags & X86_EFLAGS_IF)
+ trace_hardirqs_on_prepare();
+ instrumentation_end();
nmi_exit();
}
-NOKPROBE_SYMBOL(do_debug);
+
+static __always_inline void exc_debug_user(struct pt_regs *regs,
+ unsigned long dr6)
+{
+ idtentry_enter_user(regs);
+ instrumentation_begin();
+
+ handle_debug(regs, dr6, true);
+ instrumentation_end();
+ idtentry_exit_user(regs);
+}
+
+#ifdef CONFIG_X86_64
+/* IST stack entry */
+DEFINE_IDTENTRY_DEBUG(exc_debug)
+{
+ unsigned long dr6, dr7;
+
+ debug_enter(&dr6, &dr7);
+ exc_debug_kernel(regs, dr6);
+ debug_exit(dr7);
+}
+
+/* User entry, runs on regular task stack */
+DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
+{
+ unsigned long dr6, dr7;
+
+ debug_enter(&dr6, &dr7);
+ exc_debug_user(regs, dr6);
+ debug_exit(dr7);
+}
+#else
+/* 32 bit does not have separate entry points. */
+DEFINE_IDTENTRY_DEBUG(exc_debug)
+{
+ unsigned long dr6, dr7;
+
+ debug_enter(&dr6, &dr7);
+
+ if (user_mode(regs))
+ exc_debug_user(regs, dr6);
+ else
+ exc_debug_kernel(regs, dr6);
+
+ debug_exit(dr7);
+}
+#endif
/*
* Note that we play around with the 'TS' bit in an attempt to get
* the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
*/
-static void math_error(struct pt_regs *regs, int error_code, int trapnr)
+static void math_error(struct pt_regs *regs, int trapnr)
{
struct task_struct *task = current;
struct fpu *fpu = &task->thread.fpu;
@@ -766,16 +943,16 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
- if (fixup_exception(regs, trapnr, error_code, 0))
- return;
+ if (fixup_exception(regs, trapnr, 0, 0))
+ goto exit;
- task->thread.error_code = error_code;
+ task->thread.error_code = 0;
task->thread.trap_nr = trapnr;
- if (notify_die(DIE_TRAP, str, regs, error_code,
- trapnr, SIGFPE) != NOTIFY_STOP)
- die(str, regs, error_code);
- return;
+ if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
+ SIGFPE) != NOTIFY_STOP)
+ die(str, regs, 0);
+ goto exit;
}
/*
@@ -784,32 +961,37 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
fpu__save(fpu);
task->thread.trap_nr = trapnr;
- task->thread.error_code = error_code;
+ task->thread.error_code = 0;
si_code = fpu__exception_code(fpu, trapnr);
/* Retry when we get spurious exceptions: */
if (!si_code)
- return;
+ goto exit;
force_sig_fault(SIGFPE, si_code,
(void __user *)uprobe_get_trap_addr(regs));
+exit:
+ cond_local_irq_disable(regs);
}
-dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_coprocessor_error)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- math_error(regs, error_code, X86_TRAP_MF);
+ math_error(regs, X86_TRAP_MF);
}
-dotraplinkage void
-do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- math_error(regs, error_code, X86_TRAP_XF);
+ if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
+ /* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
+ if (!static_cpu_has(X86_FEATURE_XMM)) {
+ __exc_general_protection(regs, 0);
+ return;
+ }
+ }
+ math_error(regs, X86_TRAP_XF);
}
-dotraplinkage void
-do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
/*
* This addresses a Pentium Pro Erratum:
@@ -832,13 +1014,10 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
*/
}
-dotraplinkage void
-do_device_not_available(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY(exc_device_not_available)
{
unsigned long cr0 = read_cr0();
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-
#ifdef CONFIG_MATH_EMULATION
if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
struct math_emu_info info = { };
@@ -847,6 +1026,8 @@ do_device_not_available(struct pt_regs *regs, long error_code)
info.regs = regs;
math_emulate(&info);
+
+ cond_local_irq_disable(regs);
return;
}
#endif
@@ -861,22 +1042,20 @@ do_device_not_available(struct pt_regs *regs, long error_code)
* to kill the task than getting stuck in a never-ending
* loop of #NM faults.
*/
- die("unexpected #NM exception", regs, error_code);
+ die("unexpected #NM exception", regs, 0);
}
}
-NOKPROBE_SYMBOL(do_device_not_available);
#ifdef CONFIG_X86_32
-dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY_SW(iret_error)
{
- RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
local_irq_enable();
-
- if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
+ if (notify_die(DIE_TRAP, "iret exception", regs, 0,
X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
- do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
+ do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
ILL_BADSTK, (void __user *)NULL);
}
+ local_irq_disable();
}
#endif
@@ -888,20 +1067,9 @@ void __init trap_init(void)
idt_setup_traps();
/*
- * Set the IDT descriptor to a fixed read-only location, so that the
- * "sidt" instruction will not leak the location of the kernel, and
- * to defend the IDT against arbitrary memory write vulnerabilities.
- * It will be reloaded in cpu_init() */
- cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
- PAGE_KERNEL_RO);
- idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
-
- /*
* Should be a barrier for any external CPU state:
*/
cpu_init();
idt_setup_ist_traps();
-
- idt_setup_debugidt_traps();
}
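Handlers declared with DEFINE_IDTENTRY_RAW in this file (#BP, #UD, NMI) get no automatic bracketing from the macro and must manage the state transitions themselves; a condensed sketch of the kernel-mode shape used above, with a hypothetical handler:

    DEFINE_IDTENTRY_RAW(exc_example_raw)    /* hypothetical */
    {
            nmi_enter();                    /* NMI-like: may nest inside anything */
            instrumentation_begin();
            trace_hardirqs_off_finish();

            /* ... work that must cope with any interrupted context ... */

            if (regs->flags & X86_EFLAGS_IF)
                    trace_hardirqs_on_prepare();
            instrumentation_end();
            nmi_exit();
    }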
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 54226110bc7f..722a85f3b2dd 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -74,13 +74,7 @@ static bool in_entry_code(unsigned long ip)
{
char *addr = (char *)ip;
- if (addr >= __entry_text_start && addr < __entry_text_end)
- return true;
-
- if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
- return true;
-
- return false;
+ return addr >= __entry_text_start && addr < __entry_text_end;
}
static inline unsigned long *last_frame(struct unwind_state *state)
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 47a8676c7395..764573de3996 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -171,7 +171,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
pte_t *pte;
int i;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -197,7 +197,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, PAGE_SHIFT, false);
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1bf7e312361f..3bfc8dd8a43d 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -40,13 +40,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
-jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
-jiffies_64 = jiffies;
#endif
+jiffies = jiffies_64;
+
#if defined(CONFIG_X86_64)
/*
* On 64-bit, align RODATA to 2MB so we retain large page mappings for
@@ -134,7 +134,6 @@ SECTIONS
KPROBES_TEXT
ALIGN_ENTRY_TEXT_BEGIN
ENTRY_TEXT
- IRQENTRY_TEXT
ALIGN_ENTRY_TEXT_END
SOFTIRQENTRY_TEXT
*(.fixup)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 253b8e875ccd..8a294f9747aa 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -181,17 +181,14 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
r = -E2BIG;
if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
goto out;
- r = -ENOMEM;
if (cpuid->nent) {
- cpuid_entries =
- vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
- cpuid->nent));
- if (!cpuid_entries)
- goto out;
- r = -EFAULT;
- if (copy_from_user(cpuid_entries, entries,
- cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+ cpuid_entries = vmemdup_user(entries,
+ array_size(sizeof(struct kvm_cpuid_entry),
+ cpuid->nent));
+ if (IS_ERR(cpuid_entries)) {
+ r = PTR_ERR(cpuid_entries);
goto out;
+ }
}
for (i = 0; i < cpuid->nent; i++) {
vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
@@ -211,8 +208,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
kvm_x86_ops.cpuid_update(vcpu);
r = kvm_update_cpuid(vcpu);
+ kvfree(cpuid_entries);
out:
- vfree(cpuid_entries);
return r;
}
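The open-coded vmalloc()+copy_from_user() pair above collapses into vmemdup_user(), which returns an ERR_PTR on failure and whose result is freed with kvfree(); a minimal sketch, with a hypothetical wrapper:

    /* Sketch: duplicate a user-space array into kernel memory. */
    static struct kvm_cpuid_entry *dup_entries(const void __user *uptr, u32 nent)
    {
            struct kvm_cpuid_entry *e;

            e = vmemdup_user(uptr, array_size(sizeof(*e), nent));
            if (IS_ERR(e))
                    return e;       /* caller checks IS_ERR()/PTR_ERR() */
            /* ... use e[0..nent-1], eventually release with kvfree(e) ... */
            return e;
    }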
@@ -325,7 +322,7 @@ void kvm_set_cpu_caps(void)
);
kvm_cpu_cap_mask(CPUID_7_ECX,
- F(AVX512VBMI) | F(LA57) | 0 /*PKU*/ | 0 /*OSPKE*/ | F(RDPID) |
+ F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/
@@ -334,6 +331,13 @@ void kvm_set_cpu_caps(void)
if (cpuid_ecx(7) & F(LA57))
kvm_cpu_cap_set(X86_FEATURE_LA57);
+ /*
+	 * PKU is not yet implemented for shadow paging and requires OSPKE
+	 * to be set on the host. Clear it if that is not the case.
+ */
+ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
+ kvm_cpu_cap_clear(X86_FEATURE_PKU);
+
kvm_cpu_cap_mask(CPUID_7_EDX,
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
@@ -426,7 +430,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
struct kvm_cpuid_array {
struct kvm_cpuid_entry2 *entries;
- const int maxnent;
+ int maxnent;
int nent;
};
@@ -870,7 +874,6 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_array array = {
.nent = 0,
- .maxnent = cpuid->nent,
};
int r, i;
@@ -887,6 +890,8 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
if (!array.entries)
return -ENOMEM;
+ array.maxnent = cpuid->nent;
+
for (i = 0; i < ARRAY_SIZE(funcs); i++) {
r = get_cpuid_func(&array, funcs[i], type);
if (r)
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index 018aebce33ff..7e818d64bb4d 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -43,22 +43,22 @@ static int vcpu_get_tsc_scaling_frac_bits(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bits, NULL, "%llu\n");
-void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
- debugfs_create_file("tsc-offset", 0444, vcpu->debugfs_dentry, vcpu,
+ debugfs_create_file("tsc-offset", 0444, debugfs_dentry, vcpu,
&vcpu_tsc_offset_fops);
if (lapic_in_kernel(vcpu))
debugfs_create_file("lapic_timer_advance_ns", 0444,
- vcpu->debugfs_dentry, vcpu,
+ debugfs_dentry, vcpu,
&vcpu_timer_advance_ns_fops);
if (kvm_has_tsc_control) {
debugfs_create_file("tsc-scaling-ratio", 0444,
- vcpu->debugfs_dentry, vcpu,
+ debugfs_dentry, vcpu,
&vcpu_tsc_scaling_fops);
debugfs_create_file("tsc-scaling-ratio-frac-bits", 0444,
- vcpu->debugfs_dentry, vcpu,
+ debugfs_dentry, vcpu,
&vcpu_tsc_scaling_frac_fops);
}
}
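The change above only threads the directory dentry through as a parameter instead of reading vcpu->debugfs_dentry; the call shape stays the standard debugfs one. A minimal sketch (attribute name and fops are hypothetical):

    static void create_example_stats(struct kvm_vcpu *vcpu, struct dentry *dir)
    {
            /* Arguments: name, mode, parent directory, private data, fops. */
            debugfs_create_file("example-stat", 0444, dir, vcpu, &example_fops);
    }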
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index de5476f8683e..d0e2825ae617 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4800,8 +4800,12 @@ static const struct opcode twobyte_table[256] = {
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
N, N, N, N, N, N,
- D(ImplicitOps | ModRM | SrcMem | NoAccess),
- N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
+ D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 238b78e069fe..af9cdb426dd2 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1252,7 +1252,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
* only, there can be valuable data in the rest which needs
* to be preserved e.g. on migration.
*/
- if (__clear_user((void __user *)addr, sizeof(u32)))
+ if (__put_user(0, (u32 __user *)addr))
return 1;
hv_vcpu->hv_vapic = data;
kvm_vcpu_mark_page_dirty(vcpu, gfn);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index febca334c320..a6e218c6140d 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -462,7 +462,6 @@ static int pit_ioport_write(struct kvm_vcpu *vcpu,
if (channel == 3) {
/* Read-Back Command. */
for (channel = 0; channel < 3; channel++) {
- s = &pit_state->channels[channel];
if (val & (2 << channel)) {
if (!(val & 0x20))
pit_latch_count(pit, channel);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 38c576495048..a6d484ea110b 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -165,22 +165,22 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
unsigned long pfn;
unsigned long paddr;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
paddr = pfn << PAGE_SHIFT;
table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
if (!table) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return -EFAULT;
}
ret = CMPXCHG(&table[index], orig_pte, new_pte);
memunmap(table);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
}
return (ret != orig_pte);
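The mmap_sem conversions in this hunk are mechanical: the new wrappers take the mm itself rather than its semaphore field. A minimal sketch of the read-side pattern, assuming current->mm is valid:

    static int walk_one_vma(unsigned long vaddr)
    {
            struct vm_area_struct *vma;
            int ret = 0;

            mmap_read_lock(current->mm);    /* was: down_read(&current->mm->mmap_sem) */
            vma = find_vma(current->mm, vaddr);
            if (!vma)
                    ret = -EFAULT;
            mmap_read_unlock(current->mm);  /* was: up_read(...) */
            return ret;
    }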
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 8a6db11dcb43..6bceafb19108 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -258,7 +258,7 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm)
/* Only a few fields of int_ctl are written by the processor. */
mask = V_IRQ_MASK | V_TPR_MASK;
if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
- is_intercept(svm, SVM_EXIT_VINTR)) {
+ is_intercept(svm, INTERCEPT_VINTR)) {
/*
* In order to request an interrupt window, L0 is usurping
* svm->vmcb->control.int_ctl and possibly setting V_IRQ
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9e333b91ff78..8ccfa4197d9c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1378,6 +1378,8 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
/* Drop int_ctl fields related to VINTR injection. */
svm->vmcb->control.int_ctl &= mask;
if (is_guest_mode(&svm->vcpu)) {
+ svm->nested.hsave->control.int_ctl &= mask;
+
WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
(svm->nested.ctl.int_ctl & V_TPR_MASK));
svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask;
@@ -1837,7 +1839,7 @@ static void kvm_machine_check(void)
.flags = X86_EFLAGS_IF,
};
- do_machine_check(&regs, 0);
+ do_machine_check(&regs);
#endif
}
@@ -1999,7 +2001,7 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
*/
if (vgif_enabled(svm))
clr_intercept(svm, INTERCEPT_STGI);
- if (is_intercept(svm, SVM_EXIT_VINTR))
+ if (is_intercept(svm, INTERCEPT_VINTR))
svm_clear_vintr(svm);
enable_gif(svm);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9c74a732b08d..d1af20b050a8 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3087,9 +3087,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
/*
* VMExit clears RFLAGS.IF and DR7, even on a consistency check.
*/
- local_irq_enable();
if (hw_breakpoint_active())
set_debugreg(__this_cpu_read(cpu_dr7), 7);
+ local_irq_enable();
preempt_enable();
/*
@@ -4624,19 +4624,24 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
}
}
-static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
+ int *ret)
{
gva_t gva;
struct x86_exception e;
+ int r;
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmcs_read32(VMX_INSTRUCTION_INFO), false,
- sizeof(*vmpointer), &gva))
- return 1;
+ sizeof(*vmpointer), &gva)) {
+ *ret = 1;
+ return -EINVAL;
+ }
- if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
+ r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
+ if (r != X86EMUL_CONTINUE) {
+ *ret = vmx_handle_memory_failure(vcpu, r, &e);
+ return -EINVAL;
}
return 0;
@@ -4764,8 +4769,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
+ return ret;
/*
* SDM 3: 24.11.5
@@ -4838,12 +4843,13 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
u32 zero = 0;
gpa_t vmptr;
u64 evmcs_gpa;
+ int r;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+ return r;
if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
@@ -4902,7 +4908,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
u64 value;
gva_t gva = 0;
short offset;
- int len;
+ int len, r;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -4943,10 +4949,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
instr_info, true, len, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
}
return nested_vmx_succeed(vcpu);
@@ -4987,7 +4992,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
unsigned long field;
short offset;
gva_t gva;
- int len;
+ int len, r;
/*
* The value to write might be 32 or 64 bits, depending on L1's long
@@ -5017,10 +5022,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
instr_info, false, len, &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
}
field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5103,12 +5107,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t vmptr;
+ int r;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
+ if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
+ return r;
if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
@@ -5170,6 +5175,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
struct x86_exception e;
gva_t gva;
+ int r;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -5181,11 +5187,11 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
true, sizeof(gpa_t), &gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
- sizeof(gpa_t), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+ sizeof(gpa_t), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
+
return nested_vmx_succeed(vcpu);
}
@@ -5209,7 +5215,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
struct {
u64 eptp, gpa;
} operand;
- int i;
+ int i, r;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_EPT) ||
@@ -5236,10 +5242,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false, sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
/*
* Nested EPT roots are always held through guest_mmu,
@@ -5291,6 +5296,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
u64 gla;
} operand;
u16 vpid02;
+ int r;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
@@ -5318,10 +5324,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false, sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
+
if (operand.vpid >> 16)
return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
@@ -5666,7 +5672,7 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
u32 intr_info;
- switch (exit_reason) {
+ switch ((u16)exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
@@ -5727,7 +5733,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 intr_info;
- switch (exit_reason) {
+ switch ((u16)exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
intr_info = vmx_get_intr_info(vcpu);
if (is_nmi(intr_info))
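After this rework nested_vmx_get_vmptr() separates "did it fail" (non-zero return) from "what should the VM-exit handler return" (*ret); a condensed sketch of the new calling convention, with a hypothetical caller:

    static int handle_vmptr_user(struct kvm_vcpu *vcpu)     /* hypothetical */
    {
            gpa_t vmptr;
            int ret;

            if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
                    return ret;     /* helper picked 1 (resume) or 0 (to userspace) */

            /* ... validate and use vmptr ... */
            return 1;               /* continue running the guest */
    }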
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index d33d890b605f..bdcce65c7a1d 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -181,7 +181,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
ret = pmu->version > 1;
break;
case MSR_IA32_PERF_CAPABILITIES:
- ret = guest_cpuid_has(vcpu, X86_FEATURE_PDCM);
+ ret = 1;
break;
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 170cc76a581f..36c771728c8c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1600,6 +1600,32 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
return 1;
}
+/*
+ * Handle the result of kvm_read/write_guest_virt*(): either inject a #PF or
+ * return KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. The
+ * return value indicates whether an exit to userspace is needed.
+ */
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+ struct x86_exception *e)
+{
+ if (r == X86EMUL_PROPAGATE_FAULT) {
+ kvm_inject_emulated_page_fault(vcpu, e);
+ return 1;
+ }
+
+ /*
+	 * If kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED while
+	 * handling a VMX instruction, KVM could have handled the request
+	 * correctly by exiting to userspace and performing I/O, but there
+	 * doesn't seem to be a real use case behind such requests; just
+	 * return KVM_EXIT_INTERNAL_ERROR for now.
+ */
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+
+ return 0;
+}
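A caller-side sketch of the pattern this helper enables, matching the nested.c conversions above (vcpu, gva, operand and e are declared as in those hunks):

	int r;

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return vmx_handle_memory_failure(vcpu, r, &e);
	/* r == X86EMUL_CONTINUE: the operand was read, emulation proceeds. */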
/*
* Recognizes a pending MTF VM-exit and records the nested state for later
@@ -4683,7 +4709,7 @@ static void kvm_machine_check(void)
.flags = X86_EFLAGS_IF,
};
- do_machine_check(&regs, 0);
+ do_machine_check(&regs);
#endif
}
@@ -5486,6 +5512,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
u64 pcid;
u64 gla;
} operand;
+ int r;
if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5508,10 +5535,9 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_emulated_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
if (operand.pcid >> 12 != 0) {
kvm_inject_gp(vcpu, 0);
@@ -7282,10 +7308,6 @@ static __init void vmx_set_cpu_caps(void)
if (vmx_pt_mode_is_host_guest())
kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
- /* PKU is not yet implemented for shadow paging. */
- if (enable_ept && boot_cpu_has(X86_FEATURE_OSPKE))
- kvm_cpu_cap_check_and_set(X86_FEATURE_PKU);
-
if (vmx_umip_emulated())
kvm_cpu_cap_set(X86_FEATURE_UMIP);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 672c28f17e49..8a83b5edc820 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -355,6 +355,8 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+ struct x86_exception *e);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9e41b5135340..00c88c2f34e4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -239,8 +239,7 @@ u64 __read_mostly host_xcr0;
u64 __read_mostly supported_xcr0;
EXPORT_SYMBOL_GPL(supported_xcr0);
-struct kmem_cache *x86_fpu_cache;
-EXPORT_SYMBOL_GPL(x86_fpu_cache);
+static struct kmem_cache *x86_fpu_cache;
static struct kmem_cache *x86_emulator_cache;
@@ -5647,13 +5646,6 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
/* kvm_write_guest_virt_system can pull in tons of pages. */
vcpu->arch.l1tf_flush_l1d = true;
- /*
- * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
- * is returned, but our callers are not ready for that and they blindly
- * call kvm_inject_page_fault. Ensure that they at least do not leak
- * uninitialized kernel stack memory into cr2 and error code.
- */
- memset(exception, 0, sizeof(*exception));
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
@@ -7018,7 +7010,7 @@ restart:
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
kvm_rip_write(vcpu, ctxt->eip);
- if (r && ctxt->tf)
+ if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
r = kvm_vcpu_do_singlestep(vcpu);
if (kvm_x86_ops.update_emulated_instruction)
kvm_x86_ops.update_emulated_instruction(vcpu);
@@ -8277,9 +8269,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
-int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end,
- bool blockable)
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
{
unsigned long apic_address;
@@ -8290,8 +8281,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (start <= apic_address && apic_address < end)
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
-
- return 0;
}
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
@@ -9962,13 +9951,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
if (!slot || !slot->npages)
return 0;
- /*
- * Stuff a non-canonical value to catch use-after-delete. This
- * ends up being 0 on 32-bit KVM, but there's no better
- * alternative.
- */
- hva = (unsigned long)(0xdeadull << 48);
old_npages = slot->npages;
+ hva = 0;
}
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
@@ -10140,43 +10124,65 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
}
static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
- struct kvm_memory_slot *new)
+ struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
- /* Still write protect RO slot */
- if (new->flags & KVM_MEM_READONLY) {
- kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
+ /*
+ * Nothing to do for RO slots or CREATE/MOVE/DELETE of a slot.
+ * See comments below.
+ */
+ if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY))
return;
- }
/*
- * Call kvm_x86_ops dirty logging hooks when they are valid.
- *
- * kvm_x86_ops.slot_disable_log_dirty is called when:
- *
- * - KVM_MR_CREATE with dirty logging is disabled
- * - KVM_MR_FLAGS_ONLY with dirty logging is disabled in new flag
- *
- * The reason is, in case of PML, we need to set D-bit for any slots
- * with dirty logging disabled in order to eliminate unnecessary GPA
- * logging in PML buffer (and potential PML buffer full VMEXIT). This
- * guarantees leaving PML enabled during guest's lifetime won't have
- * any additional overhead from PML when guest is running with dirty
- * logging disabled for memory slots.
+ * Dirty logging tracks sptes in 4k granularity, meaning that large
+ * sptes have to be split. If live migration is successful, the guest
+ * in the source machine will be destroyed and large sptes will be
+ * created in the destination. However, if the guest continues to run
+ * in the source machine (for example if live migration fails), small
+ * sptes will remain around and cause bad performance.
*
- * kvm_x86_ops.slot_enable_log_dirty is called when switching new slot
- * to dirty logging mode.
+ * Scan sptes if dirty logging has been stopped, dropping those
+ * which can be collapsed into a single large-page spte. Later
+ * page faults will create the large-page sptes.
*
- * If kvm_x86_ops dirty logging hooks are invalid, use write protect.
+ * There is no need to do this in any of the following cases:
+ * CREATE: No dirty mappings will already exist.
+ * MOVE/DELETE: The old mappings will already have been cleaned up by
+ * kvm_arch_flush_shadow_memslot()
+ */
+ if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+ !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+ kvm_mmu_zap_collapsible_sptes(kvm, new);
+
+ /*
+ * Enable or disable dirty logging for the slot.
*
- * In case of write protect:
+ * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of the old
+ * slot have been zapped so no dirty logging updates are needed for
+ * the old slot.
+ * For KVM_MR_CREATE and KVM_MR_MOVE, once the new slot is visible
+ * any mappings that might be created in it will consume the
+ * properties of the new slot and do not need to be updated here.
*
- * Write protect all pages for dirty logging.
+ * When PML is enabled, the kvm_x86_ops dirty logging hooks are
+ * called to enable/disable dirty logging.
*
- * All the sptes including the large sptes which point to this
- * slot are set to readonly. We can not create any new large
- * spte on this slot until the end of the logging.
+ * When disabling dirty logging with PML enabled, the D-bit is set
+ * for sptes in the slot in order to prevent unnecessary GPA
+ * logging in the PML buffer (and potential PML buffer full VMEXIT).
+ * This guarantees leaving PML enabled for the guest's lifetime
+ * won't have any additional overhead from PML when the guest is
+ * running with dirty logging disabled.
*
+ * When enabling dirty logging, large sptes are write-protected
+ * so they can be split on first write. New large sptes cannot
+ * be created for this slot until the end of the logging.
* See the comments in fast_page_fault().
+ * For small sptes, nothing is done if the dirty log is in the
+ * initial-all-set state. Otherwise, depending on whether PML
+ * is enabled, the D-bit or the W-bit will be cleared.
*/
if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
if (kvm_x86_ops.slot_enable_log_dirty) {
@@ -10213,39 +10219,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_mmu_calculate_default_mmu_pages(kvm));
/*
- * Dirty logging tracks sptes in 4k granularity, meaning that large
- * sptes have to be split. If live migration is successful, the guest
- * in the source machine will be destroyed and large sptes will be
- * created in the destination. However, if the guest continues to run
- * in the source machine (for example if live migration fails), small
- * sptes will remain around and cause bad performance.
- *
- * Scan sptes if dirty logging has been stopped, dropping those
- * which can be collapsed into a single large-page spte. Later
- * page faults will create the large-page sptes.
- *
- * There is no need to do this in any of the following cases:
- * CREATE: No dirty mappings will already exist.
- * MOVE/DELETE: The old mappings will already have been cleaned up by
- * kvm_arch_flush_shadow_memslot()
- */
- if (change == KVM_MR_FLAGS_ONLY &&
- (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
- !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
- kvm_mmu_zap_collapsible_sptes(kvm, new);
-
- /*
- * Set up write protection and/or dirty logging for the new slot.
- *
- * For KVM_MR_DELETE and KVM_MR_MOVE, the shadow pages of old slot have
- * been zapped so no dirty logging staff is needed for old slot. For
- * KVM_MR_FLAGS_ONLY, the old slot is essentially the same one as the
- * new and it's also covered when dealing with the new slot.
- *
* FIXME: const-ify all uses of struct kvm_memory_slot.
*/
- if (change != KVM_MR_DELETE)
- kvm_mmu_slot_apply_flags(kvm, (struct kvm_memory_slot *) new);
+ kvm_mmu_slot_apply_flags(kvm, old, (struct kvm_memory_slot *) new, change);
/* Free the arrays associated with the old memslot. */
if (change == KVM_MR_MOVE)
@@ -10530,7 +10506,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
return kvm_arch_interrupt_allowed(vcpu);
}
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
struct x86_exception fault;
@@ -10547,6 +10523,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
fault.address = work->arch.token;
fault.async_page_fault = true;
kvm_inject_page_fault(vcpu, &fault);
+ return true;
} else {
/*
* It is not possible to deliver a paravirtualized asynchronous
@@ -10557,6 +10534,7 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
* fault is retried, hopefully the page will be ready in the host.
*/
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+ return false;
}
}
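The new bool return value is consumed via work->notpresent_injected in the 'page ready' path below. A sketch of the producer side, assumed to live in the generic async-PF code (not part of this diff):

	/* Record whether the 'page not present' event reached the guest. */
	work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);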
@@ -10574,7 +10552,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
- if (kvm_pv_async_pf_enabled(vcpu) &&
+ if ((work->wakeup_all || work->notpresent_injected) &&
+ kvm_pv_async_pf_enabled(vcpu) &&
!apf_put_user_ready(vcpu, work->arch.token)) {
vcpu->arch.apf.pageready_pending = true;
kvm_apic_set_irq(vcpu, &irq, NULL);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 5246db42de45..6110bce7237b 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -6,10 +6,19 @@
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
+# KCSAN uses udelay for introducing watchpoint delay; avoid recursion.
+KCSAN_SANITIZE_delay.o := n
+ifdef CONFIG_KCSAN
+# In case KCSAN+lockdep+ftrace are enabled, disable ftrace for delay.o to avoid
+# lockdep -> [other libs] -> KCSAN -> udelay -> ftrace -> lockdep recursion.
+CFLAGS_REMOVE_delay.o = $(CC_FLAGS_FTRACE)
+endif
+
# Early boot use of cmdline; don't instrument it
ifdef CONFIG_AMD_MEM_ENCRYPT
KCOV_INSTRUMENT_cmdline.o := n
KASAN_SANITIZE_cmdline.o := n
+KCSAN_SANITIZE_cmdline.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 98f7c6fa2eaa..f7fd0e868c9c 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -7,6 +7,10 @@ KCOV_INSTRUMENT_mem_encrypt_identity.o := n
KASAN_SANITIZE_mem_encrypt.o := n
KASAN_SANITIZE_mem_encrypt_identity.o := n
+# Disable KCSAN entirely, because otherwise we get warnings that some functions
+# reference __initdata sections.
+KCSAN_SANITIZE := n
+
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 5199d8a1daf1..770b613790b3 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -4,9 +4,9 @@
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
+#include <linux/pgtable.h>
#include <asm/cpu_entry_area.h>
-#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
@@ -107,7 +107,6 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
*/
cea_map_stack(DF);
cea_map_stack(NMI);
- cea_map_stack(DB1);
cea_map_stack(DB);
cea_map_stack(MCE);
}
diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
index 4a3b62f780b4..092ea436c7e6 100644
--- a/arch/x86/mm/debug_pagetables.c
+++ b/arch/x86/mm/debug_pagetables.c
@@ -3,7 +3,7 @@
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/seq_file.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
static int ptdump_show(struct seq_file *m, void *v)
{
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index ea9010113f69..e1b599ecbbc2 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -19,7 +19,6 @@
#include <linux/ptdump.h>
#include <asm/e820/types.h>
-#include <asm/pgtable.h>
/*
* The dumper groups pagetable entries of the same type into one, and for
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index b991aa4bdfae..1d6cb07f4f86 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -204,8 +204,19 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
return;
- if (fixup_bug(regs, trapnr))
- return;
+ if (trapnr == X86_TRAP_UD) {
+ if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+ /* Skip the ud2. */
+ regs->ip += LEN_UD2;
+ return;
+ }
+
+ /*
+ * If this was a BUG and report_bug() returns (i.e. it did not
+ * handle it as a WARN), or if this was just a normal #UD, we
+ * want to continue onward and crash.
+ */
+ }
fail:
early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c5437f2964ee..66be9bd60307 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -414,21 +414,13 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
return 0;
}
+/* Pentium F0 0F C7 C8 bug workaround. */
static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
- unsigned long nr;
-
- /*
- * Pentium F0 0F C7 C8 bug workaround:
- */
- if (boot_cpu_has_bug(X86_BUG_F00F)) {
- nr = (address - idt_descr.address) >> 3;
-
- if (nr == 6) {
- do_invalid_op(regs, 0);
- return 1;
- }
+ if (boot_cpu_has_bug(X86_BUG_F00F) && idt_is_f00f_address(address)) {
+ handle_invalid_op(regs);
+ return 1;
}
#endif
return 0;
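idt_is_f00f_address() itself is not shown in this diff; judging from the removed lines it presumably encapsulates the old vector computation:

/* Hedged sketch, reconstructed from the check removed above. */
static bool idt_is_f00f_address(unsigned long address)
{
	/* The erroneous locked read targets the IDT; gate 6 is #UD. */
	return ((address - idt_descr.address) >> 3) == 6;
}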
@@ -786,6 +778,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ local_irq_disable();
+
return;
}
@@ -811,7 +805,7 @@ __bad_area(struct pt_regs *regs, unsigned long error_code,
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
__bad_area_nosemaphore(regs, error_code, address, pkey, si_code);
}
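The mmap_sem conversions throughout this file follow a uniform mapping onto the new mmap locking wrappers, judging from the one-to-one replacements in these hunks:

	mmap_read_lock(mm);		/* was down_read(&mm->mmap_sem) */
	mmap_read_trylock(mm);		/* was down_read_trylock(&mm->mmap_sem) */
	mmap_read_unlock(mm);		/* was up_read(&mm->mmap_sem) */
	mmap_write_lock_killable(mm);	/* was down_write_killable(&mm->mmap_sem) */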
@@ -865,7 +859,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
* 2. T1 : set PKRU to deny access to pkey=4, touches page
* 3. T1 : faults...
* 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
- * 5. T1 : enters fault handler, takes mmap_sem, etc...
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
* 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
* faulted on a pte with its pkey=4.
*/
@@ -1231,15 +1225,15 @@ void do_user_addr_fault(struct pt_regs *regs,
* Kernel-mode access to the user address space should only occur
* on well-defined single instructions listed in the exception
* tables. But, an erroneous kernel fault occurring outside one of
- * those areas which also holds mmap_sem might deadlock attempting
+ * those areas which also holds mmap_lock might deadlock attempting
* to validate the fault against the address space.
*
* Only do the expensive exception table search when we might be at
* risk of a deadlock. This happens if we
- * 1. Failed to acquire mmap_sem, and
+ * 1. Failed to acquire mmap_lock, and
* 2. The access did not originate in userspace.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
/*
* Fault from code in kernel from
@@ -1249,7 +1243,7 @@ void do_user_addr_fault(struct pt_regs *regs,
return;
}
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -1289,9 +1283,9 @@ good_area:
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
- * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
+ * we get VM_FAULT_RETRY back, the mmap_lock has been unlocked.
*
- * Note that handle_userfault() may also release and reacquire mmap_sem
+ * Note that handle_userfault() may also release and reacquire mmap_lock
* (and not return with VM_FAULT_RETRY), when returning to userland to
* repeat the page fault later with a VM_FAULT_NOPAGE retval
* (potentially after handling any pending signal during the return to
@@ -1310,7 +1304,7 @@ good_area:
}
/*
- * If we need to retry the mmap_sem has already been released,
+ * If we need to retry the mmap_lock has already been released,
* and if there is a fatal signal pending there is no guarantee
* that we made any progress. Handle this case first.
*/
@@ -1320,7 +1314,7 @@ good_area:
goto retry;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (unlikely(fault & VM_FAULT_ERROR)) {
mm_fault_error(regs, hw_error_code, address, fault);
return;
@@ -1355,11 +1349,38 @@ trace_page_fault_entries(struct pt_regs *regs, unsigned long error_code,
trace_page_fault_kernel(address, regs, error_code);
}
-dotraplinkage void
-do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
- unsigned long address)
+static __always_inline void
+handle_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
- prefetchw(&current->mm->mmap_sem);
+ trace_page_fault_entries(regs, error_code, address);
+
+ if (unlikely(kmmio_fault(regs, address)))
+ return;
+
+ /* Was the fault on kernel-controlled part of the address space? */
+ if (unlikely(fault_in_kernel_space(address))) {
+ do_kern_addr_fault(regs, error_code, address);
+ } else {
+ do_user_addr_fault(regs, error_code, address);
+ /*
+ * User address page fault handling might have re-enabled
+ * interrupts. Fixing up all potential exit points of
+ * do_user_addr_fault() and its leaf functions is just not
+ * doable without creating an unholy mess or turning the
+ * code upside down.
+ */
+ local_irq_disable();
+ }
+}
+
+DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
+{
+ unsigned long address = read_cr2();
+ bool rcu_exit;
+
+ prefetchw(&current->mm->mmap_lock);
+
/*
* KVM has two types of events that are, logically, interrupts, but
* are unfortunately delivered using the #PF vector. These events are
@@ -1374,19 +1395,28 @@ do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
* getting values from real and async page faults mixed up.
*
* Fingers crossed.
+ *
+ * The async #PF handling code takes care of idtentry handling
+ * itself.
*/
if (kvm_handle_async_pf(regs, (u32)address))
return;
- trace_page_fault_entries(regs, hw_error_code, address);
+ /*
+ * Entry handling for valid #PF from kernel mode is slightly
+ * different: RCU is already watching and rcu_irq_enter() must not
+ * be invoked because a kernel fault on a user space address might
+ * sleep.
+ *
+ * In case the fault hit an RCU idle region, the conditional entry
+ * code re-enables RCU to avoid subsequent wreckage, which helps
+ * debuggability.
+ */
+ rcu_exit = idtentry_enter_cond_rcu(regs);
- if (unlikely(kmmio_fault(regs, address)))
- return;
+ instrumentation_begin();
+ handle_page_fault(regs, error_code, address);
+ instrumentation_end();
- /* Was the fault on kernel-controlled part of the address space? */
- if (unlikely(fault_in_kernel_space(address)))
- do_kern_addr_fault(regs, hw_error_code, address);
- else
- do_user_addr_fault(regs, hw_error_code, address);
+ idtentry_exit_cond_rcu(regs, rcu_exit);
}
-NOKPROBE_SYMBOL(do_page_fault);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 112d3b98a3b6..001dd7dc829f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -680,6 +680,28 @@ static void __init memory_map_bottom_up(unsigned long map_start,
}
}
+/*
+ * The real mode trampoline, which is required for bootstrapping CPUs,
+ * occupies only a small area under the low 1MB. See reserve_real_mode()
+ * for details.
+ *
+ * If KASLR is disabled the first PGD entry of the direct mapping is copied
+ * to map the real mode trampoline.
+ *
+ * If KASLR is enabled, copy only the PUD which covers the low 1MB
+ * area. This limits the randomization granularity to 1GB for both 4-level
+ * and 5-level paging.
+ */
+static void __init init_trampoline(void)
+{
+#ifdef CONFIG_X86_64
+ if (!kaslr_memory_enabled())
+ trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
+ else
+ init_trampoline_kaslr();
+#endif
+}
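init_trampoline() relies on kaslr_memory_enabled(), whose definition is removed from arch/x86/mm/kaslr.c later in this diff and presumably moves to a shared header; per the removed hunk its logic is:

static inline bool kaslr_memory_enabled(void)
{
	/* No randomization if KASLR is off or KASAN is on (PGD-aligned shadow). */
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}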
+
void __init init_mem_mapping(void)
{
unsigned long end;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4222a010057a..bda909e3e37e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -35,7 +35,6 @@
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
@@ -396,15 +395,6 @@ repeat:
pte_t *kmap_pte;
-static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
-{
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
- return pte_offset_kernel(pmd, vaddr);
-}
-
static void __init kmap_init(void)
{
unsigned long kmap_vstart;
@@ -413,28 +403,17 @@ static void __init kmap_init(void)
* Cache the first kmap pte:
*/
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_pte = virt_to_kpte(kmap_vstart);
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
- unsigned long vaddr;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ unsigned long vaddr = PKMAP_BASE;
- vaddr = PKMAP_BASE;
page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
- pte = pte_offset_kernel(pmd, vaddr);
- pkmap_page_table = pte;
+ pkmap_page_table = virt_to_kpte(vaddr);
}
void __init add_highpages_with_active_regions(int nid,
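virt_to_kpte() replaces the open-coded walks removed above; judging from the deleted kmap_get_fixmap_pte(), it is presumably equivalent to:

/* Sketch: the five-level walk the generic helper stands in for. */
static inline pte_t *virt_to_kpte_sketch(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	p4d_t *p4d = p4d_offset(pgd, vaddr);
	pud_t *pud = pud_offset(p4d, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}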
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index add03c35aa34..dbae185511cd 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -37,7 +37,6 @@
#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 986d57534fd6..84d85dbd1dad 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -16,12 +16,12 @@
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>
+#include <linux/pgtable.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 763e71abc0fe..1a50434c8a4d 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -17,7 +17,6 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>
extern struct range pfn_mapped[E820_MAX_ENTRIES];
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index dc6182eecefa..fb620fd9dae9 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -24,9 +24,9 @@
#include <linux/init.h>
#include <linux/random.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>
@@ -61,15 +61,6 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
return (region->size_tb << TB_SHIFT);
}
-/*
- * Apply no randomization if KASLR was disabled at boot or if KASAN
- * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
- */
-static inline bool kaslr_memory_enabled(void)
-{
- return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
-}
-
/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
@@ -148,7 +139,7 @@ void __init kernel_randomize_memory(void)
}
}
-static void __meminit init_trampoline_pud(void)
+void __meminit init_trampoline_kaslr(void)
{
pud_t *pud_page_tramp, *pud, *pud_tramp;
p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
@@ -189,25 +180,3 @@ static void __meminit init_trampoline_pud(void)
__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
}
-
-/*
- * The real mode trampoline, which is required for bootstrapping CPUs
- * occupies only a small area under the low 1MB. See reserve_real_mode()
- * for details.
- *
- * If KASLR is disabled the first PGD entry of the direct mapping is copied
- * to map the real mode trampoline.
- *
- * If KASLR is enabled, copy only the PUD which covers the low 1MB
- * area. This limits the randomization granularity to 1GB for both 4-level
- * and 5-level paging.
- */
-void __meminit init_trampoline(void)
-{
- if (!kaslr_memory_enabled()) {
- init_trampoline_default();
- return;
- }
-
- init_trampoline_pud();
-}
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
index f5b85bdc0535..e1d7d7477c22 100644
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -9,35 +9,21 @@ static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}
-static __always_inline bool invalid_probe_range(u64 vaddr)
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
+ unsigned long vaddr = (unsigned long)unsafe_src;
+
/*
* Range covering the highest possible canonical userspace address
* as well as non-canonical address range. For the canonical range
* we also need to include the userspace guard page.
*/
- return vaddr < TASK_SIZE_MAX + PAGE_SIZE ||
- canonical_address(vaddr, boot_cpu_data.x86_virt_bits) != vaddr;
+ return vaddr >= TASK_SIZE_MAX + PAGE_SIZE &&
+ canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
}
#else
-static __always_inline bool invalid_probe_range(u64 vaddr)
+bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
{
- return vaddr < TASK_SIZE_MAX;
+ return (unsigned long)unsafe_src >= TASK_SIZE_MAX;
}
#endif
-
-long probe_kernel_read_strict(void *dst, const void *src, size_t size)
-{
- if (unlikely(invalid_probe_range((unsigned long)src)))
- return -EFAULT;
-
- return __probe_kernel_read(dst, src, size);
-}
-
-long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, long count)
-{
- if (unlikely(invalid_probe_range((unsigned long)unsafe_addr)))
- return -EFAULT;
-
- return __strncpy_from_unsafe(dst, unsafe_addr, count);
-}
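A worked example of the canonical_address() check above, assuming 48 implemented virtual-address bits (the 4-level paging case):

	/* Sign-extending from bit 47 reproduces the input: canonical, allowed. */
	u64 ok  = canonical_address(0xffff888000000000ULL, 48);
	/* Yields 0xffff800000000000 != input: non-canonical, probe refused. */
	u64 bad = canonical_address(0x0000800000000000ULL, 48);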
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 106ead05bbe3..7a84fc8bc5c3 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -8,7 +8,7 @@
*/
#include <linux/linkage.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 43fd19b3f118..bd7aff5c51f7 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -17,8 +17,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
-#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
+#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c
index facce271e8b9..0612a73638a8 100644
--- a/arch/x86/mm/pat/cpa-test.c
+++ b/arch/x86/mm/pat/cpa-test.c
@@ -14,7 +14,6 @@
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
#include <asm/kdebug.h>
/*
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index 394be8611748..8f665c352bf0 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -46,7 +46,6 @@
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
-#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
index a07e4882bf36..645613d59942 100644
--- a/arch/x86/mm/pat/memtype_interval.c
+++ b/arch/x86/mm/pat/memtype_interval.c
@@ -14,8 +14,8 @@
#include <linux/interval_tree_generic.h>
#include <linux/sched.h>
#include <linux/gfp.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/memtype.h>
#include "memtype.h"
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d88e9064c28e..dfd82f51ba66 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -3,7 +3,6 @@
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index e1ce59dc558f..1953685c2ddf 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -11,7 +11,6 @@
#include <linux/spinlock.h>
#include <asm/cpu_entry_area.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index da0fb17a1a36..a8a924b3c335 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -34,7 +34,6 @@
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
@@ -493,12 +492,12 @@ static void __init pti_setup_espfix64(void)
}
/*
- * Clone the populated PMDs of the entry and irqentry text and force it RO.
+ * Clone the populated PMDs of the entry text and force it RO.
*/
static void pti_clone_entry_text(void)
{
pti_clone_pgtable((unsigned long) __entry_text_start,
- (unsigned long) __irqentry_text_end,
+ (unsigned long) __entry_text_end,
PTI_CLONE_PMD);
}
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index adb3c5784dac..ed5667f5169f 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -2,8 +2,8 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cpufeature.h>
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e723559c386a..0c67a5a94de3 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
/*
* Device [1022:7808]
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 91220cc25854..e3f1ca316068 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -60,8 +60,7 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
}
#ifdef CONFIG_ACPI
-static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
- bool set_pirq)
+static int xen_register_pirq(u32 gsi, int triggering, bool set_pirq)
{
int rc, pirq = -1, irq = -1;
struct physdev_map_pirq map_irq;
@@ -94,9 +93,6 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
name = "ioapic-level";
}
- if (gsi_override >= 0)
- gsi = gsi_override;
-
irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
if (irq < 0)
goto out;
@@ -112,12 +108,12 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
if (!xen_hvm_domain())
return -1;
- return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
+ return xen_register_pirq(gsi, trigger,
false /* no mapping of GSI to PIRQ */);
}
#ifdef CONFIG_XEN_DOM0
-static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int triggering, int polarity)
{
int rc, irq;
struct physdev_setup_gsi setup_gsi;
@@ -128,7 +124,7 @@ static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
gsi, triggering, polarity);
- irq = xen_register_pirq(gsi, gsi_override, triggering, true);
+ irq = xen_register_pirq(gsi, triggering, true);
setup_gsi.gsi = gsi;
setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
@@ -148,7 +144,7 @@ static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
int trigger, int polarity)
{
- return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
+ return xen_register_gsi(gsi, trigger, polarity);
}
#endif
#endif
@@ -491,7 +487,7 @@ int __init pci_xen_initial_domain(void)
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
continue;
- xen_register_pirq(irq, -1 /* no GSI override */,
+ xen_register_pirq(irq,
trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
true /* Map GSI to PIRQ */);
}
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index c049c432745d..826ead67753d 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -24,11 +24,11 @@
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/efi.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/efi.h>
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index c5e393f8bb3f..8e364c4c6768 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -39,7 +39,6 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 933dd4fe3a97..f03a6883dcc6 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -52,7 +52,7 @@ static const char * const lid_wake_mode_names[] = {
static void battery_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-battery");
+ struct power_supply *psy = power_supply_get_by_name("olpc_battery");
if (psy) {
power_supply_changed(psy);
@@ -62,7 +62,7 @@ static void battery_status_changed(void)
static void ac_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-ac");
+ struct power_supply *psy = power_supply_get_by_name("olpc_ac");
if (psy) {
power_supply_changed(psy);
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 089413cd944e..85f4638764d6 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -75,7 +75,7 @@ static struct kobj_attribute lid_wake_on_close_attr =
static void battery_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-battery");
+ struct power_supply *psy = power_supply_get_by_name("olpc_battery");
if (psy) {
power_supply_changed(psy);
@@ -85,7 +85,7 @@ static void battery_status_changed(void)
static void ac_status_changed(void)
{
- struct power_supply *psy = power_supply_get_by_name("olpc-ac");
+ struct power_supply *psy = power_supply_get_by_name("olpc_ac");
if (psy) {
power_supply_changed(psy);
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index 20a064568463..6bab0f0aa8f3 100644
--- a/arch/x86/platform/olpc/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -3,12 +3,12 @@
#include <linux/export.h>
#include <linux/spinlock_types.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
-#include <asm/pgtable.h>
#include <asm/olpc_ofw.h>
/* address of OFW callback interface; will be NULL if OFW isn't found */
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 4ea69690c3e4..0ac96ca304c7 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1272,7 +1272,7 @@ static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
* (the resource will not be freed until noninterruptable cpus see this
* interrupt; hardware may timeout the s/w ack and reply ERROR)
*/
-void uv_bau_message_interrupt(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
{
int count = 0;
cycles_t time_start;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index fc3b757afb2c..7c65102debaf 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -13,8 +13,8 @@
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index fc413717a45f..d147f1b2c925 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -12,6 +12,7 @@
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <crypto/hash.h>
@@ -19,7 +20,6 @@
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index a1061d471b73..223d5bca29b8 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -8,9 +8,9 @@
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/suspend.h>
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 0197095d9637..a595953f1d6d 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -12,6 +12,7 @@
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
+#include <linux/pgtable.h>
#include <crypto/hash.h>
@@ -19,7 +20,6 @@
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
diff --git a/arch/x86/purgatory/.gitignore b/arch/x86/purgatory/.gitignore
new file mode 100644
index 000000000000..d2be1500671d
--- /dev/null
+++ b/arch/x86/purgatory/.gitignore
@@ -0,0 +1 @@
+purgatory.chk
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index fb4ee5444379..b04e6e72a592 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -14,10 +14,18 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE
CFLAGS_sha256.o := -D__DISABLE_EXPORTS
-LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
-targets += purgatory.ro
-
+# When linking purgatory.ro with -r, unresolved symbols are not checked;
+# also link a purgatory.chk binary without -r to check for unresolved symbols.
+PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
+LDFLAGS_purgatory.ro := -r $(PURGATORY_LDFLAGS)
+LDFLAGS_purgatory.chk := $(PURGATORY_LDFLAGS)
+targets += purgatory.ro purgatory.chk
+
+# Sanitizer, etc. runtimes are unavailable and cannot be linked here.
+GCOV_PROFILE := n
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+KCSAN_SANITIZE := n
KCOV_INSTRUMENT := n
# These are adjustments to the compiler flags used for objects that
@@ -25,7 +33,7 @@ KCOV_INSTRUMENT := n
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
-PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN)
+PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
@@ -58,12 +66,15 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
+$(obj)/purgatory.chk: $(obj)/purgatory.ro FORCE
+ $(call if_changed,ld)
+
targets += kexec-purgatory.c
quiet_cmd_bin2c = BIN2C $@
cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
+$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro $(obj)/purgatory.chk FORCE
$(call if_changed,bin2c)
obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 682c895753d9..6b1f3a4eeb44 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 262f83cad355..1ed1208931e0 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -3,9 +3,9 @@
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
+#include <linux/pgtable.h>
#include <asm/set_memory.h>
-#include <asm/pgtable.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b11ec5d8f8ac..83f1b6a56449 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -6,7 +6,10 @@
# for more details.
#
#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/um/vdso/vma.c b/arch/x86/um/vdso/vma.c
index 9e7c4aba6c3a..76d9f6ce7a3d 100644
--- a/arch/x86/um/vdso/vma.c
+++ b/arch/x86/um/vdso/vma.c
@@ -58,7 +58,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!vdso_enabled)
return 0;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
err = install_special_mapping(mm, um_vdso_addr, PAGE_SIZE,
@@ -66,7 +66,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdsop);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return err;
}
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index e138f7de52d2..3e89b0067ff0 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -13,6 +13,7 @@
#include <asm/smp.h>
#include <asm/reboot.h>
#include <asm/setup.h>
+#include <asm/idtentry.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/early_ioremap.h>
@@ -118,6 +119,17 @@ static void __init init_hvm_pv_info(void)
this_cpu_write(xen_vcpu_id, smp_processor_id());
}
+DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ inc_irq_stat(irq_hv_callback_count);
+
+ xen_hvm_evtchn_do_upcall();
+
+ set_irq_regs(old_regs);
+}
+
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 507f4fb88fa7..33b309d65955 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -63,7 +63,6 @@
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
@@ -605,32 +604,42 @@ struct trap_array_entry {
bool ist_okay;
};
+#define TRAP_ENTRY(func, ist_ok) { \
+ .orig = asm_##func, \
+ .xen = xen_asm_##func, \
+ .ist_okay = ist_ok }
+
+#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \
+ .orig = asm_##func, \
+ .xen = xen_asm_##xenfunc, \
+ .ist_okay = ist_ok }
+
static struct trap_array_entry trap_array[] = {
- { debug, xen_xendebug, true },
- { double_fault, xen_double_fault, true },
+ TRAP_ENTRY_REDIR(exc_debug, exc_xendebug, true ),
+ TRAP_ENTRY(exc_double_fault, true ),
#ifdef CONFIG_X86_MCE
- { machine_check, xen_machine_check, true },
+ TRAP_ENTRY(exc_machine_check, true ),
#endif
- { nmi, xen_xennmi, true },
- { int3, xen_int3, false },
- { overflow, xen_overflow, false },
+ TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true ),
+ TRAP_ENTRY(exc_int3, false ),
+ TRAP_ENTRY(exc_overflow, false ),
#ifdef CONFIG_IA32_EMULATION
{ entry_INT80_compat, xen_entry_INT80_compat, false },
#endif
- { page_fault, xen_page_fault, false },
- { divide_error, xen_divide_error, false },
- { bounds, xen_bounds, false },
- { invalid_op, xen_invalid_op, false },
- { device_not_available, xen_device_not_available, false },
- { coprocessor_segment_overrun, xen_coprocessor_segment_overrun, false },
- { invalid_TSS, xen_invalid_TSS, false },
- { segment_not_present, xen_segment_not_present, false },
- { stack_segment, xen_stack_segment, false },
- { general_protection, xen_general_protection, false },
- { spurious_interrupt_bug, xen_spurious_interrupt_bug, false },
- { coprocessor_error, xen_coprocessor_error, false },
- { alignment_check, xen_alignment_check, false },
- { simd_coprocessor_error, xen_simd_coprocessor_error, false },
+ TRAP_ENTRY(exc_page_fault, false ),
+ TRAP_ENTRY(exc_divide_error, false ),
+ TRAP_ENTRY(exc_bounds, false ),
+ TRAP_ENTRY(exc_invalid_op, false ),
+ TRAP_ENTRY(exc_device_not_available, false ),
+ TRAP_ENTRY(exc_coproc_segment_overrun, false ),
+ TRAP_ENTRY(exc_invalid_tss, false ),
+ TRAP_ENTRY(exc_segment_not_present, false ),
+ TRAP_ENTRY(exc_stack_segment, false ),
+ TRAP_ENTRY(exc_general_protection, false ),
+ TRAP_ENTRY(exc_spurious_interrupt_bug, false ),
+ TRAP_ENTRY(exc_coprocessor_error, false ),
+ TRAP_ENTRY(exc_alignment_check, false ),
+ TRAP_ENTRY(exc_simd_coprocessor_error, false ),
};
static bool __ref get_trap_addr(void **addr, unsigned int ist)
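For clarity, an expansion sketch of the two macros above: TRAP_ENTRY(exc_int3, false) and TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true) produce, respectively:

	{ .orig = asm_exc_int3, .xen = xen_asm_exc_int3,   .ist_okay = false },
	{ .orig = asm_exc_nmi,  .xen = xen_asm_exc_xennmi, .ist_okay = true  },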
@@ -642,7 +651,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
* Replace trap handler addresses by Xen specific ones.
* Check for known traps using IST and whitelist them.
* The debugger ones are the only ones we care about.
- * Xen will handle faults like double_fault, * so we should never see
+ * Xen will handle faults like double_fault, so we should never see
* them. Warn if there's an unexpected IST-using fault handler.
*/
for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) {
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index ecb0d5450334..4988e19598c8 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -21,7 +21,6 @@
#include <xen/grant_table.h>
#include <xen/xen.h>
-#include <asm/pgtable.h>
static struct gnttab_vm_area {
struct vm_struct *area;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index bbba8b17829a..a58d9c69807a 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -51,13 +51,13 @@
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
+#include <linux/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif
#include <trace/events/xen.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1a2d8a50dac4..3566e37241d7 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -20,6 +20,7 @@
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
+#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -993,7 +994,8 @@ static void __init xen_pvmmu_arch_setup(void)
HYPERVISOR_vm_assist(VMASST_CMD_enable,
VMASST_TYPE_pae_extended_cr3);
- if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
+ if (register_callback(CALLBACKTYPE_event,
+ xen_asm_exc_xen_hypervisor_callback) ||
register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
BUG();
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index f2adb63b2d7c..171aff1b11f2 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -23,10 +23,11 @@
#include <linux/nmi.h>
#include <linux/cpuhotplug.h>
#include <linux/stackprotector.h>
+#include <linux/pgtable.h>
#include <asm/paravirt.h>
+#include <asm/idtentry.h>
#include <asm/desc.h>
-#include <asm/pgtable.h>
#include <asm/cpu.h>
#include <xen/interface/xen.h>
@@ -348,7 +349,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
ctxt->event_callback_eip =
- (unsigned long)xen_hypervisor_callback;
+ (unsigned long)xen_asm_exc_xen_hypervisor_callback;
ctxt->failsafe_callback_eip =
(unsigned long)xen_failsafe_callback;
per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
diff --git a/arch/x86/xen/suspend_hvm.c b/arch/x86/xen/suspend_hvm.c
index e666b614cf6d..9d548b0c772f 100644
--- a/arch/x86/xen/suspend_hvm.c
+++ b/arch/x86/xen/suspend_hvm.c
@@ -2,6 +2,7 @@
#include <linux/types.h>
#include <xen/xen.h>
+#include <xen/hvm.h>
#include <xen/features.h>
#include <xen/interface/features.h>
@@ -13,6 +14,6 @@ void xen_hvm_post_suspend(int suspend_cancelled)
xen_hvm_init_shared_info();
xen_vcpu_restore();
}
- xen_callback_vector();
+ xen_setup_callback_vector();
xen_unplug_emulated_devices();
}
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 2712e9155306..4757cec33abe 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -93,7 +93,7 @@ xen_iret_start_crit:
/*
* If there's something pending, mask events again so we can
- * jump back into xen_hypervisor_callback. Otherwise do not
+ * jump back into exc_xen_hypervisor_callback. Otherwise do not
* touch XEN_vcpu_info_mask.
*/
jne 1f
@@ -113,11 +113,11 @@ iret_restore_end:
* Events are masked, so jumping out of the critical region is
* OK.
*/
- je xen_hypervisor_callback
+ je xen_asm_exc_xen_hypervisor_callback
1: iret
xen_iret_end_crit:
- _ASM_EXTABLE(1b, iret_exc)
+ _ASM_EXTABLE(1b, asm_iret_error)
hyper_iret:
/* put this out of line since its very rarely used */
@@ -127,7 +127,7 @@ SYM_CODE_END(xen_iret)
.globl xen_iret_start_crit, xen_iret_end_crit
/*
- * This is called by xen_hypervisor_callback in entry_32.S when it sees
+ * This is called by xen_asm_exc_xen_hypervisor_callback in entry_32.S when it sees
* that the EIP at the time of interrupt was between
* xen_iret_start_crit and xen_iret_end_crit.
*
@@ -144,7 +144,7 @@ SYM_CODE_END(xen_iret)
* eflags }
* cs } nested exception info
* eip }
- * return address : (into xen_hypervisor_callback)
+ * return address : (into xen_asm_exc_xen_hypervisor_callback)
*
* In order to deliver the nested exception properly, we need to discard the
* nested exception frame such that when we handle the exception, we do it
@@ -152,7 +152,8 @@ SYM_CODE_END(xen_iret)
*
* The only caveat is that if the outer eax hasn't been restored yet (i.e.
* it's still on stack), we need to restore its value here.
- */
+*/
+.pushsection .noinstr.text, "ax"
SYM_CODE_START(xen_iret_crit_fixup)
/*
* Paranoia: Make sure we're really coming from kernel space.
@@ -181,3 +182,4 @@ SYM_CODE_START(xen_iret_crit_fixup)
2:
ret
SYM_CODE_END(xen_iret_crit_fixup)
+.popsection
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 0a0fd168683a..5d252aaeade8 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -28,33 +28,33 @@ SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
-xen_pv_trap divide_error
-xen_pv_trap debug
-xen_pv_trap xendebug
-xen_pv_trap int3
-xen_pv_trap xennmi
-xen_pv_trap overflow
-xen_pv_trap bounds
-xen_pv_trap invalid_op
-xen_pv_trap device_not_available
-xen_pv_trap double_fault
-xen_pv_trap coprocessor_segment_overrun
-xen_pv_trap invalid_TSS
-xen_pv_trap segment_not_present
-xen_pv_trap stack_segment
-xen_pv_trap general_protection
-xen_pv_trap page_fault
-xen_pv_trap spurious_interrupt_bug
-xen_pv_trap coprocessor_error
-xen_pv_trap alignment_check
+xen_pv_trap asm_exc_divide_error
+xen_pv_trap asm_exc_debug
+xen_pv_trap asm_exc_xendebug
+xen_pv_trap asm_exc_int3
+xen_pv_trap asm_exc_xennmi
+xen_pv_trap asm_exc_overflow
+xen_pv_trap asm_exc_bounds
+xen_pv_trap asm_exc_invalid_op
+xen_pv_trap asm_exc_device_not_available
+xen_pv_trap asm_exc_double_fault
+xen_pv_trap asm_exc_coproc_segment_overrun
+xen_pv_trap asm_exc_invalid_tss
+xen_pv_trap asm_exc_segment_not_present
+xen_pv_trap asm_exc_stack_segment
+xen_pv_trap asm_exc_general_protection
+xen_pv_trap asm_exc_page_fault
+xen_pv_trap asm_exc_spurious_interrupt_bug
+xen_pv_trap asm_exc_coprocessor_error
+xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
-xen_pv_trap machine_check
+xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
-xen_pv_trap simd_coprocessor_error
+xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
-xen_pv_trap hypervisor_callback
+xen_pv_trap asm_exc_xen_hypervisor_callback
__INIT
SYM_CODE_START(xen_early_idt_handler_array)
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 45a441c33d6d..53b224fd6177 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -8,7 +8,6 @@
#include <xen/xen-ops.h>
/* These are code, but not functions. Defined in entry.S */
-extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
void xen_sysenter_target(void);
@@ -55,7 +54,6 @@ void xen_enable_sysenter(void);
void xen_enable_syscall(void);
void xen_vcpu_restore(void);
-void xen_callback_vector(void);
void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index a0d50be5a8cb..cf907e5bf2f2 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -145,6 +145,8 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#endif
+#define flush_icache_user_range flush_icache_range
+
/* Ensure consistency between data and instruction cache. */
#define local_flush_icache_range(start, end) \
do { \
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index cfb8696917e9..a06ffb0c61c7 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -13,9 +13,9 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
-#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/kmap_types.h>
#endif
@@ -76,12 +76,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#endif
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel( \
- pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), \
- (vaddr)), \
- (vaddr)), \
- (vaddr)), \
- (vaddr))
-
#endif
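The removed kmap_get_fixmap_pte() open-coded a full five-level descent; the arch/xtensa/mm/highmem.c hunk further down replaces its one user with the generic virt_to_kpte(). As a sketch, the generic helpers in <linux/pgtable.h> boil down to roughly the following (simplified; the real versions sit behind ifdef guards):

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}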
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index d6a10704307a..eac503215f17 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -13,10 +13,10 @@
#define _XTENSA_HIGHMEM_H
#include <linux/wait.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
-#include <asm/pgtable.h>
#define PKMAP_BASE ((FIXADDR_START - \
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e3e1d9a1ef69..9ee0c1d004f9 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -24,7 +24,7 @@
#define _XTENSA_INITIALIZE_MMU_H
#include <linux/init.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <asm/vectors.h>
#if XCHAL_HAVE_PTP_MMU
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index de5e6cbbafe4..74923ef3b228 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -18,10 +18,10 @@
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/vectors.h>
-#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 8be0c0568c50..fa054a1772e1 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -267,7 +267,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline pte_t pte_wrprotect(pte_t pte) 
+static inline pte_t pte_wrprotect(pte_t pte)
{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
@@ -359,22 +359,6 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
update_pte(ptep, pte_wrprotect(pte));
}
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))
-
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr) \
- ((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte) do { } while (0)
-
-
/*
* Encode and decode a swap and file entry.
*/
@@ -438,6 +422,4 @@ typedef pte_t *pte_addr_t;
*/
#define HAVE_ARCH_UNMAPPED_AREA
-#include <asm-generic/pgtable.h>
-
#endif /* _XTENSA_PGTABLE_H */
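Dropping these walkers (and the trailing <asm-generic/pgtable.h> include) works because <linux/pgtable.h> now supplies equivalent generic definitions, so xtensa's copies were redundant. Roughly, the generic forms look like this sketch (note the generic pgd_index() additionally masks with PTRS_PER_PGD - 1, which is harmless here):

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address)	pgd_offset_pgd((mm)->pgd, (address))
#define pgd_offset_k(address)	pgd_offset(&init_mm, (address))

#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}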
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index fae33ddcaebb..98515c24d9b2 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
@@ -22,7 +23,6 @@
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 3edecc41ef8c..b7fe6f443b42 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/rcupdate.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 145742d70a9f..b4c07bd890fe 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -33,7 +33,6 @@
#include <asm/coprocessor.h>
#include <asm/elf.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
static int gpr_get(struct task_struct *target,
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 3880c765d448..d9204dc2656e 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -37,7 +37,6 @@
#include <asm/bootparam.h>
#include <asm/kasan.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/timex.h>
#include <asm/platform.h>
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 0976e27b8d5d..efc3a29cde80 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -34,12 +34,12 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
+#include <linux/pgtable.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>
@@ -479,25 +479,29 @@ void show_regs(struct pt_regs * regs)
static int show_trace_cb(struct stackframe *frame, void *data)
{
+ const char *loglvl = data;
+
if (kernel_text_address(frame->pc))
- pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
+ printk("%s [<%08lx>] %pB\n",
+ loglvl, frame->pc, (void *)frame->pc);
return 0;
}
-void show_trace(struct task_struct *task, unsigned long *sp)
+static void show_trace(struct task_struct *task, unsigned long *sp,
+ const char *loglvl)
{
if (!sp)
sp = stack_pointer(task);
- pr_info("Call Trace:\n");
- walk_stackframe(sp, show_trace_cb, NULL);
+ printk("%sCall Trace:\n", loglvl);
+ walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}
#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 32
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
size_t len;
@@ -507,11 +511,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
- pr_info("Stack:\n");
- print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
+ printk("%sStack:\n", loglvl);
+ print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
sp, len, false);
- show_trace(task, sp);
+ show_trace(task, sp, loglvl);
}
DEFINE_SPINLOCK(die_lock);
@@ -530,7 +534,7 @@ void die(const char * str, struct pt_regs * regs, long err)
pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
show_regs(regs);
if (!user_mode(regs))
- show_stack(NULL, (unsigned long*)regs->areg[1]);
+ show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die_lock);
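The pattern here threads an explicit log level through show_stack() instead of hard-coding pr_info(): a KERN_* constant is an ordinary prefix string, so it can be forwarded through the void * callback argument and consumed by printk() via "%s". Hypothetical call sites illustrating the new third parameter:

/* Sketch: the same dumper now serves different severities. */
show_stack(current, NULL, KERN_DEBUG);	/* chatty diagnostics */
show_stack(NULL, sp, KERN_EMERG);	/* from a panic path */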
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 95ad1e773991..1a7538ccfc5a 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -43,11 +43,11 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index b27359e2a464..2369433b734a 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -24,6 +24,7 @@
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
+#include <linux/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
@@ -31,7 +32,6 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
/*
* Note:
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index e7172bd53ced..c4decc73fd86 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -74,7 +74,7 @@ void do_page_fault(struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
if (!vma)
@@ -130,7 +130,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -139,7 +139,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (flags & VM_FAULT_MAJOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
@@ -152,7 +152,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
current->thread.bad_vaddr = address;
current->thread.error_code = is_write;
@@ -167,7 +167,7 @@ bad_area:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
@@ -175,7 +175,7 @@ out_of_memory:
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Send a sigbus, regardless of whether we were in kernel
* or user mode.
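This hunk is the mechanical conversion to the mmap locking API. When the API was introduced it consisted of thin wrappers around the old semaphore, which keeps the substitution behavior-preserving; a sketch of the wrappers, assuming the initial <linux/mmap_lock.h> form:

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_sem);
}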
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 99b5ad137ab5..673196fe862e 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -86,6 +86,6 @@ void __init kmap_init(void)
BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
- kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+ kmap_pte = virt_to_kpte(kmap_vstart);
kmap_waitqueues_init();
}
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index 9ea3f21d60c7..a400188c16b9 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -7,9 +7,9 @@
#include <linux/io.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
pgprot_t prot)
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index e3baa21ff24c..1fef24db2ff6 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -19,10 +19,7 @@
void __init kasan_early_init(void)
{
unsigned long vaddr = KASAN_SHADOW_START;
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
+ pmd_t *pmd = pmd_off_k(vaddr);
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
@@ -43,10 +40,7 @@ static void __init populate(void *start, void *end)
unsigned long n_pmds = n_pages / PTRS_PER_PTE;
unsigned long i, j;
unsigned long vaddr = (unsigned long)start;
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
+ pmd_t *pmd = pmd_off_k(vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
if (!pte)
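Both kasan_init.c here and mmu.c below collapse the same four-step descent into pmd_off_k() (see the sketch after the fixmap.h hunk above). Code that then covers a range, as populate() does, still advances through the pmd entries itself; a hedged sketch of that loop shape, with hypothetical variable names:

/* Sketch: point n_pmds consecutive kernel pmd entries at a
 * contiguous array of page tables, PTRS_PER_PTE ptes each. */
pmd_t *pmd = pmd_off_k(vaddr);
unsigned long i;

for (i = 0; i < n_pmds; ++i, pte += PTRS_PER_PTE)
	set_pmd(pmd + i, __pmd((unsigned long)pte));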
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 6aa036c427c3..25cd67debee6 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -14,8 +14,8 @@
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 37e478a27877..fd2193df8a14 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -21,10 +21,7 @@
#if defined(CONFIG_HIGHMEM)
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
- pgd_t *pgd = pgd_offset_k(vaddr);
- p4d_t *p4d = p4d_offset(pgd, vaddr);
- pud_t *pud = pud_offset(p4d, vaddr);
- pmd_t *pmd = pmd_offset(pud, vaddr);
+ pmd_t *pmd = pmd_off_k(vaddr);
pte_t *pte;
unsigned long i;