Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/Kconfig | 1
-rw-r--r-- arch/alpha/include/asm/extable.h | 55
-rw-r--r-- arch/alpha/include/asm/futex.h | 16
-rw-r--r-- arch/alpha/include/asm/uaccess.h | 296
-rw-r--r-- arch/alpha/kernel/osf_sys.c | 2
-rw-r--r-- arch/alpha/kernel/traps.c | 152
-rw-r--r-- arch/alpha/lib/clear_user.S | 66
-rw-r--r-- arch/alpha/lib/copy_user.S | 82
-rw-r--r-- arch/alpha/lib/csum_partial_copy.c | 10
-rw-r--r-- arch/alpha/lib/ev6-clear_user.S | 84
-rw-r--r-- arch/alpha/lib/ev6-copy_user.S | 104
-rw-r--r-- arch/arc/Kconfig | 1
-rw-r--r-- arch/arc/include/asm/hugepage.h | 1
-rw-r--r-- arch/arc/include/asm/pgtable.h | 1
-rw-r--r-- arch/arc/include/asm/uaccess.h | 19
-rw-r--r-- arch/arc/mm/extable.c | 14
-rw-r--r-- arch/arm/Kconfig | 1
-rw-r--r-- arch/arm/boot/dts/am335x-pcm-953.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/am57xx-idk-common.dtsi | 14
-rw-r--r-- arch/arm/boot/dts/bcm5301x.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/bcm953012k.dts | 5
-rw-r--r-- arch/arm/boot/dts/bcm958522er.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm958525er.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm958525xmc.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm958622hr.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm958623hr.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm958625hr.dts | 1
-rw-r--r-- arch/arm/boot/dts/bcm988312hr.dts | 1
-rw-r--r-- arch/arm/boot/dts/imx6sx-udoo-neo.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/sama5d2.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/ste-dbx5x0.dtsi | 19
-rw-r--r-- arch/arm/boot/dts/ste-href.dtsi | 9
-rw-r--r-- arch/arm/boot/dts/ste-snowball.dts | 9
-rw-r--r-- arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi | 7
-rw-r--r-- arch/arm/configs/omap2plus_defconfig | 1
-rw-r--r-- arch/arm/include/asm/Kbuild | 1
-rw-r--r-- arch/arm/include/asm/kvm_arm.h | 1
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable.h | 1
-rw-r--r-- arch/arm/include/asm/uaccess.h | 80
-rw-r--r-- arch/arm/kvm/arm.c | 3
-rw-r--r-- arch/arm/kvm/handle_exit.c | 19
-rw-r--r-- arch/arm/mach-at91/pm.c | 18
-rw-r--r-- arch/arm/mach-omap2/Makefile | 3
-rw-r--r-- arch/arm/mach-omap2/gpmc-nand.c | 154
-rw-r--r-- arch/arm/mach-omap2/gpmc-onenand.c | 10
-rw-r--r-- arch/arm/mach-omap2/omap-headsmp.S | 3
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 45
-rw-r--r-- arch/arm/tools/syscall.tbl | 1
-rw-r--r-- arch/arm64/Kconfig | 15
-rw-r--r-- arch/arm64/boot/dts/broadcom/ns2.dtsi | 11
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 2
-rw-r--r-- arch/arm64/include/asm/extable.h | 25
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 3
-rw-r--r-- arch/arm64/include/asm/pgtable-types.h | 4
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 78
-rw-r--r-- arch/arm64/include/asm/unistd.h | 2
-rw-r--r-- arch/arm64/include/asm/unistd32.h | 8
-rw-r--r-- arch/arm64/kernel/arm64ksyms.c | 2
-rw-r--r-- arch/arm64/kernel/cpuidle.c | 2
-rw-r--r-- arch/arm64/kernel/kaslr.c | 10
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 6
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 19
-rw-r--r-- arch/arm64/kvm/hyp/tlb.c | 64
-rw-r--r-- arch/arm64/lib/copy_in_user.S | 4
-rw-r--r-- arch/arm64/mm/kasan_init.c | 2
-rw-r--r-- arch/avr32/Kconfig | 1
-rw-r--r-- arch/avr32/include/asm/Kbuild | 1
-rw-r--r-- arch/avr32/include/asm/pgtable-2level.h | 1
-rw-r--r-- arch/avr32/include/asm/uaccess.h | 33
-rw-r--r-- arch/avr32/kernel/avr32_ksyms.c | 2
-rw-r--r-- arch/avr32/lib/copy_user.S | 15
-rw-r--r-- arch/blackfin/Kconfig | 1
-rw-r--r-- arch/blackfin/include/asm/Kbuild | 1
-rw-r--r-- arch/blackfin/include/asm/uaccess.h | 43
-rw-r--r-- arch/c6x/Kconfig | 1
-rw-r--r-- arch/c6x/include/asm/uaccess.h | 16
-rw-r--r-- arch/cris/Kconfig | 1
-rw-r--r-- arch/cris/arch-v10/lib/usercopy.c | 31
-rw-r--r-- arch/cris/arch-v32/drivers/cryptocop.c | 2
-rw-r--r-- arch/cris/arch-v32/lib/usercopy.c | 30
-rw-r--r-- arch/cris/include/arch-v10/arch/uaccess.h | 46
-rw-r--r-- arch/cris/include/arch-v32/arch/uaccess.h | 54
-rw-r--r-- arch/cris/include/asm/Kbuild | 1
-rw-r--r-- arch/cris/include/asm/pgtable.h | 1
-rw-r--r-- arch/cris/include/asm/uaccess.h | 67
-rw-r--r-- arch/frv/Kconfig | 1
-rw-r--r-- arch/frv/include/asm/Kbuild | 1
-rw-r--r-- arch/frv/include/asm/pgtable.h | 1
-rw-r--r-- arch/frv/include/asm/uaccess.h | 80
-rw-r--r-- arch/frv/kernel/traps.c | 7
-rw-r--r-- arch/frv/mm/extable.c | 27
-rw-r--r-- arch/frv/mm/fault.c | 6
-rw-r--r-- arch/h8300/Kconfig | 1
-rw-r--r-- arch/h8300/include/asm/pgtable.h | 1
-rw-r--r-- arch/h8300/include/asm/uaccess.h | 10
-rw-r--r-- arch/hexagon/Kconfig | 1
-rw-r--r-- arch/hexagon/include/asm/pgtable.h | 1
-rw-r--r-- arch/hexagon/include/asm/uaccess.h | 23
-rw-r--r-- arch/hexagon/kernel/hexagon_ksyms.c | 4
-rw-r--r-- arch/hexagon/mm/copy_from_user.S | 2
-rw-r--r-- arch/hexagon/mm/copy_to_user.S | 2
-rw-r--r-- arch/ia64/include/asm/pgtable.h | 2
-rw-r--r-- arch/m32r/Kconfig | 1
-rw-r--r-- arch/m32r/include/asm/Kbuild | 1
-rw-r--r-- arch/m32r/include/asm/uaccess.h | 184
-rw-r--r-- arch/m32r/kernel/m32r_ksyms.c | 2
-rw-r--r-- arch/m32r/lib/usercopy.c | 21
-rw-r--r-- arch/m68k/Kconfig | 1
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 14
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 14
-rw-r--r-- arch/m68k/configs/atari_defconfig | 14
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 14
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 14
-rw-r--r-- arch/m68k/configs/mac_defconfig | 14
-rw-r--r-- arch/m68k/configs/multi_defconfig | 14
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 14
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 14
-rw-r--r-- arch/m68k/configs/q40_defconfig | 14
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 14
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 14
-rw-r--r-- arch/m68k/include/asm/Kbuild | 1
-rw-r--r-- arch/m68k/include/asm/bitops.h | 2
-rw-r--r-- arch/m68k/include/asm/processor.h | 10
-rw-r--r-- arch/m68k/include/asm/uaccess.h | 1
-rw-r--r-- arch/m68k/include/asm/uaccess_mm.h | 96
-rw-r--r-- arch/m68k/include/asm/uaccess_no.h | 39
-rw-r--r-- arch/m68k/include/asm/unistd.h | 2
-rw-r--r-- arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/m68k/kernel/signal.c | 2
-rw-r--r-- arch/m68k/kernel/syscalltable.S | 1
-rw-r--r-- arch/m68k/kernel/traps.c | 9
-rw-r--r-- arch/m68k/lib/uaccess.c | 12
-rw-r--r-- arch/m68k/mm/fault.c | 2
-rw-r--r-- arch/metag/Kconfig | 1
-rw-r--r-- arch/metag/include/asm/Kbuild | 1
-rw-r--r-- arch/metag/include/asm/pgtable.h | 1
-rw-r--r-- arch/metag/include/asm/uaccess.h | 57
-rw-r--r-- arch/metag/lib/usercopy.c | 318
-rw-r--r-- arch/microblaze/Kconfig | 1
-rw-r--r-- arch/microblaze/include/asm/Kbuild | 1
-rw-r--r-- arch/microblaze/include/asm/page.h | 3
-rw-r--r-- arch/microblaze/include/asm/uaccess.h | 53
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/cavium-octeon/cpu.c | 2
-rw-r--r-- arch/mips/cavium-octeon/crypto/octeon-crypto.c | 1
-rw-r--r-- arch/mips/cavium-octeon/octeon-memcpy.S | 31
-rw-r--r-- arch/mips/cavium-octeon/smp.c | 1
-rw-r--r-- arch/mips/include/asm/fpu.h | 1
-rw-r--r-- arch/mips/include/asm/pgtable-32.h | 1
-rw-r--r-- arch/mips/include/asm/pgtable-64.h | 1
-rw-r--r-- arch/mips/include/asm/uaccess.h | 442
-rw-r--r-- arch/mips/kernel/mips-r2-to-r6-emul.c | 24
-rw-r--r-- arch/mips/kernel/smp-bmips.c | 1
-rw-r--r-- arch/mips/kernel/smp-mt.c | 1
-rw-r--r-- arch/mips/kernel/syscall.c | 2
-rw-r--r-- arch/mips/lib/memcpy.S | 49
-rw-r--r-- arch/mips/loongson64/loongson-3/cop2-ex.c | 1
-rw-r--r-- arch/mips/netlogic/common/smp.c | 1
-rw-r--r-- arch/mips/netlogic/xlp/cop2-ex.c | 3
-rw-r--r-- arch/mips/oprofile/backtrace.c | 2
-rw-r--r-- arch/mips/sgi-ip22/ip28-berr.c | 1
-rw-r--r-- arch/mips/sgi-ip27/ip27-berr.c | 2
-rw-r--r-- arch/mips/sgi-ip27/ip27-smp.c | 3
-rw-r--r-- arch/mips/sgi-ip32/ip32-berr.c | 1
-rw-r--r-- arch/mips/sgi-ip32/ip32-reset.c | 1
-rw-r--r-- arch/mn10300/Kconfig | 1
-rw-r--r-- arch/mn10300/include/asm/Kbuild | 1
-rw-r--r-- arch/mn10300/include/asm/page.h | 1
-rw-r--r-- arch/mn10300/include/asm/uaccess.h | 182
-rw-r--r-- arch/mn10300/kernel/mn10300_ksyms.c | 2
-rw-r--r-- arch/mn10300/lib/usercopy.c | 18
-rw-r--r-- arch/nios2/Kconfig | 1
-rw-r--r-- arch/nios2/include/asm/Kbuild | 1
-rw-r--r-- arch/nios2/include/asm/pgtable.h | 1
-rw-r--r-- arch/nios2/include/asm/uaccess.h | 50
-rw-r--r-- arch/nios2/mm/uaccess.c | 16
-rw-r--r-- arch/openrisc/Kconfig | 1
-rw-r--r-- arch/openrisc/include/asm/Kbuild | 1
-rw-r--r-- arch/openrisc/include/asm/cmpxchg.h | 8
-rw-r--r-- arch/openrisc/include/asm/pgtable.h | 1
-rw-r--r-- arch/openrisc/include/asm/uaccess.h | 48
-rw-r--r-- arch/openrisc/kernel/or32_ksyms.c | 4
-rw-r--r-- arch/openrisc/kernel/process.c | 1
-rw-r--r-- arch/parisc/Kconfig | 1
-rw-r--r-- arch/parisc/include/asm/cacheflush.h | 23
-rw-r--r-- arch/parisc/include/asm/uaccess.h | 126
-rw-r--r-- arch/parisc/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/parisc/kernel/cache.c | 22
-rw-r--r-- arch/parisc/kernel/module.c | 8
-rw-r--r-- arch/parisc/kernel/parisc_ksyms.c | 10
-rw-r--r-- arch/parisc/kernel/perf.c | 94
-rw-r--r-- arch/parisc/kernel/process.c | 4
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 1
-rw-r--r-- arch/parisc/lib/Makefile | 2
-rw-r--r-- arch/parisc/lib/fixup.S | 98
-rw-r--r-- arch/parisc/lib/lusercopy.S | 318
-rw-r--r-- arch/parisc/lib/memcpy.c | 475
-rw-r--r-- arch/parisc/mm/fault.c | 17
-rw-r--r-- arch/powerpc/Kconfig | 1
-rw-r--r-- arch/powerpc/boot/zImage.lds.S | 1
-rw-r--r-- arch/powerpc/crypto/crc32c-vpmsum_glue.c | 2
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 4
-rw-r--r-- arch/powerpc/include/asm/book3s/32/pgtable.h | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 88
-rw-r--r-- arch/powerpc/include/asm/extable.h | 29
-rw-r--r-- arch/powerpc/include/asm/mce.h | 108
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgtable.h | 1
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable-4k.h | 3
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable-64k.h | 1
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 1
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 85
-rw-r--r-- arch/powerpc/include/asm/unistd.h | 2
-rw-r--r-- arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/powerpc/kernel/cputable.c | 3
-rw-r--r-- arch/powerpc/kernel/idle_book3s.S | 20
-rw-r--r-- arch/powerpc/kernel/mce.c | 88
-rw-r--r-- arch/powerpc/kernel/mce_power.c | 237
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2
-rw-r--r-- arch/powerpc/lib/Makefile | 2
-rw-r--r-- arch/powerpc/lib/copy_32.S | 14
-rw-r--r-- arch/powerpc/lib/copyuser_64.S | 35
-rw-r--r-- arch/powerpc/lib/usercopy_64.c | 41
-rw-r--r-- arch/powerpc/mm/init_64.c | 3
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 2
-rw-r--r-- arch/powerpc/perf/isa207-common.c | 43
-rw-r--r-- arch/powerpc/perf/isa207-common.h | 1
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 21
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 20
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 4
-rw-r--r-- arch/powerpc/purgatory/trampoline.S | 12
-rw-r--r-- arch/powerpc/sysdev/axonram.c | 5
-rw-r--r-- arch/s390/Kconfig | 1
-rw-r--r-- arch/s390/crypto/paes_s390.c | 5
-rw-r--r-- arch/s390/include/asm/cputime.h | 20
-rw-r--r-- arch/s390/include/asm/extable.h | 28
-rw-r--r-- arch/s390/include/asm/pgtable.h | 1
-rw-r--r-- arch/s390/include/asm/timex.h | 12
-rw-r--r-- arch/s390/include/asm/uaccess.h | 146
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 4
-rw-r--r-- arch/s390/kernel/compat_wrapper.c | 1
-rw-r--r-- arch/s390/kernel/entry.S | 10
-rw-r--r-- arch/s390/kernel/ipl.c | 2
-rw-r--r-- arch/s390/kernel/process.c | 3
-rw-r--r-- arch/s390/kernel/syscalls.S | 2
-rw-r--r-- arch/s390/kernel/vtime.c | 2
-rw-r--r-- arch/s390/lib/uaccess.c | 68
-rw-r--r-- arch/s390/mm/pgtable.c | 19
-rw-r--r-- arch/score/Kconfig | 1
-rw-r--r-- arch/score/include/asm/Kbuild | 1
-rw-r--r-- arch/score/include/asm/extable.h | 11
-rw-r--r-- arch/score/include/asm/pgtable.h | 1
-rw-r--r-- arch/score/include/asm/uaccess.h | 54
-rw-r--r-- arch/score/kernel/traps.c | 1
-rw-r--r-- arch/score/mm/extable.c | 2
-rw-r--r-- arch/sh/Kconfig | 1
-rw-r--r-- arch/sh/boards/mach-cayman/setup.c | 2
-rw-r--r-- arch/sh/include/asm/extable.h | 10
-rw-r--r-- arch/sh/include/asm/pgtable-2level.h | 1
-rw-r--r-- arch/sh/include/asm/pgtable-3level.h | 1
-rw-r--r-- arch/sh/include/asm/uaccess.h | 59
-rw-r--r-- arch/sparc/Kconfig | 1
-rw-r--r-- arch/sparc/include/asm/pgtable_64.h | 1
-rw-r--r-- arch/sparc/include/asm/uaccess_32.h | 31
-rw-r--r-- arch/sparc/include/asm/uaccess_64.h | 33
-rw-r--r-- arch/sparc/kernel/head_32.S | 7
-rw-r--r-- arch/sparc/lib/GENcopy_from_user.S | 2
-rw-r--r-- arch/sparc/lib/GENcopy_to_user.S | 2
-rw-r--r-- arch/sparc/lib/GENpatch.S | 4
-rw-r--r-- arch/sparc/lib/NG2copy_from_user.S | 2
-rw-r--r-- arch/sparc/lib/NG2copy_to_user.S | 2
-rw-r--r-- arch/sparc/lib/NG2patch.S | 4
-rw-r--r-- arch/sparc/lib/NG4copy_from_user.S | 2
-rw-r--r-- arch/sparc/lib/NG4copy_to_user.S | 2
-rw-r--r-- arch/sparc/lib/NG4patch.S | 4
-rw-r--r-- arch/sparc/lib/NGcopy_from_user.S | 2
-rw-r--r-- arch/sparc/lib/NGcopy_to_user.S | 2
-rw-r--r-- arch/sparc/lib/NGpatch.S | 4
-rw-r--r-- arch/sparc/lib/U1copy_from_user.S | 4
-rw-r--r-- arch/sparc/lib/U1copy_to_user.S | 4
-rw-r--r-- arch/sparc/lib/U3copy_to_user.S | 2
-rw-r--r-- arch/sparc/lib/U3patch.S | 4
-rw-r--r-- arch/sparc/lib/copy_in_user.S | 6
-rw-r--r-- arch/sparc/lib/copy_user.S | 16
-rw-r--r-- arch/tile/Kconfig | 1
-rw-r--r-- arch/tile/include/asm/Kbuild | 1
-rw-r--r-- arch/tile/include/asm/pgtable_32.h | 1
-rw-r--r-- arch/tile/include/asm/pgtable_64.h | 1
-rw-r--r-- arch/tile/include/asm/uaccess.h | 162
-rw-r--r-- arch/tile/lib/exports.c | 7
-rw-r--r-- arch/tile/lib/memcpy_32.S | 41
-rw-r--r-- arch/tile/lib/memcpy_user_64.c | 15
-rw-r--r-- arch/um/Kconfig.common | 1
-rw-r--r-- arch/um/include/asm/pgtable-2level.h | 1
-rw-r--r-- arch/um/include/asm/pgtable-3level.h | 1
-rw-r--r-- arch/um/include/asm/uaccess.h | 8
-rw-r--r-- arch/um/kernel/skas/uaccess.c | 8
-rw-r--r-- arch/unicore32/Kconfig | 1
-rw-r--r-- arch/unicore32/include/asm/pgtable.h | 1
-rw-r--r-- arch/unicore32/include/asm/uaccess.h | 6
-rw-r--r-- arch/unicore32/kernel/ksyms.c | 4
-rw-r--r-- arch/unicore32/lib/copy_from_user.S | 16
-rw-r--r-- arch/unicore32/lib/copy_to_user.S | 6
-rw-r--r-- arch/x86/Kconfig | 1
-rw-r--r-- arch/x86/events/core.c | 16
-rw-r--r-- arch/x86/include/asm/cpufeatures.h | 3
-rw-r--r-- arch/x86/include/asm/kvm_page_track.h | 1
-rw-r--r-- arch/x86/include/asm/pgtable-3level.h | 3
-rw-r--r-- arch/x86/include/asm/pgtable.h | 2
-rw-r--r-- arch/x86/include/asm/pgtable_types.h | 4
-rw-r--r-- arch/x86/include/asm/purgatory.h | 20
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 2
-rw-r--r-- arch/x86/include/asm/uaccess.h | 65
-rw-r--r-- arch/x86/include/asm/uaccess_32.h | 125
-rw-r--r-- arch/x86/include/asm/uaccess_64.h | 127
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 9
-rw-r--r-- arch/x86/kernel/apic/apic.c | 26
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 2
-rw-r--r-- arch/x86/kernel/ftrace.c | 2
-rw-r--r-- arch/x86/kernel/head64.c | 1
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 9
-rw-r--r-- arch/x86/kernel/nmi.c | 6
-rw-r--r-- arch/x86/kernel/reboot.c | 2
-rw-r--r-- arch/x86/kernel/tsc.c | 2
-rw-r--r-- arch/x86/kernel/unwind_frame.c | 36
-rw-r--r-- arch/x86/kvm/i8259.c | 3
-rw-r--r-- arch/x86/kvm/ioapic.c | 3
-rw-r--r-- arch/x86/kvm/page_track.c | 8
-rw-r--r-- arch/x86/kvm/svm.c | 3
-rw-r--r-- arch/x86/kvm/vmx.c | 74
-rw-r--r-- arch/x86/kvm/x86.c | 7
-rw-r--r-- arch/x86/lib/usercopy.c | 54
-rw-r--r-- arch/x86/lib/usercopy_32.c | 288
-rw-r--r-- arch/x86/lib/usercopy_64.c | 13
-rw-r--r-- arch/x86/mm/gup.c | 37
-rw-r--r-- arch/x86/mm/kasan_init_64.c | 1
-rw-r--r-- arch/x86/mm/mpx.c | 2
-rw-r--r-- arch/x86/pci/xen.c | 23
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/Makefile | 1
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c | 82
-rw-r--r-- arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c | 2
-rw-r--r-- arch/x86/platform/intel-mid/mfld.c | 15
-rw-r--r-- arch/x86/purgatory/purgatory.c | 35
-rw-r--r-- arch/x86/purgatory/purgatory.h | 8
-rw-r--r-- arch/x86/purgatory/setup-x86_64.S | 2
-rw-r--r-- arch/x86/purgatory/sha256.h | 1
-rw-r--r-- arch/xtensa/Kconfig | 1
-rw-r--r-- arch/xtensa/include/asm/Kbuild | 1
-rw-r--r-- arch/xtensa/include/asm/pgtable.h | 1
-rw-r--r-- arch/xtensa/include/asm/uaccess.h | 59
-rw-r--r-- arch/xtensa/lib/usercopy.S | 116
354 files changed, 3083 insertions, 5211 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 0e49d39ea74a..1be5f61dc630 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -26,6 +26,7 @@ config ALPHA
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
+ select ARCH_HAS_RAW_COPY_USER
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/extable.h b/arch/alpha/include/asm/extable.h
new file mode 100644
index 000000000000..048e209e524c
--- /dev/null
+++ b/arch/alpha/include/asm/extable.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_EXTABLE_H
+#define _ASM_EXTABLE_H
+
+/*
+ * About the exception table:
+ *
+ * - insn is a 32-bit pc-relative offset from the faulting insn.
+ * - nextinsn is a 16-bit offset off of the faulting instruction
+ * (not off of the *next* instruction as branches are).
+ * - errreg is the register in which to place -EFAULT.
+ * - valreg is the final target register for the load sequence
+ * and will be zeroed.
+ *
+ * Either errreg or valreg may be $31, in which case nothing happens.
+ *
+ * The exception fixup information "just so happens" to be arranged
+ * as in a MEM format instruction. This lets us emit our three
+ * values like so:
+ *
+ * lda valreg, nextinsn(errreg)
+ *
+ */
+
+struct exception_table_entry
+{
+ signed int insn;
+ union exception_fixup {
+ unsigned unit;
+ struct {
+ signed int nextinsn : 16;
+ unsigned int errreg : 5;
+ unsigned int valreg : 5;
+ } bits;
+ } fixup;
+};
+
+/* Returns the new pc */
+#define fixup_exception(map_reg, _fixup, pc) \
+({ \
+ if ((_fixup)->fixup.bits.valreg != 31) \
+ map_reg((_fixup)->fixup.bits.valreg) = 0; \
+ if ((_fixup)->fixup.bits.errreg != 31) \
+ map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \
+ (pc) + (_fixup)->fixup.bits.nextinsn; \
+})
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#define swap_ex_entry_fixup(a, b, tmp, delta) \
+ do { \
+ (a)->fixup.unit = (b)->fixup.unit; \
+ (b)->fixup.unit = (tmp).fixup.unit; \
+ } while (0)
+
+#endif
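
[Editor's aside, not part of the patch.] The comment in the new header describes how each fixup packs three values into a word shaped like a MEM-format instruction. A rough user-space sketch of how one entry is consumed, mirroring fixup_exception() above (the register numbers, offsets, and fault address here are made up purely for illustration):

#include <errno.h>
#include <stdio.h>

struct exception_table_entry {
	signed int insn;			/* pc-relative offset of the faulting insn */
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;	/* resume offset from the faulting insn */
			unsigned int errreg : 5;	/* register that receives -EFAULT */
			unsigned int valreg : 5;	/* register that gets zeroed */
		} bits;
	} fixup;
};

int main(void)
{
	unsigned long regs[32] = { 0 };
	/* Entry equivalent to "lda %0,2b-1b(%1)" with %0 = $2 and %1 = $3:
	   on a fault, zero $2, put -EFAULT in $3, resume 4 bytes further on. */
	struct exception_table_entry e = {
		.fixup.bits = { .nextinsn = 4, .errreg = 3, .valreg = 2 },
	};
	unsigned long fault_pc = 0x1000;	/* made-up faulting pc */

	if (e.fixup.bits.valreg != 31)
		regs[e.fixup.bits.valreg] = 0;
	if (e.fixup.bits.errreg != 31)
		regs[e.fixup.bits.errreg] = -EFAULT;
	printf("resume at %#lx, $3 = %ld\n",
	       fault_pc + e.fixup.bits.nextinsn, (long)regs[3]);
	return 0;
}

If either field is $31 (the hard-wired zero register), the corresponding write is skipped, exactly as the header's comment states.
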
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index f939794363ac..fb01dfb760c2 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -19,12 +19,8 @@
"3: .subsection 2\n" \
"4: br 1b\n" \
" .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .long 1b-.\n" \
- " lda $31,3b-1b(%1)\n" \
- " .long 2b-.\n" \
- " lda $31,3b-2b(%1)\n" \
- " .previous\n" \
+ EXC(1b,3b,%1,$31) \
+ EXC(2b,3b,%1,$31) \
: "=&r" (oldval), "=&r"(ret) \
: "r" (uaddr), "r"(oparg) \
: "memory")
@@ -101,12 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"3: .subsection 2\n"
"4: br 1b\n"
" .previous\n"
- " .section __ex_table,\"a\"\n"
- " .long 1b-.\n"
- " lda $31,3b-1b(%0)\n"
- " .long 2b-.\n"
- " lda $31,3b-2b(%0)\n"
- " .previous\n"
+ EXC(1b,3b,%0,$31)
+ EXC(2b,3b,%0,$31)
: "+r"(ret), "=&r"(prev), "=&r"(cmp)
: "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
: "memory");
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index b2f62b98e290..7b82dc9a8556 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -32,13 +32,13 @@
* - AND "addr+size" doesn't have any high-bits set
* - OR we are in kernel mode.
*/
-#define __access_ok(addr, size, segment) \
- (((segment).seg & (addr | size | (addr+size))) == 0)
+#define __access_ok(addr, size) \
+ ((get_fs().seg & (addr | size | (addr+size))) == 0)
-#define access_ok(type, addr, size) \
-({ \
- __chk_user_ptr(addr); \
- __access_ok(((unsigned long)(addr)), (size), get_fs()); \
+#define access_ok(type, addr, size) \
+({ \
+ __chk_user_ptr(addr); \
+ __access_ok(((unsigned long)(addr)), (size)); \
})
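
[Editor's aside.] A quick stand-alone illustration of the mask trick in __access_ok() above: an access [addr, addr+size) is allowed when neither addr, size, nor addr+size has a bit set inside the segment mask. The segment values below are assumptions (USER_DS on Alpha masking everything at or above the 2^42-byte user region, KERNEL_DS being 0 so kernel mode accepts anything), and the sketch assumes 64-bit longs:

#include <stdio.h>

static int seg_access_ok(unsigned long seg, unsigned long addr, unsigned long size)
{
	/* no tests, just one AND: any stray high bit makes the result non-zero */
	return (seg & (addr | size | (addr + size))) == 0;
}

int main(void)
{
	unsigned long user_seg = ~((1UL << 42) - 1);	/* assumed USER_DS.seg */

	printf("%d\n", seg_access_ok(user_seg, 0x10000, 0x100));	/* 1: fits in user space */
	printf("%d\n", seg_access_ok(user_seg, ~0UL - 8, 0x100));	/* 0: high bits set / wraps */
	printf("%d\n", seg_access_ok(0, ~0UL, 1));			/* 1: KERNEL_DS passes everything */
	return 0;
}
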
/*
@@ -54,9 +54,9 @@
* (b) require any knowledge of processes at this stage
*/
#define put_user(x, ptr) \
- __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
- __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
+ __get_user_check((x), (ptr), sizeof(*(ptr)))
/*
* The "__xxx" versions do not do address space checking, useful when
@@ -74,6 +74,11 @@
* more extensive comments with fixup_inline_exception below for
* more information.
*/
+#define EXC(label,cont,res,err) \
+ ".section __ex_table,\"a\"\n" \
+ " .long "#label"-.\n" \
+ " lda "#res","#cont"-"#label"("#err")\n" \
+ ".previous\n"
extern void __get_user_unknown(void);
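
[Editor's aside.] The open-coded __ex_table sequences removed further down show exactly what the new EXC() helper replaces. If it helps to see the pasted-together result, here is a throwaway user-space sketch that only prints the expansion of one EXC() use; the macro body is copied from the hunk above and the operands are the same placeholders the inline asm passes:

#include <stdio.h>

#define EXC(label,cont,res,err)				\
	".section __ex_table,\"a\"\n"			\
	"	.long "#label"-.\n"			\
	"	lda "#res","#cont"-"#label"("#err")\n"	\
	".previous\n"

int main(void)
{
	/* prints the same text the hand-written __ex_table blocks used to contain */
	fputs(EXC(1b, 2b, %0, %1), stdout);
	return 0;
}
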
@@ -93,23 +98,23 @@ extern void __get_user_unknown(void);
__gu_err; \
})
-#define __get_user_check(x, ptr, size, segment) \
-({ \
- long __gu_err = -EFAULT; \
- unsigned long __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (__access_ok((unsigned long)__gu_addr, size, segment)) { \
- __gu_err = 0; \
- switch (size) { \
- case 1: __get_user_8(__gu_addr); break; \
- case 2: __get_user_16(__gu_addr); break; \
- case 4: __get_user_32(__gu_addr); break; \
- case 8: __get_user_64(__gu_addr); break; \
- default: __get_user_unknown(); break; \
- } \
- } \
- (x) = (__force __typeof__(*(ptr))) __gu_val; \
- __gu_err; \
+#define __get_user_check(x, ptr, size) \
+({ \
+ long __gu_err = -EFAULT; \
+ unsigned long __gu_val = 0; \
+ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ if (__access_ok((unsigned long)__gu_addr, size)) { \
+ __gu_err = 0; \
+ switch (size) { \
+ case 1: __get_user_8(__gu_addr); break; \
+ case 2: __get_user_16(__gu_addr); break; \
+ case 4: __get_user_32(__gu_addr); break; \
+ case 8: __get_user_64(__gu_addr); break; \
+ default: __get_user_unknown(); break; \
+ } \
+ } \
+ (x) = (__force __typeof__(*(ptr))) __gu_val; \
+ __gu_err; \
})
struct __large_struct { unsigned long buf[100]; };
@@ -118,20 +123,14 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_64(addr) \
__asm__("1: ldq %0,%2\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda %0, 2b-1b(%1)\n" \
- ".previous" \
+ EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
#define __get_user_32(addr) \
__asm__("1: ldl %0,%2\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda %0, 2b-1b(%1)\n" \
- ".previous" \
+ EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
@@ -141,20 +140,14 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user_16(addr) \
__asm__("1: ldwu %0,%2\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda %0, 2b-1b(%1)\n" \
- ".previous" \
+ EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
#define __get_user_8(addr) \
__asm__("1: ldbu %0,%2\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda %0, 2b-1b(%1)\n" \
- ".previous" \
+ EXC(1b,2b,%0,%1) \
: "=r"(__gu_val), "=r"(__gu_err) \
: "m"(__m(addr)), "1"(__gu_err))
#else
@@ -170,12 +163,8 @@ struct __large_struct { unsigned long buf[100]; };
" extwh %1,%3,%1\n" \
" or %0,%1,%0\n" \
"3:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda %0, 3b-1b(%2)\n" \
- " .long 2b - .\n" \
- " lda %0, 3b-2b(%2)\n" \
- ".previous" \
+ EXC(1b,3b,%0,%2) \
+ EXC(2b,3b,%0,%2) \
: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
: "r"(addr), "2"(__gu_err)); \
}
@@ -184,10 +173,7 @@ struct __large_struct { unsigned long buf[100]; };
__asm__("1: ldq_u %0,0(%2)\n" \
" extbl %0,%2,%0\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda %0, 2b-1b(%1)\n" \
- ".previous" \
+ EXC(1b,2b,%0,%1) \
: "=&r"(__gu_val), "=r"(__gu_err) \
: "r"(addr), "1"(__gu_err))
#endif
@@ -208,21 +194,21 @@ extern void __put_user_unknown(void);
__pu_err; \
})
-#define __put_user_check(x, ptr, size, segment) \
-({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- if (__access_ok((unsigned long)__pu_addr, size, segment)) { \
- __pu_err = 0; \
- switch (size) { \
- case 1: __put_user_8(x, __pu_addr); break; \
- case 2: __put_user_16(x, __pu_addr); break; \
- case 4: __put_user_32(x, __pu_addr); break; \
- case 8: __put_user_64(x, __pu_addr); break; \
- default: __put_user_unknown(); break; \
- } \
- } \
- __pu_err; \
+#define __put_user_check(x, ptr, size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ if (__access_ok((unsigned long)__pu_addr, size)) { \
+ __pu_err = 0; \
+ switch (size) { \
+ case 1: __put_user_8(x, __pu_addr); break; \
+ case 2: __put_user_16(x, __pu_addr); break; \
+ case 4: __put_user_32(x, __pu_addr); break; \
+ case 8: __put_user_64(x, __pu_addr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+ } \
+ __pu_err; \
})
/*
@@ -233,20 +219,14 @@ extern void __put_user_unknown(void);
#define __put_user_64(x, addr) \
__asm__ __volatile__("1: stq %r2,%1\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda $31,2b-1b(%0)\n" \
- ".previous" \
+ EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
#define __put_user_32(x, addr) \
__asm__ __volatile__("1: stl %r2,%1\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda $31,2b-1b(%0)\n" \
- ".previous" \
+ EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
@@ -256,20 +236,14 @@ __asm__ __volatile__("1: stl %r2,%1\n" \
#define __put_user_16(x, addr) \
__asm__ __volatile__("1: stw %r2,%1\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda $31,2b-1b(%0)\n" \
- ".previous" \
+ EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#define __put_user_8(x, addr) \
__asm__ __volatile__("1: stb %r2,%1\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda $31,2b-1b(%0)\n" \
- ".previous" \
+ EXC(1b,2b,$31,%0) \
: "=r"(__pu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
@@ -291,16 +265,10 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
"3: stq_u %2,1(%5)\n" \
"4: stq_u %1,0(%5)\n" \
"5:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda $31, 5b-1b(%0)\n" \
- " .long 2b - .\n" \
- " lda $31, 5b-2b(%0)\n" \
- " .long 3b - .\n" \
- " lda $31, 5b-3b(%0)\n" \
- " .long 4b - .\n" \
- " lda $31, 5b-4b(%0)\n" \
- ".previous" \
+ EXC(1b,5b,$31,%0) \
+ EXC(2b,5b,$31,%0) \
+ EXC(3b,5b,$31,%0) \
+ EXC(4b,5b,$31,%0) \
: "=r"(__pu_err), "=&r"(__pu_tmp1), \
"=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
"=&r"(__pu_tmp4) \
@@ -317,12 +285,8 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
" or %1,%2,%1\n" \
"2: stq_u %1,0(%4)\n" \
"3:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda $31, 3b-1b(%0)\n" \
- " .long 2b - .\n" \
- " lda $31, 3b-2b(%0)\n" \
- ".previous" \
+ EXC(1b,3b,$31,%0) \
+ EXC(2b,3b,$31,%0) \
: "=r"(__pu_err), \
"=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
@@ -334,96 +298,30 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
* Complex access routines
*/
-/* This little bit of silliness is to get the GP loaded for a function
- that ordinarily wouldn't. Otherwise we could have it done by the macro
- directly, which can be optimized the linker. */
-#ifdef MODULE
-#define __module_address(sym) "r"(sym),
-#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
-#else
-#define __module_address(sym)
-#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
-#endif
-
-extern void __copy_user(void);
+extern long __copy_user(void *to, const void *from, long len);
-extern inline long
-__copy_tofrom_user_nocheck(void *to, const void *from, long len)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
- register void * __cu_to __asm__("$6") = to;
- register const void * __cu_from __asm__("$7") = from;
- register long __cu_len __asm__("$0") = len;
-
- __asm__ __volatile__(
- __module_call(28, 3, __copy_user)
- : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
- : __module_address(__copy_user)
- "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
- : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
-
- return __cu_len;
+ return __copy_user(to, (__force const void *)from, len);
}
-#define __copy_to_user(to, from, n) \
-({ \
- __chk_user_ptr(to); \
- __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \
-})
-#define __copy_from_user(to, from, n) \
-({ \
- __chk_user_ptr(from); \
- __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \
-})
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-extern inline long
-copy_to_user(void __user *to, const void *from, long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
- if (likely(__access_ok((unsigned long)to, n, get_fs())))
- n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
- return n;
+ return __copy_user((__force void *)to, from, len);
}
-extern inline long
-copy_from_user(void *to, const void __user *from, long n)
-{
- long res = n;
- if (likely(__access_ok((unsigned long)from, n, get_fs())))
- res = __copy_from_user_inatomic(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
-}
-
-extern void __do_clear_user(void);
-
-extern inline long
-__clear_user(void __user *to, long len)
-{
- register void __user * __cl_to __asm__("$6") = to;
- register long __cl_len __asm__("$0") = len;
- __asm__ __volatile__(
- __module_call(28, 2, __do_clear_user)
- : "=r"(__cl_len), "=r"(__cl_to)
- : __module_address(__do_clear_user)
- "0"(__cl_len), "1"(__cl_to)
- : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
- return __cl_len;
-}
+extern long __clear_user(void __user *to, long len);
extern inline long
clear_user(void __user *to, long len)
{
- if (__access_ok((unsigned long)to, len, get_fs()))
+ if (__access_ok((unsigned long)to, len))
len = __clear_user(to, len);
return len;
}
-#undef __module_address
-#undef __module_call
-
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : TASK_SIZE)
@@ -431,56 +329,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
-/*
- * About the exception table:
- *
- * - insn is a 32-bit pc-relative offset from the faulting insn.
- * - nextinsn is a 16-bit offset off of the faulting instruction
- * (not off of the *next* instruction as branches are).
- * - errreg is the register in which to place -EFAULT.
- * - valreg is the final target register for the load sequence
- * and will be zeroed.
- *
- * Either errreg or valreg may be $31, in which case nothing happens.
- *
- * The exception fixup information "just so happens" to be arranged
- * as in a MEM format instruction. This lets us emit our three
- * values like so:
- *
- * lda valreg, nextinsn(errreg)
- *
- */
-
-struct exception_table_entry
-{
- signed int insn;
- union exception_fixup {
- unsigned unit;
- struct {
- signed int nextinsn : 16;
- unsigned int errreg : 5;
- unsigned int valreg : 5;
- } bits;
- } fixup;
-};
-
-/* Returns the new pc */
-#define fixup_exception(map_reg, _fixup, pc) \
-({ \
- if ((_fixup)->fixup.bits.valreg != 31) \
- map_reg((_fixup)->fixup.bits.valreg) = 0; \
- if ((_fixup)->fixup.bits.errreg != 31) \
- map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \
- (pc) + (_fixup)->fixup.bits.nextinsn; \
-})
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-#define swap_ex_entry_fixup(a, b, tmp, delta) \
- do { \
- (a)->fixup.unit = (b)->fixup.unit; \
- (b)->fixup.unit = (tmp).fixup.unit; \
- } while (0)
-
+#include <asm/extable.h>
#endif /* __ALPHA_UACCESS_H */
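
[Editor's aside.] With ARCH_HAS_RAW_COPY_USER selected in the Kconfig hunk above, the architecture now only supplies raw_copy_from_user()/raw_copy_to_user(), which return the number of bytes they could NOT copy; the access_ok() check and the zero-padding of a short read move out of the arch code. The sketch below is a hypothetical wrapper, mirroring the Alpha copy_from_user() removed above rather than claiming to be the exact generic implementation, and shows the contract the raw helpers have to satisfy:

#include <linux/string.h>
#include <linux/uaccess.h>

static inline unsigned long
checked_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long left = n;

	if (access_ok(VERIFY_READ, from, n))
		left = raw_copy_from_user(to, from, n);
	if (unlikely(left))
		memset(to + (n - left), 0, left);	/* don't leak stale kernel data */
	return left;					/* bytes NOT copied */
}
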
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 0b961093ca5c..6d76e528ab8f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
/* copy relevant bits of struct timex. */
if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
- offsetof(struct timex32, time)))
+ offsetof(struct timex32, tick)))
return -EFAULT;
ret = do_adjtimex(&txc);
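
[Editor's aside.] The one-liner above fixes the length of the second copy_from_user() chunk: when a structure is copied in two pieces around a skipped field, the size of the second piece must be measured from the field the copy resumes at (tick), not from the field it stopped before (time); otherwise the copy runs past the end of the structure. A stand-alone sketch with a made-up layout (not the real struct timex32):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct timex_like {
	int modes;
	int time;		/* field deliberately skipped by the two-chunk copy */
	int tick;
	int tolerance;
};

int main(void)
{
	struct timex_like src = { 1, 2, 3, 4 }, dst = { 0, 0, 0, 0 };

	memcpy(&dst, &src, offsetof(struct timex_like, time));
	memcpy(&dst.tick, &src.tick,
	       sizeof(src) - offsetof(struct timex_like, tick));	/* the corrected length */

	printf("%d %d %d %d\n", dst.modes, dst.time, dst.tick, dst.tolerance);	/* 1 0 3 4 */
	return 0;
}
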
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index b137390e87e7..65bb102d985b 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -482,12 +482,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
" extwl %1,%3,%1\n"
" extwh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -502,12 +498,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
" extll %1,%3,%1\n"
" extlh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -522,12 +514,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
" extql %1,%3,%1\n"
" extqh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -551,16 +539,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
"3: stq_u %2,1(%5)\n"
"4: stq_u %1,0(%5)\n"
"5:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %2,5b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %1,5b-2b(%0)\n"
- " .long 3b - .\n"
- " lda $31,5b-3b(%0)\n"
- " .long 4b - .\n"
- " lda $31,5b-4b(%0)\n"
- ".previous"
+ EXC(1b,5b,%2,%0)
+ EXC(2b,5b,%1,%0)
+ EXC(3b,5b,$31,%0)
+ EXC(4b,5b,$31,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -581,16 +563,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
"3: stq_u %2,3(%5)\n"
"4: stq_u %1,0(%5)\n"
"5:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %2,5b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %1,5b-2b(%0)\n"
- " .long 3b - .\n"
- " lda $31,5b-3b(%0)\n"
- " .long 4b - .\n"
- " lda $31,5b-4b(%0)\n"
- ".previous"
+ EXC(1b,5b,%2,%0)
+ EXC(2b,5b,%1,%0)
+ EXC(3b,5b,$31,%0)
+ EXC(4b,5b,$31,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -611,16 +587,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
"3: stq_u %2,7(%5)\n"
"4: stq_u %1,0(%5)\n"
"5:\n"
- ".section __ex_table,\"a\"\n\t"
- " .long 1b - .\n"
- " lda %2,5b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %1,5b-2b(%0)\n"
- " .long 3b - .\n"
- " lda $31,5b-3b(%0)\n"
- " .long 4b - .\n"
- " lda $31,5b-4b(%0)\n"
- ".previous"
+ EXC(1b,5b,%2,%0)
+ EXC(2b,5b,%1,%0)
+ EXC(3b,5b,$31,%0)
+ EXC(4b,5b,$31,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(una_reg(reg)), "0"(0));
@@ -802,7 +772,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
/* Don't bother reading ds in the access check since we already
know that this came from the user. Also rely on the fact that
the page at TASK_SIZE is unmapped and so can't be touched anyway. */
- if (!__access_ok((unsigned long)va, 0, USER_DS))
+ if ((unsigned long)va >= TASK_SIZE)
goto give_sigsegv;
++unaligned[1].count;
@@ -835,12 +805,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extwl %1,%3,%1\n"
" extwh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -855,12 +821,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extll %1,%3,%1\n"
" extlh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -875,12 +837,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extql %1,%3,%1\n"
" extqh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -895,12 +853,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extll %1,%3,%1\n"
" extlh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -915,12 +869,8 @@ do_entUnaUser(void __user * va, unsigned long opcode,
" extql %1,%3,%1\n"
" extqh %2,%3,%2\n"
"3:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %1,3b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %2,3b-2b(%0)\n"
- ".previous"
+ EXC(1b,3b,%1,%0)
+ EXC(2b,3b,%2,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
: "r"(va), "0"(0));
if (error)
@@ -944,16 +894,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
"3: stq_u %2,1(%5)\n"
"4: stq_u %1,0(%5)\n"
"5:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %2,5b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %1,5b-2b(%0)\n"
- " .long 3b - .\n"
- " lda $31,5b-3b(%0)\n"
- " .long 4b - .\n"
- " lda $31,5b-4b(%0)\n"
- ".previous"
+ EXC(1b,5b,%2,%0)
+ EXC(2b,5b,%1,%0)
+ EXC(3b,5b,$31,%0)
+ EXC(4b,5b,$31,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(*reg_addr), "0"(0));
@@ -978,16 +922,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
"3: stq_u %2,3(%5)\n"
"4: stq_u %1,0(%5)\n"
"5:\n"
- ".section __ex_table,\"a\"\n"
- " .long 1b - .\n"
- " lda %2,5b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %1,5b-2b(%0)\n"
- " .long 3b - .\n"
- " lda $31,5b-3b(%0)\n"
- " .long 4b - .\n"
- " lda $31,5b-4b(%0)\n"
- ".previous"
+ EXC(1b,5b,%2,%0)
+ EXC(2b,5b,%1,%0)
+ EXC(3b,5b,$31,%0)
+ EXC(4b,5b,$31,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(*reg_addr), "0"(0));
@@ -1012,16 +950,10 @@ do_entUnaUser(void __user * va, unsigned long opcode,
"3: stq_u %2,7(%5)\n"
"4: stq_u %1,0(%5)\n"
"5:\n"
- ".section __ex_table,\"a\"\n\t"
- " .long 1b - .\n"
- " lda %2,5b-1b(%0)\n"
- " .long 2b - .\n"
- " lda %1,5b-2b(%0)\n"
- " .long 3b - .\n"
- " lda $31,5b-3b(%0)\n"
- " .long 4b - .\n"
- " lda $31,5b-4b(%0)\n"
- ".previous"
+ EXC(1b,5b,%2,%0)
+ EXC(2b,5b,%1,%0)
+ EXC(3b,5b,$31,%0)
+ EXC(4b,5b,$31,%0)
: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
"=&r"(tmp3), "=&r"(tmp4)
: "r"(va), "r"(*reg_addr), "0"(0));
@@ -1047,7 +979,7 @@ give_sigsegv:
/* We need to replicate some of the logic in mm/fault.c,
since we don't have access to the fault code in the
exception handling return path. */
- if (!__access_ok((unsigned long)va, 0, USER_DS))
+ if ((unsigned long)va >= TASK_SIZE)
info.si_code = SEGV_ACCERR;
else {
struct mm_struct *mm = current->mm;
diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S
index bf5b931866ba..006f469fef73 100644
--- a/arch/alpha/lib/clear_user.S
+++ b/arch/alpha/lib/clear_user.S
@@ -8,21 +8,6 @@
* right "bytes left to zero" value (and that it is updated only _after_
* a successful copy). There is also some rather minor exception setup
* stuff.
- *
- * NOTE! This is not directly C-callable, because the calling semantics
- * are different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * exception pointer in $7
- * return address in $28 (exceptions expect it there)
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6
*/
#include <asm/export.h>
@@ -38,62 +23,63 @@
.set noreorder
.align 4
- .globl __do_clear_user
- .ent __do_clear_user
- .frame $30, 0, $28
+ .globl __clear_user
+ .ent __clear_user
+ .frame $30, 0, $26
.prologue 0
$loop:
and $1, 3, $4 # e0 :
beq $4, 1f # .. e1 :
-0: EX( stq_u $31, 0($6) ) # e0 : zero one word
+0: EX( stq_u $31, 0($16) ) # e0 : zero one word
subq $0, 8, $0 # .. e1 :
subq $4, 1, $4 # e0 :
- addq $6, 8, $6 # .. e1 :
+ addq $16, 8, $16 # .. e1 :
bne $4, 0b # e1 :
unop # :
1: bic $1, 3, $1 # e0 :
beq $1, $tail # .. e1 :
-2: EX( stq_u $31, 0($6) ) # e0 : zero four words
+2: EX( stq_u $31, 0($16) ) # e0 : zero four words
subq $0, 8, $0 # .. e1 :
- EX( stq_u $31, 8($6) ) # e0 :
+ EX( stq_u $31, 8($16) ) # e0 :
subq $0, 8, $0 # .. e1 :
- EX( stq_u $31, 16($6) ) # e0 :
+ EX( stq_u $31, 16($16) ) # e0 :
subq $0, 8, $0 # .. e1 :
- EX( stq_u $31, 24($6) ) # e0 :
+ EX( stq_u $31, 24($16) ) # e0 :
subq $0, 8, $0 # .. e1 :
subq $1, 4, $1 # e0 :
- addq $6, 32, $6 # .. e1 :
+ addq $16, 32, $16 # .. e1 :
bne $1, 2b # e1 :
$tail:
bne $2, 1f # e1 : is there a tail to do?
- ret $31, ($28), 1 # .. e1 :
+ ret $31, ($26), 1 # .. e1 :
-1: EX( ldq_u $5, 0($6) ) # e0 :
+1: EX( ldq_u $5, 0($16) ) # e0 :
clr $0 # .. e1 :
nop # e1 :
mskqh $5, $0, $5 # e0 :
- EX( stq_u $5, 0($6) ) # e0 :
- ret $31, ($28), 1 # .. e1 :
+ EX( stq_u $5, 0($16) ) # e0 :
+ ret $31, ($26), 1 # .. e1 :
-__do_clear_user:
- and $6, 7, $4 # e0 : find dest misalignment
+__clear_user:
+ and $17, $17, $0
+ and $16, 7, $4 # e0 : find dest misalignment
beq $0, $zerolength # .. e1 :
addq $0, $4, $1 # e0 : bias counter
and $1, 7, $2 # e1 : number of bytes in tail
srl $1, 3, $1 # e0 :
beq $4, $loop # .. e1 :
- EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in
+ EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in
beq $1, $oneword # .. e1 : sub-word store?
- mskql $5, $6, $5 # e0 : take care of misaligned head
- addq $6, 8, $6 # .. e1 :
- EX( stq_u $5, -8($6) ) # e0 :
+ mskql $5, $16, $5 # e0 : take care of misaligned head
+ addq $16, 8, $16 # .. e1 :
+ EX( stq_u $5, -8($16) ) # e0 :
addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment
subq $1, 1, $1 # e0 :
subq $0, 8, $0 # .. e1 :
@@ -101,15 +87,15 @@ __do_clear_user:
unop # :
$oneword:
- mskql $5, $6, $4 # e0 :
+ mskql $5, $16, $4 # e0 :
mskqh $5, $2, $5 # e0 :
or $5, $4, $5 # e1 :
- EX( stq_u $5, 0($6) ) # e0 :
+ EX( stq_u $5, 0($16) ) # e0 :
clr $0 # .. e1 :
$zerolength:
$exception:
- ret $31, ($28), 1 # .. e1 :
+ ret $31, ($26), 1 # .. e1 :
- .end __do_clear_user
- EXPORT_SYMBOL(__do_clear_user)
+ .end __clear_user
+ EXPORT_SYMBOL(__clear_user)
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 509f62b65311..159f1b7e6e49 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -9,21 +9,6 @@
* contains the right "bytes left to copy" value (and that it is updated
* only _after_ a successful copy). There is also some rather minor
* exception setup stuff..
- *
- * NOTE! This is not directly C-callable, because the calling semantics are
- * different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * source address in $7
- * return address in $28
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6,$7
*/
#include <asm/export.h>
@@ -49,58 +34,59 @@
.ent __copy_user
__copy_user:
.prologue 0
- and $6,7,$3
+ and $18,$18,$0
+ and $16,7,$3
beq $0,$35
beq $3,$36
subq $3,8,$3
.align 4
$37:
- EXI( ldq_u $1,0($7) )
- EXO( ldq_u $2,0($6) )
- extbl $1,$7,$1
- mskbl $2,$6,$2
- insbl $1,$6,$1
+ EXI( ldq_u $1,0($17) )
+ EXO( ldq_u $2,0($16) )
+ extbl $1,$17,$1
+ mskbl $2,$16,$2
+ insbl $1,$16,$1
addq $3,1,$3
bis $1,$2,$1
- EXO( stq_u $1,0($6) )
+ EXO( stq_u $1,0($16) )
subq $0,1,$0
- addq $6,1,$6
- addq $7,1,$7
+ addq $16,1,$16
+ addq $17,1,$17
beq $0,$41
bne $3,$37
$36:
- and $7,7,$1
+ and $17,7,$1
bic $0,7,$4
beq $1,$43
beq $4,$48
- EXI( ldq_u $3,0($7) )
+ EXI( ldq_u $3,0($17) )
.align 4
$50:
- EXI( ldq_u $2,8($7) )
+ EXI( ldq_u $2,8($17) )
subq $4,8,$4
- extql $3,$7,$3
- extqh $2,$7,$1
+ extql $3,$17,$3
+ extqh $2,$17,$1
bis $3,$1,$1
- EXO( stq $1,0($6) )
- addq $7,8,$7
+ EXO( stq $1,0($16) )
+ addq $17,8,$17
subq $0,8,$0
- addq $6,8,$6
+ addq $16,8,$16
bis $2,$2,$3
bne $4,$50
$48:
beq $0,$41
.align 4
$57:
- EXI( ldq_u $1,0($7) )
- EXO( ldq_u $2,0($6) )
- extbl $1,$7,$1
- mskbl $2,$6,$2
- insbl $1,$6,$1
+ EXI( ldq_u $1,0($17) )
+ EXO( ldq_u $2,0($16) )
+ extbl $1,$17,$1
+ mskbl $2,$16,$2
+ insbl $1,$16,$1
bis $1,$2,$1
- EXO( stq_u $1,0($6) )
+ EXO( stq_u $1,0($16) )
subq $0,1,$0
- addq $6,1,$6
- addq $7,1,$7
+ addq $16,1,$16
+ addq $17,1,$17
bne $0,$57
br $31,$41
.align 4
@@ -108,27 +94,27 @@ $43:
beq $4,$65
.align 4
$66:
- EXI( ldq $1,0($7) )
+ EXI( ldq $1,0($17) )
subq $4,8,$4
- EXO( stq $1,0($6) )
- addq $7,8,$7
+ EXO( stq $1,0($16) )
+ addq $17,8,$17
subq $0,8,$0
- addq $6,8,$6
+ addq $16,8,$16
bne $4,$66
$65:
beq $0,$41
- EXI( ldq $2,0($7) )
- EXO( ldq $1,0($6) )
+ EXI( ldq $2,0($17) )
+ EXO( ldq $1,0($16) )
mskql $2,$0,$2
mskqh $1,$0,$1
bis $2,$1,$2
- EXO( stq $2,0($6) )
+ EXO( stq $2,0($16) )
bis $31,$31,$0
$41:
$35:
$exitin:
$exitout:
- ret $31,($28),1
+ ret $31,($26),1
.end __copy_user
EXPORT_SYMBOL(__copy_user)
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 5dfb7975895f..ab42afba1720 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -45,10 +45,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
__asm__ __volatile__( \
"1: ldq_u %0,%2\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - .\n" \
- " lda %0,2b-1b(%1)\n" \
- ".previous" \
+ EXC(1b,2b,%0,%1) \
: "=r"(x), "=r"(__guu_err) \
: "m"(__m(ptr)), "1"(0)); \
__guu_err; \
@@ -60,10 +57,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
__asm__ __volatile__( \
"1: stq_u %2,%1\n" \
"2:\n" \
- ".section __ex_table,\"a\"\n" \
- " .long 1b - ." \
- " lda $31,2b-1b(%0)\n" \
- ".previous" \
+ EXC(1b,2b,$31,%0) \
: "=r"(__puu_err) \
: "m"(__m(addr)), "rJ"(x), "0"(0)); \
__puu_err; \
diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S
index 05bef6b50598..e179e4757ef8 100644
--- a/arch/alpha/lib/ev6-clear_user.S
+++ b/arch/alpha/lib/ev6-clear_user.S
@@ -9,21 +9,6 @@
* a successful copy). There is also some rather minor exception setup
* stuff.
*
- * NOTE! This is not directly C-callable, because the calling semantics
- * are different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * exception pointer in $7
- * return address in $28 (exceptions expect it there)
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6
- *
* Much of the information about 21264 scheduling/coding comes from:
* Compiler Writer's Guide for the Alpha 21264
* abbreviated as 'CWG' in other comments here
@@ -56,14 +41,15 @@
.set noreorder
.align 4
- .globl __do_clear_user
- .ent __do_clear_user
- .frame $30, 0, $28
+ .globl __clear_user
+ .ent __clear_user
+ .frame $30, 0, $26
.prologue 0
# Pipeline info : Slotting & Comments
-__do_clear_user:
- and $6, 7, $4 # .. E .. .. : find dest head misalignment
+__clear_user:
+ and $17, $17, $0
+ and $16, 7, $4 # .. E .. .. : find dest head misalignment
beq $0, $zerolength # U .. .. .. : U L U L
addq $0, $4, $1 # .. .. .. E : bias counter
@@ -75,14 +61,14 @@ __do_clear_user:
/*
* Head is not aligned. Write (8 - $4) bytes to head of destination
- * This means $6 is known to be misaligned
+ * This means $16 is known to be misaligned
*/
- EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in
+ EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in
beq $1, $onebyte # .. .. U .. : sub-word store?
- mskql $5, $6, $5 # .. U .. .. : take care of misaligned head
- addq $6, 8, $6 # E .. .. .. : L U U L
+ mskql $5, $16, $5 # .. U .. .. : take care of misaligned head
+ addq $16, 8, $16 # E .. .. .. : L U U L
- EX( stq_u $5, -8($6) ) # .. .. .. L :
+ EX( stq_u $5, -8($16) ) # .. .. .. L :
subq $1, 1, $1 # .. .. E .. :
addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment
subq $0, 8, $0 # E .. .. .. : U L U L
@@ -93,11 +79,11 @@ __do_clear_user:
* values upon initial entry to the loop
* $1 is number of quadwords to clear (zero is a valid value)
* $2 is number of trailing bytes (0..7) ($2 never used...)
- * $6 is known to be aligned 0mod8
+ * $16 is known to be aligned 0mod8
*/
$headalign:
subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop
- and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop
+ and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop
subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop)
blt $4, $trailquad # U .. .. .. : U L U L
@@ -114,21 +100,21 @@ $headalign:
beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64
$alignmod64:
- EX( stq_u $31, 0($6) ) # .. .. .. L
+ EX( stq_u $31, 0($16) ) # .. .. .. L
addq $3, 8, $3 # .. .. E ..
subq $0, 8, $0 # .. E .. ..
nop # E .. .. .. : U L U L
nop # .. .. .. E
subq $1, 1, $1 # .. .. E ..
- addq $6, 8, $6 # .. E .. ..
+ addq $16, 8, $16 # .. E .. ..
blt $3, $alignmod64 # U .. .. .. : U L U L
$bigalign:
/*
* $0 is the number of bytes left
* $1 is the number of quads left
- * $6 is aligned 0mod64
+ * $16 is aligned 0mod64
* we know that we'll be taking a minimum of one trip through
* CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
* We are _not_ going to update $0 after every single store. That
@@ -145,39 +131,39 @@ $bigalign:
nop # E :
nop # E :
nop # E :
- bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest
+ bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest
/* This might actually help for the current trip... */
$do_wh64:
wh64 ($3) # .. .. .. L1 : memory subsystem hint
subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop?
- EX( stq_u $31, 0($6) ) # .. L .. ..
+ EX( stq_u $31, 0($16) ) # .. L .. ..
subq $0, 8, $0 # E .. .. .. : U L U L
- addq $6, 128, $3 # E : Target address of wh64
- EX( stq_u $31, 8($6) ) # L :
- EX( stq_u $31, 16($6) ) # L :
+ addq $16, 128, $3 # E : Target address of wh64
+ EX( stq_u $31, 8($16) ) # L :
+ EX( stq_u $31, 16($16) ) # L :
subq $0, 16, $0 # E : U L L U
nop # E :
- EX( stq_u $31, 24($6) ) # L :
- EX( stq_u $31, 32($6) ) # L :
+ EX( stq_u $31, 24($16) ) # L :
+ EX( stq_u $31, 32($16) ) # L :
subq $0, 168, $5 # E : U L L U : two trips through the loop left?
/* 168 = 192 - 24, since we've already completed some stores */
subq $0, 16, $0 # E :
- EX( stq_u $31, 40($6) ) # L :
- EX( stq_u $31, 48($6) ) # L :
- cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle
+ EX( stq_u $31, 40($16) ) # L :
+ EX( stq_u $31, 48($16) ) # L :
+ cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle
subq $1, 8, $1 # E :
subq $0, 16, $0 # E :
- EX( stq_u $31, 56($6) ) # L :
+ EX( stq_u $31, 56($16) ) # L :
nop # E : U L U L
nop # E :
subq $0, 8, $0 # E :
- addq $6, 64, $6 # E :
+ addq $16, 64, $16 # E :
bge $4, $do_wh64 # U : U L U L
$trailquad:
@@ -190,14 +176,14 @@ $trailquad:
beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go
$onequad:
- EX( stq_u $31, 0($6) ) # .. .. .. L
+ EX( stq_u $31, 0($16) ) # .. .. .. L
subq $1, 1, $1 # .. .. E ..
subq $0, 8, $0 # .. E .. ..
nop # E .. .. .. : U L U L
nop # .. .. .. E
nop # .. .. E ..
- addq $6, 8, $6 # .. E .. ..
+ addq $16, 8, $16 # .. E .. ..
bgt $1, $onequad # U .. .. .. : U L U L
# We have an unknown number of bytes left to go.
@@ -211,9 +197,9 @@ $trailbytes:
# so we will use $0 as the loop counter
# We know for a fact that $0 > 0 zero due to previous context
$onebyte:
- EX( stb $31, 0($6) ) # .. .. .. L
+ EX( stb $31, 0($16) ) # .. .. .. L
subq $0, 1, $0 # .. .. E .. :
- addq $6, 1, $6 # .. E .. .. :
+ addq $16, 1, $16 # .. E .. .. :
bgt $0, $onebyte # U .. .. .. : U L U L
$zerolength:
@@ -221,6 +207,6 @@ $exception: # Destination for exception recovery(?)
nop # .. .. .. E :
nop # .. .. E .. :
nop # .. E .. .. :
- ret $31, ($28), 1 # L0 .. .. .. : L U L U
- .end __do_clear_user
- EXPORT_SYMBOL(__do_clear_user)
+ ret $31, ($26), 1 # L0 .. .. .. : L U L U
+ .end __clear_user
+ EXPORT_SYMBOL(__clear_user)
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index be720b518af9..35e6710d0700 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -12,21 +12,6 @@
* only _after_ a successful copy). There is also some rather minor
* exception setup stuff..
*
- * NOTE! This is not directly C-callable, because the calling semantics are
- * different:
- *
- * Inputs:
- * length in $0
- * destination address in $6
- * source address in $7
- * return address in $28
- *
- * Outputs:
- * bytes left to copy in $0
- *
- * Clobbers:
- * $1,$2,$3,$4,$5,$6,$7
- *
* Much of the information about 21264 scheduling/coding comes from:
* Compiler Writer's Guide for the Alpha 21264
* abbreviated as 'CWG' in other comments here
@@ -60,10 +45,11 @@
# Pipeline info: Slotting & Comments
__copy_user:
.prologue 0
- subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy?
+ andq $18, $18, $0
+ subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy?
beq $0, $zerolength # U .. .. .. : U L U L
- and $6,7,$3 # .. .. .. E : is leading dest misalignment
+ and $16,7,$3 # .. .. .. E : is leading dest misalignment
ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data
beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall)
subq $3, 8, $3 # E .. .. .. : L U U L : trip counter
@@ -73,17 +59,17 @@ __copy_user:
* We know we have at least one trip through this loop
*/
$aligndest:
- EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores
- addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG
+ EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores
+ addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG
addq $3,1,$3 # .. E .. .. :
nop # E .. .. .. : U L U L
/*
- * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * the -1 is to compensate for the inc($16) done in a previous quadpack
* which allows us zero dependencies within either quadpack in the loop
*/
- EXO( stb $1,-1($6) ) # .. .. .. L :
- addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG
+ EXO( stb $1,-1($16) ) # .. .. .. L :
+ addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG
subq $0,1,$0 # .. E .. .. :
bne $3, $aligndest # U .. .. .. : U L U L
@@ -92,29 +78,29 @@ $aligndest:
* If we arrived via branch, we have a minimum of 32 bytes
*/
$destaligned:
- and $7,7,$1 # .. .. .. E : Check _current_ source alignment
+ and $17,7,$1 # .. .. .. E : Check _current_ source alignment
bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop
- EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code
+ EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code
beq $1,$quadaligned # U .. .. .. : U L U L
/*
- * In the worst case, we've just executed an ldq_u here from 0($7)
+ * In the worst case, we've just executed an ldq_u here from 0($17)
* and we'll repeat it once if we take the branch
*/
/* Misaligned quadword loop - not unrolled. Leave it that way. */
$misquad:
- EXI( ldq_u $2,8($7) ) # .. .. .. L :
+ EXI( ldq_u $2,8($17) ) # .. .. .. L :
subq $4,8,$4 # .. .. E .. :
- extql $3,$7,$3 # .. U .. .. :
- extqh $2,$7,$1 # U .. .. .. : U U L L
+ extql $3,$17,$3 # .. U .. .. :
+ extqh $2,$17,$1 # U .. .. .. : U U L L
bis $3,$1,$1 # .. .. .. E :
- EXO( stq $1,0($6) ) # .. .. L .. :
- addq $7,8,$7 # .. E .. .. :
+ EXO( stq $1,0($16) ) # .. .. L .. :
+ addq $17,8,$17 # .. E .. .. :
subq $0,8,$0 # E .. .. .. : U L L U
- addq $6,8,$6 # .. .. .. E :
+ addq $16,8,$16 # .. .. .. E :
bis $2,$2,$3 # .. .. E .. :
nop # .. E .. .. :
bne $4,$misquad # U .. .. .. : U L U L
@@ -125,8 +111,8 @@ $misquad:
beq $0,$zerolength # U .. .. .. : U L U L
/* We know we have at least one trip through the byte loop */
- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
+ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
+ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
nop # .. E .. .. :
br $31, $dirtyentry # L0 .. .. .. : L U U L
/* Do the trailing byte loop load, then hop into the store part of the loop */
@@ -136,8 +122,8 @@ $misquad:
* Based upon the usage context, it's worth the effort to unroll this loop
* $0 - number of bytes to be moved
* $4 - number of bytes to move as quadwords
- * $6 is current destination address
- * $7 is current source address
+ * $16 is current destination address
+ * $17 is current source address
*/
$quadaligned:
subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff
@@ -155,29 +141,29 @@ $quadaligned:
* instruction memory hint instruction).
*/
$unroll4:
- EXI( ldq $1,0($7) ) # .. .. .. L
- EXI( ldq $2,8($7) ) # .. .. L ..
+ EXI( ldq $1,0($17) ) # .. .. .. L
+ EXI( ldq $2,8($17) ) # .. .. L ..
subq $4,32,$4 # .. E .. ..
nop # E .. .. .. : U U L L
- addq $7,16,$7 # .. .. .. E
- EXO( stq $1,0($6) ) # .. .. L ..
- EXO( stq $2,8($6) ) # .. L .. ..
+ addq $17,16,$17 # .. .. .. E
+ EXO( stq $1,0($16) ) # .. .. L ..
+ EXO( stq $2,8($16) ) # .. L .. ..
subq $0,16,$0 # E .. .. .. : U L L U
- addq $6,16,$6 # .. .. .. E
- EXI( ldq $1,0($7) ) # .. .. L ..
- EXI( ldq $2,8($7) ) # .. L .. ..
+ addq $16,16,$16 # .. .. .. E
+ EXI( ldq $1,0($17) ) # .. .. L ..
+ EXI( ldq $2,8($17) ) # .. L .. ..
subq $4, 32, $3 # E .. .. .. : U U L L : is there enough for another trip?
- EXO( stq $1,0($6) ) # .. .. .. L
- EXO( stq $2,8($6) ) # .. .. L ..
+ EXO( stq $1,0($16) ) # .. .. .. L
+ EXO( stq $2,8($16) ) # .. .. L ..
subq $0,16,$0 # .. E .. ..
- addq $7,16,$7 # E .. .. .. : U L L U
+ addq $17,16,$17 # E .. .. .. : U L L U
nop # .. .. .. E
nop # .. .. E ..
- addq $6,16,$6 # .. E .. ..
+ addq $16,16,$16 # .. E .. ..
bgt $3,$unroll4 # U .. .. .. : U L U L
nop
@@ -186,14 +172,14 @@ $unroll4:
beq $4, $noquads
$onequad:
- EXI( ldq $1,0($7) )
+ EXI( ldq $1,0($17) )
subq $4,8,$4
- addq $7,8,$7
+ addq $17,8,$17
nop
- EXO( stq $1,0($6) )
+ EXO( stq $1,0($16) )
subq $0,8,$0
- addq $6,8,$6
+ addq $16,8,$16
bne $4,$onequad
$noquads:
@@ -207,23 +193,23 @@ $noquads:
* There's no point in doing a lot of complex alignment calculations to try to
 * quadword stuff for a small amount of data.
* $0 - remaining number of bytes left to copy
- * $6 - current dest addr
- * $7 - current source addr
+ * $16 - current dest addr
+ * $17 - current source addr
*/
$onebyteloop:
- EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
- addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG)
+ EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad
+ addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG)
nop # .. E .. .. :
nop # E .. .. .. : U L U L
$dirtyentry:
/*
- * the -1 is to compensate for the inc($6) done in a previous quadpack
+ * the -1 is to compensate for the inc($16) done in a previous quadpack
* which allows us zero dependencies within either quadpack in the loop
*/
- EXO ( stb $2,-1($6) ) # .. .. .. L :
- addq $7,1,$7 # .. .. E .. : quadpack as the load
+ EXO ( stb $2,-1($16) ) # .. .. .. L :
+ addq $17,1,$17 # .. .. E .. : quadpack as the load
subq $0,1,$0 # .. E .. .. : change count _after_ copy
bgt $0,$onebyteloop # U .. .. .. : U L U L
@@ -233,7 +219,7 @@ $exitout: # Destination for exception recovery(?)
nop # .. .. .. E
nop # .. .. E ..
nop # .. E .. ..
- ret $31,($28),1 # L0 .. .. .. : L U L U
+ ret $31,($26),1 # L0 .. .. .. : L U L U
.end __copy_user
EXPORT_SYMBOL(__copy_user)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c9f30f4763ab..7e213ff4f01f 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -44,6 +44,7 @@ config ARC
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
+ select ARCH_HAS_RAW_COPY_USER
config MIGHT_HAVE_PCI
bool
diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h
index 317ff773e1ca..b18fcb606908 100644
--- a/arch/arc/include/asm/hugepage.h
+++ b/arch/arc/include/asm/hugepage.h
@@ -11,6 +11,7 @@
#define _ASM_ARC_HUGEPAGE_H
#include <linux/types.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline pte_t pmd_pte(pmd_t pmd)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index e94ca72b974e..ee22d40afef4 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -37,6 +37,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <linux/const.h>
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index ffd14e630aca..f35974ee7264 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -168,7 +168,7 @@
static inline unsigned long
-__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
long res = 0;
char val;
@@ -394,11 +394,8 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
return res;
}
-extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
- unsigned long n);
-
static inline unsigned long
-__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
long res = 0;
char val;
@@ -724,24 +721,20 @@ static inline long __arc_strnlen_user(const char __user *s, long n)
}
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
-#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
+
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
+
#define __clear_user(d, n) __arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
#else
-extern long arc_copy_from_user_noinline(void *to, const void __user * from,
- unsigned long n);
-extern long arc_copy_to_user_noinline(void __user *to, const void *from,
- unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n);
extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);
-#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
-#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n) arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
index c86906b41bfe..72125a34e780 100644
--- a/arch/arc/mm/extable.c
+++ b/arch/arc/mm/extable.c
@@ -28,20 +28,6 @@ int fixup_exception(struct pt_regs *regs)
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-long arc_copy_from_user_noinline(void *to, const void __user *from,
- unsigned long n)
-{
- return __arc_copy_from_user(to, from, n);
-}
-EXPORT_SYMBOL(arc_copy_from_user_noinline);
-
-long arc_copy_to_user_noinline(void __user *to, const void *from,
- unsigned long n)
-{
- return __arc_copy_to_user(to, from, n);
-}
-EXPORT_SYMBOL(arc_copy_to_user_noinline);
-
unsigned long arc_clear_user_noinline(void __user *to,
unsigned long n)
{
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0d4e71b42c77..6fab7f34739c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -96,6 +96,7 @@ config ARM
select PERF_USE_VMALLOC
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
+ select ARCH_HAS_RAW_COPY_USER
# Above selects are sorted alphabetically; please add new ones
# according to that. Thanks.
help
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 02981eae96b9..1ec8e0d80191 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -63,14 +63,14 @@
label = "home";
linux,code = <KEY_HOME>;
gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
- gpio-key,wakeup;
+ wakeup-source;
};
button@1 {
label = "menu";
linux,code = <KEY_MENU>;
gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
- gpio-key,wakeup;
+ wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0d341c545b01..e5ac1d81d15c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -315,6 +315,13 @@
/* ID & VBUS GPIOs provided in board dts */
};
};
+
+ tpic2810: tpic2810@60 {
+ compatible = "ti,tpic2810";
+ reg = <0x60>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
};
&mcspi3 {
@@ -330,13 +337,6 @@
spi-max-frequency = <1000000>;
spi-cpol;
};
-
- tpic2810: tpic2810@60 {
- compatible = "ti,tpic2810";
- reg = <0x60>;
- gpio-controller;
- #gpio-cells = <2>;
- };
};
&uart3 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 4fbb089cf5ad..00de62dc0042 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
timer@20200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x20200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
local-timer@20600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x20600 0x100>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index bfd923096a8c..ae31a5826e91 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -48,15 +48,14 @@
};
memory {
- reg = <0x00000000 0x10000000>;
+ reg = <0x80000000 0x10000000>;
};
};
&uart0 {
- clock-frequency = <62499840>;
+ status = "okay";
};
&uart1 {
- clock-frequency = <62499840>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 3f04a40eb90c..df05e7f568af 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 9fd542200d3d..4a3ab19c6281 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 41e7fd350fcd..81f78435d8c7 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 477c4860db52..c88b8fefcb2f 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index c0a499d5ba44..d503fa0dde31 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index f7eb5854a224..cc0363b843c1 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 16666324fda8..74e15a3cd9f8 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -55,6 +55,7 @@
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+ open-source;
priority = <200>;
};
};
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 49f466fe0b1d..dcfc97591433 100644
--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -121,11 +121,6 @@
};
};
-&cpu0 {
- arm-supply = <&sw1a_reg>;
- soc-supply = <&sw1c_reg>;
-};
-
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 22332be72140..528b4e9c6d3d 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
};
usb1: ohci@00400000 {
- compatible = "atmel,sama5d2-ohci", "usb-ohci";
+ compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00400000 0x100000>;
interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 82d8c4771293..162e1eb5373d 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/mfd/dbx500-prcmu.h>
#include <dt-bindings/arm/ux500_pm_domains.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
#include "skeleton.dtsi"
/ {
@@ -603,6 +604,11 @@
interrupt-controller;
#interrupt-cells = <2>;
+ ab8500_clock: clock-controller {
+ compatible = "stericsson,ab8500-clk";
+ #clock-cells = <1>;
+ };
+
ab8500_gpio: ab8500-gpio {
compatible = "stericsson,ab8500-gpio";
gpio-controller;
@@ -686,6 +692,8 @@
ab8500-pwm {
compatible = "stericsson,ab8500-pwm";
+ clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+ clock-names = "intclk";
};
ab8500-debugfs {
@@ -700,6 +708,9 @@
V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
+ clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+ clock-names = "audioclk";
+
stericsson,earpeice-cmv = <950>; /* Units in mV. */
};
@@ -1095,6 +1106,14 @@
status = "disabled";
};
+ sound {
+ compatible = "stericsson,snd-soc-mop500";
+ stericsson,cpu-dai = <&msp1 &msp3>;
+ stericsson,audio-codec = <&codec>;
+ clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+ clock-names = "sysclk", "ulpclk", "intclk";
+ };
+
msp0: msp@80123000 {
compatible = "stericsson,ux500-msp-i2s";
reg = <0x80123000 0x1000>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index f37f9e10713c..9e359e4f342e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -186,15 +186,6 @@
status = "okay";
};
- sound {
- compatible = "stericsson,snd-soc-mop500";
-
- stericsson,cpu-dai = <&msp1 &msp3>;
- stericsson,audio-codec = <&codec>;
- clocks = <&prcmu_clk PRCMU_SYSCLK>;
- clock-names = "sysclk";
- };
-
msp0: msp@80123000 {
pinctrl-names = "default";
pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index dd5514def604..ade1d0d4e5f4 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -159,15 +159,6 @@
"", "", "", "", "", "", "", "";
};
- sound {
- compatible = "stericsson,snd-soc-mop500";
-
- stericsson,cpu-dai = <&msp1 &msp3>;
- stericsson,audio-codec = <&codec>;
- clocks = <&prcmu_clk PRCMU_SYSCLK>;
- clock-names = "sysclk";
- };
-
msp0: msp@80123000 {
pinctrl-names = "default";
pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 72ec0d5ae052..bbf1c8cbaac6 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -167,7 +167,7 @@
reg = <8>;
label = "cpu";
ethernet = <&gmac>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-txid";
fixed-link {
speed = <1000>;
full-duplex;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a952cc0703cc..8a3ed21cb7bc 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -495,7 +495,7 @@
resets = <&ccu RST_BUS_GPU>;
assigned-clocks = <&ccu CLK_GPU>;
- assigned-clock-rates = <408000000>;
+ assigned-clock-rates = <384000000>;
};
gic: interrupt-controller@01c81000 {
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 7097c18ff487..d6bd15898db6 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -50,8 +50,6 @@
backlight: backlight {
compatible = "pwm-backlight";
- pinctrl-names = "default";
- pinctrl-0 = <&bl_en_pin>;
pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
default-brightness-level = <8>;
@@ -93,11 +91,6 @@
};
&pio {
- bl_en_pin: bl_en_pin@0 {
- pins = "PH6";
- function = "gpio_in";
- };
-
mmc0_cd_pin: mmc0_cd_pin@0 {
pins = "PB4";
function = "gpio_in";
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f2462a6bdba6..decd388d613d 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
CONFIG_WL18XX=m
CONFIG_WLCORE_SPI=m
CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=m
CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index b14e8c7d71bd..3a36d99ff836 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index e22089fb44dc..a3f0b3d50089 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -209,6 +209,7 @@
#define HSR_EC_IABT_HYP (0x21)
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
+#define HSR_EC_MAX (0x3f)
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index cc495d799c67..31ee468ce667 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -30,7 +30,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index a8d656d9aec7..1c462381c225 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -20,6 +20,7 @@
#else
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index b63527359d52..2577405d082d 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -24,25 +24,7 @@
#define __put_user_unaligned __put_user
#endif
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
/*
* These two functions allow hooking accesses to userspace to increase
@@ -473,7 +455,7 @@ extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
-__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned int __ua_flags;
@@ -489,7 +471,7 @@ extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);
static inline unsigned long __must_check
-__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
unsigned int __ua_flags;
@@ -517,54 +499,22 @@ __clear_user(void __user *addr, unsigned long n)
}
#else
-#define __arch_copy_from_user(to, from, n) \
- (memcpy(to, (void __force *)from, n), 0)
-#define __arch_copy_to_user(to, from, n) \
- (memcpy((void __force *)to, from, n), 0)
-#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
-#endif
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- check_object_size(to, n, false);
- return __arch_copy_from_user(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long res = n;
-
- check_object_size(to, n, false);
-
- if (likely(access_ok(VERIFY_READ, from, n)))
- res = __arch_copy_from_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
-}
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- check_object_size(from, n, true);
-
- return __arch_copy_to_user(to, from, n);
+ memcpy(to, (const void __force *)from, n);
+ return 0;
}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- check_object_size(from, n, true);
-
- if (access_ok(VERIFY_WRITE, to, n))
- n = __arch_copy_to_user(to, from, n);
- return n;
+ memcpy((void __force *)to, from, n);
+ return 0;
}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
+#endif
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c9a2103faeb9..96dba7cd8be7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+ case KVM_CAP_NR_MEMSLOTS:
+ r = KVM_USER_MEM_SLOTS;
+ break;
case KVM_CAP_MSI_DEVID:
if (!kvm)
r = -EINVAL;
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d1955e35..96af65a30d78 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+ kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
+ hsr);
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
+ [0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
- if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
- !arm_exit_handlers[hsr_ec]) {
- kvm_err("Unknown exception class: hsr: %#08x\n",
- (unsigned int)kvm_vcpu_get_hsr(vcpu));
- BUG();
- }
-
return arm_exit_handlers[hsr_ec];
}
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 3d89b7905bd9..a277981f414d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
}
+static void sama5d3_ddr_standby(void)
+{
+ u32 lpr0;
+ u32 saved_lpr0;
+
+ saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+ lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+ lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+ cpu_do_idle();
+
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
* remember.
*/
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
- { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+ { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
{ /*sentinel*/ }
};
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 093458b62c8d..c89757abb0ae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
obj-y += $(onenand-m) $(onenand-y)
-
-nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o
-obj-y += $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644
index f6ac027f3c3b..000000000000
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define NAND_IO_SIZE 4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
- /* platforms which support all ECC schemes */
- if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
- soc_is_omap54xx() || soc_is_dra7xx())
- return 1;
-
- if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
- ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
- if (cpu_is_omap24xx())
- return 0;
- else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
- return 0;
- else
- return 1;
- }
-
- /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
- * which require H/W based ECC error detection */
- if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
- ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
- (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
- return 0;
-
- /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
- if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
- ecc_opt == OMAP_ECC_HAM1_CODE_SW)
- return 1;
- else
- return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
- struct gpmc_settings *s)
-{
- /* Enable RD PIN Monitoring Reg */
- if (gpmc_nand_data->dev_ready) {
- s->wait_on_read = true;
- s->wait_on_write = true;
- }
-
- if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
- s->device_width = GPMC_DEVWIDTH_16BIT;
- else
- s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
- struct gpmc_timings *gpmc_t)
-{
- int err = 0;
- struct gpmc_settings s;
- struct platform_device *pdev;
- struct resource gpmc_nand_res[] = {
- { .flags = IORESOURCE_MEM, },
- { .flags = IORESOURCE_IRQ, },
- { .flags = IORESOURCE_IRQ, },
- };
-
- BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
- err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
- (unsigned long *)&gpmc_nand_res[0].start);
- if (err < 0) {
- pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
- gpmc_nand_data->cs, err);
- return err;
- }
- gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
- gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
- gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
- memset(&s, 0, sizeof(struct gpmc_settings));
- gpmc_set_legacy(gpmc_nand_data, &s);
-
- s.device_nand = true;
-
- if (gpmc_t) {
- err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
- if (err < 0) {
- pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
- err);
- return err;
- }
- }
-
- err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
- if (err < 0)
- goto out_free_cs;
-
- err = gpmc_configure(GPMC_CONFIG_WP, 0);
- if (err < 0)
- goto out_free_cs;
-
- if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
- pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
- err = -EINVAL;
- goto out_free_cs;
- }
-
-
- pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
- if (pdev) {
- err = platform_device_add_resources(pdev, gpmc_nand_res,
- ARRAY_SIZE(gpmc_nand_res));
- if (!err)
- pdev->dev.platform_data = gpmc_nand_data;
- } else {
- err = -ENOMEM;
- }
- if (err)
- goto out_free_pdev;
-
- err = platform_device_add(pdev);
- if (err) {
- dev_err(&pdev->dev, "Unable to register NAND device\n");
- goto out_free_pdev;
- }
-
- return 0;
-
-out_free_pdev:
- platform_device_put(pdev);
-out_free_cs:
- gpmc_cs_free(gpmc_nand_data->cs);
-
- return err;
-}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c703546a..2944af820558 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
return ret;
}
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
{
int err;
struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
if (err < 0) {
dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
gpmc_onenand_data->cs, err);
- return;
+ return err;
}
gpmc_onenand_resource.end = gpmc_onenand_resource.start +
ONENAND_IO_SIZE - 1;
- if (platform_device_register(&gpmc_onenand_device) < 0) {
+ err = platform_device_register(&gpmc_onenand_device);
+ if (err) {
dev_err(dev, "Unable to register OneNAND device\n");
gpmc_cs_free(gpmc_onenand_data->cs);
- return;
}
+
+ return err;
}
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2734d4..4c6f14cf92a8 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
#include "omap44xx.h"
@@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
cmp r0, r4
bne wait_2
ldr r12, =API_HYP_ENTRY
- adr r0, hyp_boot
+ badr r0, hyp_boot
smc #0
hyp_boot:
b omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917ec8621..1435fee39a89 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
};
/* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+ {
+ .pa_start = OMAP34XX_SR1_BASE,
+ .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { },
+};
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_sr1_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr1_addr_space,
.user = OCP_USER_MPU,
};
@@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap36xx_sr1_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr1_addr_space,
.user = OCP_USER_MPU,
};
/* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+ {
+ .pa_start = OMAP34XX_SR2_BASE,
+ .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { },
+};
static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap34xx_sr2_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr2_addr_space,
.user = OCP_USER_MPU,
};
@@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &omap36xx_sr2_hwmod,
.clk = "sr_l4_ick",
+ .addr = omap3_sr2_addr_space,
.user = OCP_USER_MPU,
};
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
* Return: 0 if device named @dev_name is not likely to be accessible,
* or 1 if it is likely to be accessible.
*/
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
- const char *dev_name)
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+ const char *dev_name)
{
+ struct device_node *node;
+ bool available;
+
if (!bus)
- return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+ return omap_type() == OMAP2_DEVICE_TYPE_GP;
- if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
- return 1;
+ node = of_get_child_by_name(bus, dev_name);
+ available = of_device_is_available(node);
+ of_node_put(node);
- return 0;
+ return available;
}
int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void)
if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
r = omap_hwmod_register_links(h_sham);
- if (r < 0)
+ if (r < 0) {
+ of_node_put(bus);
return r;
+ }
}
if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
r = omap_hwmod_register_links(h_aes);
- if (r < 0)
+ if (r < 0) {
+ of_node_put(bus);
return r;
+ }
}
+ of_node_put(bus);
/*
* Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 3c2cb5d5adfa..0bb0e9c6376c 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -411,3 +411,4 @@
394 common pkey_mprotect sys_pkey_mprotect
395 common pkey_alloc sys_pkey_alloc
396 common pkey_free sys_pkey_free
+397 common statx sys_statx
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a39029b5414e..3c833ff3303c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -115,6 +115,7 @@ config ARM64
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
+ select ARCH_HAS_RAW_COPY_USER
help
ARM 64-bit (AArch64) Linux support.
@@ -508,6 +509,16 @@ config QCOM_FALKOR_ERRATUM_1009
If unsure, say Y.
+config QCOM_QDF2400_ERRATUM_0065
+ bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+ default y
+ help
+ On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+ ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+ been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+ If unsure, say Y.
+
endmenu
@@ -1063,6 +1074,10 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
+config KEYS_COMPAT
+ def_bool y
+ depends on COMPAT && KEYS
+
endmenu
menu "Power management options"
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index 9f9e203c09c5..bcb03fc32665 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -114,6 +114,7 @@
pcie0: pcie@20020000 {
compatible = "brcm,iproc-pcie";
reg = <0 0x20020000 0 0x1000>;
+ dma-coherent;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
@@ -144,6 +145,7 @@
pcie4: pcie@50020000 {
compatible = "brcm,iproc-pcie";
reg = <0 0x50020000 0 0x1000>;
+ dma-coherent;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
@@ -174,6 +176,7 @@
pcie8: pcie@60c00000 {
compatible = "brcm,iproc-pcie-paxc";
reg = <0 0x60c00000 0 0x1000>;
+ dma-coherent;
linux,pci-domain = <8>;
bus-range = <0x0 0x1>;
@@ -203,6 +206,7 @@
<0x61030000 0x100>;
reg-names = "amac_base", "idm_base", "nicpm_base";
interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
phy-handle = <&gphy0>;
phy-mode = "rgmii";
status = "disabled";
@@ -213,6 +217,7 @@
reg = <0x612c0000 0x445>; /* PDC FS0 regs */
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -222,6 +227,7 @@
reg = <0x612e0000 0x445>; /* PDC FS1 regs */
interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -231,6 +237,7 @@
reg = <0x61300000 0x445>; /* PDC FS2 regs */
interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -240,6 +247,7 @@
reg = <0x61320000 0x445>; /* PDC FS3 regs */
interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
+ dma-coherent;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
};
@@ -644,6 +652,7 @@
sata: ahci@663f2000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x663f2000 0x1000>;
+ dma-coherent;
reg-names = "ahci";
interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
@@ -667,6 +676,7 @@
compatible = "brcm,sdhci-iproc-cygnus";
reg = <0x66420000 0x100>;
interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
bus-width = <8>;
clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
status = "disabled";
@@ -676,6 +686,7 @@
compatible = "brcm,sdhci-iproc-cygnus";
reg = <0x66430000 0x100>;
interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
bus-width = <8>;
clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>;
status = "disabled";
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 05310ad8c5ab..f31c48d0cd68 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
static inline bool system_uses_ttbr0_pan(void)
{
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
- !cpus_have_cap(ARM64_HAS_PAN);
+ !cpus_have_const_cap(ARM64_HAS_PAN);
}
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
new file mode 100644
index 000000000000..42f50f15a44c
--- /dev/null
+++ b/arch/arm64/include/asm/extable.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ int insn, fixup;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+extern int fixup_exception(struct pt_regs *regs);
+#endif
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f21fd3894370..e7705e7bb07b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,8 +30,7 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 512
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 69b2fd41503c..345a072b5856 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgprot(x) ((pgprot_t) { (x) } )
#if CONFIG_PGTABLE_LEVELS == 2
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
+#elif CONFIG_PGTABLE_LEVELS == 4
+#include <asm-generic/5level-fixup.h>
#endif
#endif /* __ASM_PGTABLE_TYPES_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 7c514e10a08e..ba497172610d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -33,28 +33,7 @@
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/compiler.h>
-
-/*
- * The exception table consists of pairs of relative offsets: the first
- * is the relative offset to an instruction that is allowed to fault,
- * and the second is the relative offset at which the program should
- * continue. No registers are modified, so it is entirely up to the
- * continuation code to figure out what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- int insn, fixup;
-};
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)
@@ -352,58 +331,13 @@ do { \
})
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define raw_copy_from_user __arch_copy_from_user
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_to_user __arch_copy_to_user
+extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-
-static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- kasan_check_write(to, n);
- check_object_size(to, n, false);
- return __arch_copy_from_user(to, from, n);
-}
-
-static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- kasan_check_read(from, n);
- check_object_size(from, n, true);
- return __arch_copy_to_user(to, from, n);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned long res = n;
- kasan_check_write(to, n);
- check_object_size(to, n, false);
-
- if (access_ok(VERIFY_READ, from, n)) {
- res = __arch_copy_from_user(to, from, n);
- }
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
-}
-
-static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- kasan_check_read(from, n);
- check_object_size(from, n, true);
-
- if (access_ok(VERIFY_WRITE, to, n)) {
- n = __arch_copy_to_user(to, from, n);
- }
- return n;
-}
-
-static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
- n = __copy_in_user(to, from, n);
- return n;
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e78ac26324bd..bdbeb06dc11e 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 394
+#define __NR_compat_syscalls 398
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index b7e8ef16ff0d..c66b51aab195 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
__SYSCALL(__NR_preadv2, compat_sys_preadv2)
#define __NR_pwritev2 393
__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
+#define __NR_pkey_mprotect 394
+__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
+#define __NR_pkey_alloc 395
+__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
+#define __NR_pkey_free 396
+__SYSCALL(__NR_pkey_free, sys_pkey_free)
+#define __NR_statx 397
+__SYSCALL(__NR_statx, sys_statx)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index e9c4dc9e0ada..67368c7329c0 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 75a0f8acef66..fd691087dc9a 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
}
/**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
* @arg: argument to pass to CPU suspend operations
*
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 769f24ef628c..d7e90d97f5c4 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
/*
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
- * happens, increase the KASLR offset by the size of the kernel image.
+ * happens, increase the KASLR offset by the size of the kernel image
+ * rounded up by SWAPPER_BLOCK_SIZE.
*/
if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
- (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT))
- offset = (offset + (u64)(_end - _text)) & mask;
+ (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
+ u64 kimg_sz = _end - _text;
+ offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
+ & mask;
+ }
if (IS_ENABLED(CONFIG_KASAN))
/*
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae5b8a2..c5c45942fb6e 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
return 0;
}
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return NOTIFY_DONE;
-}
-
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 1bfe30dfbfe7..fa1b18e364fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
return ret;
}
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+ kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
+ hsr, esr_get_class_string(hsr));
+
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
u32 hsr = kvm_vcpu_get_hsr(vcpu);
u8 hsr_ec = ESR_ELx_EC(hsr);
- if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
- !arm_exit_handlers[hsr_ec]) {
- kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
- hsr, esr_get_class_string(hsr));
- BUG();
- }
-
return arm_exit_handlers[hsr_ec];
}
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index e8e7ba2bc11f..9e1d2b75eecd 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -18,14 +18,62 @@
#include <asm/kvm_hyp.h>
#include <asm/tlbflush.h>
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+ u64 val;
+
+ /*
+ * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+ * most TLB operations target EL2/EL0. In order to affect the
+ * guest TLBs (EL1/EL0), we need to change one of these two
+ * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+ * let's flip TGE before executing the TLB operation.
+ */
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ val = read_sysreg(hcr_el2);
+ val &= ~HCR_TGE;
+ write_sysreg(val, hcr_el2);
+ isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+ __tlb_switch_to_guest_nvhe,
+ __tlb_switch_to_guest_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+ /*
+ * We're done with the TLB operation, let's restore the host's
+ * view of HCR_EL2.
+ */
+ write_sysreg(0, vttbr_el2);
+ write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+ write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+ __tlb_switch_to_host_nvhe,
+ __tlb_switch_to_host_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
- isb();
+ __tlb_switch_to_guest()(kvm);
/*
* We could do so much better if we had the VA as well.
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
dsb(ish);
isb();
- write_sysreg(0, vttbr_el2);
+ __tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
- isb();
+ __tlb_switch_to_guest()(kvm);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- write_sysreg(0, vttbr_el2);
+ __tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
/* Switch to requested VMID */
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
- isb();
+ __tlb_switch_to_guest()(kvm);
__tlbi(vmalle1);
dsb(nsh);
isb();
- write_sysreg(0, vttbr_el2);
+ __tlb_switch_to_host()(kvm);
}
void __hyp_text __kvm_flush_vm_context(void)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 47184c3a97da..b24a830419ad 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,14 +64,14 @@
.endm
end .req x5
-ENTRY(__copy_in_user)
+ENTRY(raw_copy_in_user)
uaccess_enable_not_uao x3, x4
add end, x0, x2
#include "copy_template.S"
uaccess_disable_not_uao x3
mov x0, #0
ret
-ENDPROC(__copy_in_user)
+ENDPROC(raw_copy_in_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 55d1e9205543..687a358a3733 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -162,7 +162,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(_text)));
+ pfn_to_nid(virt_to_pfn(lm_alias(_text))));
/*
* vmemmap_populate() has populated the shadow region that covers the
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 7e75d45e20cd..8c349f2a9ebb 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -19,6 +19,7 @@ config AVR32
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
select HAVE_NMI
+ select ARCH_HAS_RAW_COPY_USER
help
AVR32 is a high-performance 32-bit RISC microprocessor core,
designed for cost-sensitive embedded applications, with particular
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 3d7ef2c17a7c..ff7d7393b25c 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
+generic-y += extable.h
generic-y += futex.h
generic-y += irq_regs.h
generic-y += irq_work.h
diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h
index 425dd567b5b9..d5b1c63993ec 100644
--- a/arch/avr32/include/asm/pgtable-2level.h
+++ b/arch/avr32/include/asm/pgtable-2level.h
@@ -8,6 +8,7 @@
#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
#define __ASM_AVR32_PGTABLE_2LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/*
diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
index 7ca5cb33369b..619d50e843a5 100644
--- a/arch/avr32/include/asm/uaccess.h
+++ b/arch/avr32/include/asm/uaccess.h
@@ -66,34 +66,18 @@ static inline void set_fs(mm_segment_t s)
extern __kernel_size_t __copy_user(void *to, const void *from,
__kernel_size_t n);
-extern __kernel_size_t copy_to_user(void __user *to, const void *from,
- __kernel_size_t n);
-extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
- __kernel_size_t n);
-
-static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
- __kernel_size_t n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __copy_user((void __force *)to, from, n);
}
-static inline __kernel_size_t __copy_from_user(void *to,
- const void __user *from,
- __kernel_size_t n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __copy_user(to, (const void __force *)from, n);
}
-static inline __kernel_size_t copy_from_user(void *to,
- const void __user *from,
- __kernel_size_t n)
-{
- size_t res = ___copy_from_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* put_user: - Write a simple value into user space.
@@ -323,9 +307,6 @@ extern long __strnlen_user(const char __user *__s, long __n);
#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
+#include <asm/extable.h>
#endif /* __ASM_AVR32_UACCESS_H */
diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
index 0d05fd095468..0cdae8eb0220 100644
--- a/arch/avr32/kernel/avr32_ksyms.c
+++ b/arch/avr32/kernel/avr32_ksyms.c
@@ -36,8 +36,6 @@ EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
-EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S
index 075373471da1..f43275796a0c 100644
--- a/arch/avr32/lib/copy_user.S
+++ b/arch/avr32/lib/copy_user.S
@@ -23,21 +23,6 @@
*/
.text
.align 1
- .global ___copy_from_user
- .type ___copy_from_user, @function
-___copy_from_user:
- branch_if_kernel r8, __copy_user
- ret_if_privileged r8, r11, r10, r10
- rjmp __copy_user
- .size ___copy_from_user, . - ___copy_from_user
-
- .global copy_to_user
- .type copy_to_user, @function
-copy_to_user:
- branch_if_kernel r8, __copy_user
- ret_if_privileged r8, r12, r10, r10
- .size copy_to_user, . - copy_to_user
-
.global __copy_user
.type __copy_user, @function
__copy_user:
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 3c1bd640042a..919dad1436f7 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -41,6 +41,7 @@ config BLACKFIN
select MODULES_USE_ELF_RELA
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_NMI
+ select ARCH_HAS_RAW_COPY_USER
config GENERIC_CSUM
def_bool y
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 625db8ac815e..dc4ef9ac1e83 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
+generic-y += extable.h
generic-y += fb.h
generic-y += futex.h
generic-y += hw_irq.h
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index c9fedc3be30c..f54a34f31cea 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -42,22 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size) { return 1;
extern int _access_ok(unsigned long addr, unsigned long size);
#endif
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
+#include <asm/extable.h>
/*
* These are the main single-value transfer routines. They automatically
@@ -159,41 +144,23 @@ static inline int bad_user_access_length(void)
: "a" (__ptr(ptr))); \
})
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
memcpy(to, (const void __force *)from, n);
return 0;
}
static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
memcpy((void __force *)to, from, n);
SSYNC();
return 0;
}
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n)))
- return __copy_from_user(to, from, n);
- memset(to, 0, n);
- return n;
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
- return __copy_to_user(to, from, n);
- return n;
-}
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* Copy a null terminated string from userspace.
*/
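Like the avr32 hunk above, blackfin now supplies only raw_copy_{from,to}_user() and defines INLINE_COPY_{FROM,TO}_USER; the access_ok() check and the zeroing of the uncopied tail move into the generic wrappers in include/linux/uaccess.h. A simplified sketch of what that generic copy_from_user() path does (copy_from_user_sketch() is an illustrative name; the real wrapper also adds might_fault(), object-size checks and KASAN hooks):

#include <linux/string.h>
#include <linux/uaccess.h>

/* Simplified sketch of the generic wrapper behaviour pulled in once an
 * architecture selects ARCH_HAS_RAW_COPY_USER: validate the range, let the
 * arch helper do the copy, then zero whatever could not be copied. */
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;			/* bytes that could not be copied */
}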
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 5aa8ea8bad2d..3c7bd9a29f90 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -18,6 +18,7 @@ config C6X
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select ARCH_NO_COHERENT_DMA_MMAP
+ select ARCH_HAS_RAW_COPY_USER
config MMU
def_bool n
diff --git a/arch/c6x/include/asm/uaccess.h b/arch/c6x/include/asm/uaccess.h
index 79fb6f439c5c..ba6756879f00 100644
--- a/arch/c6x/include/asm/uaccess.h
+++ b/arch/c6x/include/asm/uaccess.h
@@ -13,17 +13,11 @@
#include <linux/compiler.h>
#include <linux/string.h>
-#ifdef CONFIG_ACCESS_CHECK
-#define __access_ok _access_ok
-#endif
-
/*
- * __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h
- *
* C6X supports unaligned 32 and 64 bit loads and stores.
*/
-static inline __must_check long __copy_from_user(void *to,
- const void __user *from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
u32 tmp32;
u64 tmp64;
@@ -58,8 +52,8 @@ static inline __must_check long __copy_from_user(void *to,
return 0;
}
-static inline __must_check long __copy_to_user(void __user *to,
- const void *from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
u32 tmp32;
u64 tmp64;
@@ -93,6 +87,8 @@ static inline __must_check long __copy_to_user(void __user *to,
memcpy((void __force *)to, from, n);
return 0;
}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
extern int _access_ok(unsigned long addr, unsigned long size);
#ifdef CONFIG_ACCESS_CHECK
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 71b758dc3a96..36f94c45e3f9 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -71,6 +71,7 @@ config CRIS
select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32
select HAVE_NMI
+ select ARCH_HAS_RAW_COPY_USER
config HZ
int
diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c
index 1ba7cc000dfc..48fa37fe0f9b 100644
--- a/arch/cris/arch-v10/lib/usercopy.c
+++ b/arch/cris/arch-v10/lib/usercopy.c
@@ -188,11 +188,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
}
EXPORT_SYMBOL(__copy_user);
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
+/* Copy from user to kernel. The return-value is the number of bytes that were
inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+unsigned long __copy_user_in(void *pdst, const void __user *psrc,
unsigned long pn)
{
/* We want the parameters put in special registers.
@@ -217,19 +216,17 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
{
__asm_copy_from_user_1 (dst, src, retn);
n--;
+ if (retn)
+ goto exception;
}
if (((unsigned long) src & 2) && n >= 2)
{
__asm_copy_from_user_2 (dst, src, retn);
n -= 2;
+ if (retn)
+ goto exception;
}
-
- /* We only need one check after the unalignment-adjustments, because
- if both adjustments were done, either both or neither reference
- had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
}
/* Decide which copying method to use. */
@@ -328,7 +325,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ goto exception;
}
/* If we get here, there were no memory read faults. */
@@ -356,20 +353,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
bytes. */
return retn;
-copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
+exception:
return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(__copy_user_in);
/* Zero userspace. */
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index ae6903d7fdbe..14970f11bbf2 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
dma_in_cfg.en = regk_dma_no;
REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
- /* Disble the cryptocop. */
+ /* Disable the cryptocop. */
rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
rw_cfg.en = 0;
REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
index 05e58dab800d..20b608026913 100644
--- a/arch/cris/arch-v32/lib/usercopy.c
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -156,10 +156,9 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
}
EXPORT_SYMBOL(__copy_user);
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
+/* Copy from user to kernel. The return-value is the number of bytes that were
inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+unsigned long __copy_user_in(void *pdst, const void __user *psrc,
unsigned long pn)
{
/* We want the parameters put in special registers.
@@ -184,19 +183,18 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
{
__asm_copy_from_user_1 (dst, src, retn);
n--;
+ if (retn != 0)
+ goto exception;
}
if (((unsigned long) src & 2) && n >= 2)
{
__asm_copy_from_user_2 (dst, src, retn);
n -= 2;
+ if (retn != 0)
+ goto exception;
}
- /* We only need one check after the unalignment-adjustments, because
- if both adjustments were done, either both or neither reference
- had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
}
/* Movem is dirt cheap. The overheap is low enough to always use the
@@ -279,7 +277,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ goto exception;
}
/* If we get here, there were no memory read faults. */
@@ -307,20 +305,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
bytes. */
return retn;
-copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
+exception:
return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(__copy_user_in);
/* Zero userspace. */
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
diff --git a/arch/cris/include/arch-v10/arch/uaccess.h b/arch/cris/include/arch-v10/arch/uaccess.h
index 65b02d9b605a..5477c98c2281 100644
--- a/arch/cris/include/arch-v10/arch/uaccess.h
+++ b/arch/cris/include/arch-v10/arch/uaccess.h
@@ -172,16 +172,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_user_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"2: move.b $r9,[%0+]\n", \
- "3: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "3: addq 1,%2\n", \
" .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.w [%1+],$r9\n" \
"2: move.w $r9,[%0+]\n" COPY, \
- "3: addq 2,%2\n" \
- " clear.w [%0+]\n" FIXUP, \
+ "3: addq 2,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@@ -191,16 +189,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_2x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"4: move.b $r9,[%0+]\n", \
- "5: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" move.d [%1+],$r9\n" \
"2: move.d $r9,[%0+]\n" COPY, \
- "3: addq 4,%2\n" \
- " clear.d [%0+]\n" FIXUP, \
+ "3: addq 4,%2\n" FIXUP, \
" .dword 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
@@ -210,8 +206,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_4x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"4: move.b $r9,[%0+]\n", \
- "5: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -219,7 +214,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w [%1+],$r9\n" \
"4: move.w $r9,[%0+]\n" COPY, \
"5: addq 2,%2\n" \
- " clear.w [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 4b,5b\n" TENTRY)
#define __asm_copy_from_user_6(to, from, ret) \
@@ -229,8 +224,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_6x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"6: move.b $r9,[%0+]\n", \
- "7: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -238,7 +232,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \
"4: move.d $r9,[%0+]\n" COPY, \
"5: addq 4,%2\n" \
- " clear.d [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 4b,5b\n" TENTRY)
#define __asm_copy_from_user_8(to, from, ret) \
@@ -248,8 +242,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_8x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"6: move.b $r9,[%0+]\n", \
- "7: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -257,7 +250,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w [%1+],$r9\n" \
"6: move.w $r9,[%0+]\n" COPY, \
"7: addq 2,%2\n" \
- " clear.w [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 6b,7b\n" TENTRY)
#define __asm_copy_from_user_10(to, from, ret) \
@@ -267,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_10x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"8: move.b $r9,[%0+]\n", \
- "9: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -276,7 +268,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \
"6: move.d $r9,[%0+]\n" COPY, \
"7: addq 4,%2\n" \
- " clear.d [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 6b,7b\n" TENTRY)
#define __asm_copy_from_user_12(to, from, ret) \
@@ -286,8 +278,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_12x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"8: move.b $r9,[%0+]\n", \
- "9: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -295,7 +286,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w [%1+],$r9\n" \
"8: move.w $r9,[%0+]\n" COPY, \
"9: addq 2,%2\n" \
- " clear.w [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 8b,9b\n" TENTRY)
#define __asm_copy_from_user_14(to, from, ret) \
@@ -305,8 +296,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_14x_cont(to, from, ret, \
" move.b [%1+],$r9\n" \
"10: move.b $r9,[%0+]\n", \
- "11: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "11: addq 1,%2\n", \
" .dword 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -314,7 +304,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \
"8: move.d $r9,[%0+]\n" COPY, \
"9: addq 4,%2\n" \
- " clear.d [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 8b,9b\n" TENTRY)
#define __asm_copy_from_user_16(to, from, ret) \
@@ -325,7 +315,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \
"10: move.d $r9,[%0+]\n" COPY, \
"11: addq 4,%2\n" \
- " clear.d [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 10b,11b\n" TENTRY)
#define __asm_copy_from_user_20(to, from, ret) \
@@ -336,7 +326,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d [%1+],$r9\n" \
"12: move.d $r9,[%0+]\n" COPY, \
"13: addq 4,%2\n" \
- " clear.d [%0+]\n" FIXUP, \
+ FIXUP, \
" .dword 12b,13b\n" TENTRY)
#define __asm_copy_from_user_24(to, from, ret) \
diff --git a/arch/cris/include/arch-v32/arch/uaccess.h b/arch/cris/include/arch-v32/arch/uaccess.h
index 3196019706cb..dc2ce090f624 100644
--- a/arch/cris/include/arch-v32/arch/uaccess.h
+++ b/arch/cris/include/arch-v32/arch/uaccess.h
@@ -178,8 +178,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"2: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
"3: addq 1,%2\n" \
- " jump 1b\n" \
- " clear.b [%0+]\n", \
+ " jump 1b\n", \
" .dword 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -189,8 +188,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.w $acr,[%0+]\n", \
FIXUP \
"3: addq 2,%2\n" \
- " jump 1b\n" \
- " clear.w [%0+]\n", \
+ " jump 1b\n", \
TENTRY \
" .dword 2b,3b\n")
@@ -201,8 +199,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_2x_cont(to, from, ret, \
"4: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "5: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -212,8 +209,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
" move.d $acr,[%0+]\n", \
FIXUP \
"3: addq 4,%2\n" \
- " jump 1b\n" \
- " clear.d [%0+]\n", \
+ " jump 1b\n", \
TENTRY \
" .dword 2b,3b\n")
@@ -224,8 +220,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_4x_cont(to, from, ret, \
"4: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "5: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "5: addq 1,%2\n", \
" .dword 4b,5b\n")
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -234,8 +229,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"4: move.w [%1+],$acr\n" \
" move.w $acr,[%0+]\n", \
FIXUP \
- "5: addq 2,%2\n" \
- " clear.w [%0+]\n", \
+ "5: addq 2,%2\n", \
TENTRY \
" .dword 4b,5b\n")
@@ -246,8 +240,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_6x_cont(to, from, ret, \
"6: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "7: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -256,8 +249,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"4: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \
FIXUP \
- "5: addq 4,%2\n" \
- " clear.d [%0+]\n", \
+ "5: addq 4,%2\n", \
TENTRY \
" .dword 4b,5b\n")
@@ -268,8 +260,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_8x_cont(to, from, ret, \
"6: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "7: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "7: addq 1,%2\n", \
" .dword 6b,7b\n")
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -278,8 +269,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"6: move.w [%1+],$acr\n" \
" move.w $acr,[%0+]\n", \
FIXUP \
- "7: addq 2,%2\n" \
- " clear.w [%0+]\n", \
+ "7: addq 2,%2\n", \
TENTRY \
" .dword 6b,7b\n")
@@ -290,8 +280,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_10x_cont(to, from, ret, \
"8: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "9: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -300,8 +289,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"6: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \
FIXUP \
- "7: addq 4,%2\n" \
- " clear.d [%0+]\n", \
+ "7: addq 4,%2\n", \
TENTRY \
" .dword 6b,7b\n")
@@ -312,8 +300,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_12x_cont(to, from, ret, \
"8: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "9: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "9: addq 1,%2\n", \
" .dword 8b,9b\n")
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -322,8 +309,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"8: move.w [%1+],$acr\n" \
" move.w $acr,[%0+]\n", \
FIXUP \
- "9: addq 2,%2\n" \
- " clear.w [%0+]\n", \
+ "9: addq 2,%2\n", \
TENTRY \
" .dword 8b,9b\n")
@@ -334,8 +320,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
__asm_copy_from_user_14x_cont(to, from, ret, \
"10: move.b [%1+],$acr\n" \
" move.b $acr,[%0+]\n", \
- "11: addq 1,%2\n" \
- " clear.b [%0+]\n", \
+ "11: addq 1,%2\n", \
" .dword 10b,11b\n")
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
@@ -344,8 +329,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"8: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \
FIXUP \
- "9: addq 4,%2\n" \
- " clear.d [%0+]\n", \
+ "9: addq 4,%2\n", \
TENTRY \
" .dword 8b,9b\n")
@@ -358,8 +342,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"10: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \
FIXUP \
- "11: addq 4,%2\n" \
- " clear.d [%0+]\n", \
+ "11: addq 4,%2\n", \
TENTRY \
" .dword 10b,11b\n")
@@ -372,8 +355,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count)
"12: move.d [%1+],$acr\n" \
" move.d $acr,[%0+]\n", \
FIXUP \
- "13: addq 4,%2\n" \
- " clear.d [%0+]\n", \
+ "13: addq 4,%2\n", \
TENTRY \
" .dword 12b,13b\n")
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 0f5132b08896..2890099992a9 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += device.h
generic-y += div64.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += emergency-restart.h
generic-y += fcntl.h
generic-y += futex.h
diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h
index 2a3210ba4c72..fa3a73004cc5 100644
--- a/arch/cris/include/asm/pgtable.h
+++ b/arch/cris/include/asm/pgtable.h
@@ -6,6 +6,7 @@
#define _CRIS_PGTABLE_H
#include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
index 5f5b8f53d2d7..0d473aec3066 100644
--- a/arch/cris/include/asm/uaccess.h
+++ b/arch/cris/include/asm/uaccess.h
@@ -50,23 +50,7 @@
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
#include <arch/uaccess.h>
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
+#include <asm/extable.h>
/*
* These are the main single-value transfer routines. They automatically
@@ -185,7 +169,7 @@ extern long __get_user_bad(void);
live in lib/usercopy.c */
extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
+extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline long
@@ -252,7 +236,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
else if (n == 24)
__asm_copy_from_user_24(to, from, ret);
else
- ret = __copy_user_zeroing(to, from, n);
+ ret = __copy_user_in(to, from, n);
return ret;
}
@@ -352,62 +336,33 @@ static inline size_t clear_user(void __user *to, size_t n)
return __do_clear_user(to, n);
}
-static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (unlikely(!access_ok(VERIFY_READ, from, n))) {
- memset(to, 0, n);
- return n;
- }
if (__builtin_constant_p(n))
return __constant_copy_from_user(to, from, n);
else
- return __copy_user_zeroing(to, from, n);
+ return __copy_user_in(to, from, n);
}
-static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
- return n;
if (__builtin_constant_p(n))
return __constant_copy_to_user(to, from, n);
else
return __copy_user(to, from, n);
}
-/* We let the __ versions of copy_from/to_user inline, because they're often
- * used in fast paths and have only a small space overhead.
- */
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void __user *from,
- unsigned long n)
-{
- return __copy_user_zeroing(to, from, n);
-}
-
-static inline unsigned long
-__generic_copy_to_user_nocheck(void __user *to, const void *from,
- unsigned long n)
-{
- return __copy_user(to, from, n);
-}
-
-static inline unsigned long
-__generic_clear_user_nocheck(void __user *to, unsigned long n)
+__clear_user(void __user *to, unsigned long n)
{
return __do_clear_user(to, n);
}
-/* without checking */
-
-#define __copy_to_user(to, from, n) \
- __generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
- __generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))
-
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
#endif /* _CRIS_UACCESS_H */
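The cris raw_copy_from_user() above keeps the __builtin_constant_p() dispatch, so when the generic wrapper is inlined (INLINE_COPY_FROM_USER) a compile-time-constant size can still resolve to one of the unrolled __asm_copy_from_user_N() helpers rather than __copy_user_in(). A hypothetical caller (demo_args and demo_fetch_args are illustrative names) where the eight-byte constant size makes that possible:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {
	u32 flags;
	u32 value;
};

/* Hypothetical ioctl-style helper: sizeof(*dst) is a compile-time constant,
 * so the inlined copy_from_user() can reach the constant-size fast path. */
static int demo_fetch_args(struct demo_args *dst, const void __user *uarg)
{
	if (copy_from_user(dst, uarg, sizeof(*dst)))
		return -EFAULT;
	return 0;
}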
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index eefd9a4ed156..e489fef111dd 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -16,6 +16,7 @@ config FRV
select OLD_SIGACTION
select HAVE_DEBUG_STACKOVERFLOW
select ARCH_NO_COHERENT_DMA_MMAP
+ select ARCH_HAS_RAW_COPY_USER
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index c33b46715f65..cce3bc3603ea 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += clkdev.h
generic-y += exec.h
+generic-y += extable.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index a0513d463a1f..ab6e7e961b54 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -16,6 +16,7 @@
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
+#include <asm-generic/5level-fixup.h>
#include <asm/mem-layout.h>
#include <asm/setup.h>
#include <asm/processor.h>
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 55b3a69c6c53..e4e33b4cd3ae 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <asm/segment.h>
#include <asm/sections.h>
+#include <asm/extable.h>
#define __ptr(x) ((unsigned long __force *)(x))
@@ -59,26 +60,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
-
/*
* These are the main single-value transfer routines. They automatically
@@ -252,61 +233,50 @@ do { \
/*
*
*/
+
#define ____force(x) (__force void *)(void __user *)(x)
#ifdef CONFIG_MMU
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);
#define __clear_user(dst,count) __memset_user(____force(dst), (count))
-#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
-#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
#else
#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
-#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
-#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
#endif
-static inline unsigned long __must_check
-clear_user(void __user *to, unsigned long n)
-{
- if (likely(__access_ok(to, n)))
- n = __clear_user(to, n);
- return n;
-}
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- might_fault();
- return __copy_to_user_inatomic(to, from, n);
-}
-
static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- might_fault();
- return __copy_from_user_inatomic(to, from, n);
+#ifdef CONFIG_MMU
+ return __memcpy_user(to, (__force const void *)from, n);
+#else
+ memcpy(to, (__force const void *)from, n);
+ return 0;
+#endif
}
-static inline long copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- unsigned long ret = n;
-
- if (likely(__access_ok(from, n)))
- ret = __copy_from_user(to, from, n);
-
- if (unlikely(ret != 0))
- memset(to + (n - ret), 0, ret);
-
- return ret;
+#ifdef CONFIG_MMU
+ return __memcpy_user((__force void *)to, from, n);
+#else
+ memcpy((__force void *)to, from, n);
+ return 0;
+#endif
}
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
-static inline long copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
{
- return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n;
+ if (likely(__access_ok(to, n)))
+ n = __clear_user(to, n);
+ return n;
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
@@ -314,6 +284,4 @@ extern long strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
-extern unsigned long search_exception_table(unsigned long addr);
-
#endif /* _ASM_UACCESS_H */
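The comment block deleted above (and the matching ones in the earlier avr32, blackfin and cris hunks) is not lost: it moves into the shared <asm-generic/extable.h> that these architectures now pull in via "generic-y += extable.h". Roughly, that generic header provides the same insn/fixup pair plus the fixup_exception() declaration, along the lines of:

/* Approximate contents of the shared header; the per-arch copies removed in
 * this patch declared the same two-address layout. */
struct exception_table_entry {
	unsigned long insn;	/* address of the instruction allowed to fault */
	unsigned long fixup;	/* address at which execution should continue */
};

struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);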
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index ce29991e4219..fb08ebe0dab4 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -360,13 +360,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
siginfo_t info;
#ifdef CONFIG_MMU
- unsigned long fixup;
-
- fixup = search_exception_table(__frame->pc);
- if (fixup) {
- __frame->pc = fixup;
+ if (fixup_exception(__frame))
return;
- }
#endif
die_if_kernel("-- Memory Access Exception --\n"
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
index a0e8b3e03e4c..9198ddd16092 100644
--- a/arch/frv/mm/extable.c
+++ b/arch/frv/mm/extable.c
@@ -10,40 +10,39 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand
extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
extern spinlock_t modlist_lock;
-
-/*****************************************************************************/
-/*
- * see if there's a fixup handler available to deal with a kernel fault
- */
-unsigned long search_exception_table(unsigned long pc)
+int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *extab;
+ unsigned long pc = regs->pc;
/* determine if the fault lay during a memcpy_user or a memset_user */
- if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
+ if (regs->lr == (unsigned long) &__memset_user_error_lr &&
(unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
) {
/* the fault occurred in a protected memset
* - we search for the return address (in LR) instead of the program counter
* - it was probably during a clear_user()
*/
- return (unsigned long) &__memset_user_error_handler;
+ regs->pc = (unsigned long) &__memset_user_error_handler;
+ return 1;
}
- if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
+ if (regs->lr == (unsigned long) &__memcpy_user_error_lr &&
(unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
) {
/* the fault occurred in a protected memset
* - we search for the return address (in LR) instead of the program counter
* - it was probably during a copy_to/from_user()
*/
- return (unsigned long) &__memcpy_user_error_handler;
+ regs->pc = (unsigned long) &__memcpy_user_error_handler;
+ return 1;
}
extab = search_exception_tables(pc);
- if (extab)
- return extab->fixup;
+ if (extab) {
+ regs->pc = extab->fixup;
+ return 1;
+ }
return 0;
-
-} /* end search_exception_table() */
+}
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 614a46c413d2..179e79e220e5 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -33,7 +33,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
{
struct vm_area_struct *vma;
struct mm_struct *mm;
- unsigned long _pme, lrai, lrad, fixup;
+ unsigned long _pme, lrai, lrad;
unsigned long flags = 0;
siginfo_t info;
pgd_t *pge;
@@ -201,10 +201,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
no_context:
/* are we prepared to handle this kernel fault? */
- if ((fixup = search_exception_table(__frame->pc)) != 0) {
- __frame->pc = fixup;
+ if (fixup_exception(__frame))
return;
- }
/*
* Oops. The kernel tried to access some bad page. We'll have to
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 3ae852507e57..473883417004 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -22,6 +22,7 @@ config H8300
select HAVE_ARCH_KGDB
select HAVE_ARCH_HASH
select CPU_NO_EFFICIENT_FFS
+ select ARCH_HAS_RAW_COPY_USER
config RWSEM_GENERIC_SPINLOCK
def_bool y
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index 8341db67821d..7d265d28ba5e 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -1,5 +1,6 @@
#ifndef _H8300_PGTABLE_H
#define _H8300_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm-generic/pgtable.h>
#define pgtable_cache_init() do { } while (0)
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
index c156a416a279..6f6144a240ce 100644
--- a/arch/h8300/include/asm/uaccess.h
+++ b/arch/h8300/include/asm/uaccess.h
@@ -3,8 +3,8 @@
#include <linux/string.h>
-static inline __must_check long __copy_from_user(void *to,
- const void __user * from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
@@ -24,8 +24,8 @@ static inline __must_check long __copy_from_user(void *to,
return 0;
}
-static inline __must_check long __copy_to_user(void __user *to,
- const void *from, unsigned long n)
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
@@ -46,6 +46,8 @@ static inline __must_check long __copy_to_user(void __user *to,
memcpy((void __force *)to, from, n);
return 0;
}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#include <asm-generic/uaccess.h>
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 1941e4baaee6..0c536a83ea71 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -26,6 +26,7 @@ config HEXAGON
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
+ select ARCH_HAS_RAW_COPY_USER
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 49eab8136ec3..24a9177fb897 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -26,6 +26,7 @@
*/
#include <linux/swap.h>
#include <asm/page.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* A handy thing to have if one has the RAM. Declared in head.S */
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index 3a7f818e5ef7..458b69886b34 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -65,19 +65,12 @@
*/
/* Assembly somewhat optimized copy routines */
-unsigned long __copy_from_user_hexagon(void *to, const void __user *from,
+unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n);
-unsigned long __copy_to_user_hexagon(void __user *to, const void *from,
+unsigned long raw_copy_to_user(void __user *to, const void *from,
unsigned long n);
-
-#define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n)
-#define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n)
-
-/*
- * XXX todo: some additonal performance gain is possible by
- * implementing __copy_to/from_user_inatomic, which is much
- * like __copy_to/from_user, but performs slightly less checking.
- */
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))
@@ -104,10 +97,14 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
return -EFAULT;
if (res > n) {
- copy_from_user(dst, src, n);
+ long left = raw_copy_from_user(dst, src, n);
+ if (unlikely(left))
+ memset(dst + (n - left), 0, left);
return n;
} else {
- copy_from_user(dst, src, res);
+ long left = raw_copy_from_user(dst, src, res);
+ if (unlikely(left))
+ memset(dst + (res - left), 0, left);
return res-1;
}
}
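Because raw_copy_from_user() no longer zeroes the destination on a fault, hexagon_strncpy_from_user() above does the memset() itself; it deliberately skips the copy_from_user() wrapper since the range was already validated. The same pattern written out as a standalone hypothetical helper (copy_checked_range() is an illustrative name):

#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical helper for a range that has already passed access_ok():
 * reproduce the old zero-the-tail semantics on top of raw_copy_from_user(). */
static unsigned long copy_checked_range(void *dst, const void __user *src,
					unsigned long n)
{
	unsigned long left = raw_copy_from_user(dst, src, n);

	if (unlikely(left))
		memset(dst + (n - left), 0, left);
	return left;	/* bytes that could not be copied */
}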
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index af9dec4c28eb..00bcad9cbd8f 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -25,8 +25,8 @@
/* Additional functions */
EXPORT_SYMBOL(__clear_user_hexagon);
-EXPORT_SYMBOL(__copy_from_user_hexagon);
-EXPORT_SYMBOL(__copy_to_user_hexagon);
+EXPORT_SYMBOL(raw_copy_from_user);
+EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__vmgetie);
diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S
index 7fc94f3e6642..7da066fbd16f 100644
--- a/arch/hexagon/mm/copy_from_user.S
+++ b/arch/hexagon/mm/copy_from_user.S
@@ -44,7 +44,7 @@
#define bytes r2
#define loopcount r5
-#define FUNCNAME __copy_from_user_hexagon
+#define FUNCNAME raw_copy_from_user
#include "copy_user_template.S"
/* LOAD FAULTS from COPY_FROM_USER */
diff --git a/arch/hexagon/mm/copy_to_user.S b/arch/hexagon/mm/copy_to_user.S
index 0cfbcc09d1d9..a7b7f8db21df 100644
--- a/arch/hexagon/mm/copy_to_user.S
+++ b/arch/hexagon/mm/copy_to_user.S
@@ -43,7 +43,7 @@
#define bytes r2
#define loopcount r5
-#define FUNCNAME __copy_to_user_hexagon
+#define FUNCNAME raw_copy_to_user
#include "copy_user_template.S"
/* STORE FAULTS from COPY_TO_USER */
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 384794e665fc..6cc22c8d8923 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr;
#if CONFIG_PGTABLE_LEVELS == 3
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#endif
+#include <asm-generic/5level-fixup.h>
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 95474460b367..b3e82bdd6db0 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -19,6 +19,7 @@ config M32R
select HAVE_DEBUG_STACKOVERFLOW
select CPU_NO_EFFICIENT_FFS
select DMA_NOOP_OPS
+ select ARCH_HAS_RAW_COPY_USER
config SBUS
bool
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index deb298777df2..c000ffac8586 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -2,6 +2,7 @@
generic-y += clkdev.h
generic-y += current.h
generic-y += exec.h
+generic-y += extable.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 96b0efdb5f22..07be349c00ad 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -13,6 +13,7 @@
*/
#include <asm/page.h>
#include <asm/setup.h>
+#include <linux/prefetch.h>
/*
* The fs value determines whether argument validity checking should be
@@ -109,25 +110,7 @@ static inline int access_ok(int type, const void *addr, unsigned long size)
}
#endif /* CONFIG_MMU */
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
/*
* These are the main single-value transfer routines. They automatically
@@ -478,174 +461,25 @@ do { \
: "r14", "memory"); \
} while (0)
-#define __copy_user_zeroing(to, from, size) \
-do { \
- unsigned long __dst, __src, __c; \
- __asm__ __volatile__ ( \
- " mv r14, %0\n" \
- " or r14, %1\n" \
- " beq %0, %1, 9f\n" \
- " beqz %2, 9f\n" \
- " and3 r14, r14, #3\n" \
- " bnez r14, 2f\n" \
- " and3 %2, %2, #3\n" \
- " beqz %3, 2f\n" \
- " addi %0, #-4 ; word_copy \n" \
- " .fillinsn\n" \
- "0: ld r14, @%1+\n" \
- " addi %3, #-1\n" \
- " .fillinsn\n" \
- "1: st r14, @+%0\n" \
- " bnez %3, 0b\n" \
- " beqz %2, 9f\n" \
- " addi %0, #4\n" \
- " .fillinsn\n" \
- "2: ldb r14, @%1 ; byte_copy \n" \
- " .fillinsn\n" \
- "3: stb r14, @%0\n" \
- " addi %1, #1\n" \
- " addi %2, #-1\n" \
- " addi %0, #1\n" \
- " bnez %2, 2b\n" \
- " .fillinsn\n" \
- "9:\n" \
- ".section .fixup,\"ax\"\n" \
- " .balign 4\n" \
- "5: addi %3, #1\n" \
- " addi %1, #-4\n" \
- " .fillinsn\n" \
- "6: slli %3, #2\n" \
- " add %2, %3\n" \
- " addi %0, #4\n" \
- " .fillinsn\n" \
- "7: ldi r14, #0 ; store zero \n" \
- " .fillinsn\n" \
- "8: addi %2, #-1\n" \
- " stb r14, @%0 ; ACE? \n" \
- " addi %0, #1\n" \
- " bnez %2, 8b\n" \
- " seth r14, #high(9b)\n" \
- " or3 r14, r14, #low(9b)\n" \
- " jmp r14\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .balign 4\n" \
- " .long 0b,6b\n" \
- " .long 1b,5b\n" \
- " .long 2b,7b\n" \
- " .long 3b,7b\n" \
- ".previous\n" \
- : "=&r" (__dst), "=&r" (__src), "=&r" (size), \
- "=&r" (__c) \
- : "0" (to), "1" (from), "2" (size), "3" (size / 4) \
- : "r14", "memory"); \
-} while (0)
-
-
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
*/
-static inline unsigned long __generic_copy_from_user_nocheck(void *to,
- const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- __copy_user_zeroing(to, from, n);
+ prefetchw(to);
+ __copy_user(to, from, n);
return n;
}
-static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
- const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
+ prefetch(from);
__copy_user(to, from, n);
return n;
}
-unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long);
-unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long);
-
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n) \
- __generic_copy_to_user_nocheck((to), (from), (n))
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n) \
-({ \
- might_fault(); \
- __generic_copy_to_user((to), (from), (n)); \
-})
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n) \
- __generic_copy_from_user_nocheck((to), (from), (n))
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n) \
-({ \
- might_fault(); \
- __generic_copy_from_user((to), (from), (n)); \
-})
-
long __must_check strncpy_from_user(char *dst, const char __user *src,
long count);
long __must_check __strncpy_from_user(char *dst,
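The kernel-doc removed above described the copy_to_user()/copy_from_user() contract that the generic wrappers now document in one place: the return value is the number of bytes that could not be copied, zero on success. A hypothetical caller relying on that convention (example_export_stats() is an illustrative name):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical read-style helper: a non-zero copy_to_user() result means
 * some bytes could not be written to user space, reported as -EFAULT. */
static long example_export_stats(void __user *ubuf, const void *stats,
				 size_t len)
{
	if (copy_to_user(ubuf, stats, len))
		return -EFAULT;
	return len;	/* bytes delivered on success */
}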
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index d763f0bd2106..a4d43b5cc102 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__generic_copy_from_user);
-EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_SMP
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
index fd03f2731f20..b3ef2c899f96 100644
--- a/arch/m32r/lib/usercopy.c
+++ b/arch/m32r/lib/usercopy.c
@@ -11,27 +11,6 @@
#include <linux/thread_info.h>
#include <linux/uaccess.h>
-unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- prefetch(from);
- if (access_ok(VERIFY_WRITE, to, n))
- __copy_user(to,from,n);
- return n;
-}
-
-unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- prefetchw(to);
- if (access_ok(VERIFY_READ, from, n))
- __copy_user_zeroing(to,from,n);
- else
- memset(to, 0, n);
- return n;
-}
-
-
/*
* Copy a null terminated string from userspace.
*/
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d140206d5d29..7d345758ea16 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -22,6 +22,7 @@ config M68K
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
+ select ARCH_HAS_RAW_COPY_USER
config RWSEM_GENERIC_SPINLOCK
bool
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 048bf076f7df..531cb9eb3319 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -71,6 +73,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -383,6 +390,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_A2065=y
CONFIG_ARIADNE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index d4de24963f5f..ca91d39555da 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -378,7 +386,6 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index fc0fd3f871f3..23a3d8a691e2 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -372,6 +379,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_ATARILANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -389,7 +397,6 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 52e984a0aa69..95deb95140fe 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index aaeed4422cc9..afae6958db2d 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -69,6 +71,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -363,6 +370,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_HPLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -379,7 +387,6 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 3bbc9b2f0dac..b010734729a7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -68,6 +70,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -379,6 +386,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -398,7 +406,6 @@ CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8f2c0decb2f8..0e414549b235 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -78,6 +80,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -419,6 +426,7 @@ CONFIG_HPLANCE=y
CONFIG_MVME147_NET=y
CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
@@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index c743dd22e96f..b2e687a0ec3d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68030=y
@@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -66,6 +68,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_MVME147_NET=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 2ccaca858f05..cbd8ee24d1bc 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 5599f3fd5fcd..1e82cc944339 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -67,6 +69,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -369,6 +376,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -388,7 +396,6 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PLIP=m
@@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 313bf0a562ad..f9e77f57a972 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 38b61365f769..3c394fcfb368 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3X=y
@@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m
CONFIG_NET_FOU_IP_TUNNELS=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -64,6 +66,7 @@ CONFIG_IPV6=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_ILA=m
CONFIG_IPV6_VTI=m
@@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m
CONFIG_NFT_CT=m
CONFIG_NFT_SET_RBTREE=m
CONFIG_NFT_SET_HASH=m
+CONFIG_NFT_SET_BITMAP=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
@@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m
CONFIG_NET_L3_MASTER_DEV=y
CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
@@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
CONFIG_VXLAN=m
CONFIG_GENEVE=m
CONFIG_GTP=m
@@ -359,6 +366,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_SUN3LANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
@@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
CONFIG_TEST_STRING_HELPERS=m
@@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
@@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index d4f9ccbfa85c..82005d2ff717 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += device.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += futex.h
generic-y += hw_irq.h
generic-y += ioctl.h
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index b4a9b0d5928d..dda58cfe8c22 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
-static inline int test_bit(int nr, const unsigned long *vaddr)
+static inline int test_bit(int nr, const volatile unsigned long *vaddr)
{
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
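
The only functional change here is the volatile qualifier on the source
pointer, which lets callers pass volatile bitmaps to test_bit() without a
cast or a "discards volatile qualifier" warning. A host-side sketch of the
effect (the test_bit body is copied from the patched m68k fallback purely
for illustration; nr >> 5 assumes 32-bit longs as on m68k):

	#include <stdio.h>

	static inline int test_bit(int nr, const volatile unsigned long *vaddr)
	{
		return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
	}

	static volatile unsigned long pending;	/* e.g. touched from an IRQ path */

	int main(void)
	{
		pending = 1UL << 3;
		printf("bit 3 set: %d\n", test_bit(3, &pending));	/* prints 1 */
		return 0;
	}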
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index f5f790c31bf8..77239e81379b 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -122,16 +122,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
wrusp(usp);
}
-#ifdef CONFIG_MMU
-extern int handle_kernel_fault(struct pt_regs *regs);
-#else
-static inline int handle_kernel_fault(struct pt_regs *regs)
-{
- /* Any fault in kernel is fatal on non-mmu */
- return 0;
-}
-#endif
-
/* Forward declaration, a strange C thing */
struct task_struct;
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index 3fadc4a93d97..67b3481d6020 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -4,6 +4,7 @@
#include <asm/uaccess_mm.h>
#endif
+#include <asm/extable.h>
#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
#include <asm-generic/uaccess-unaligned.h>
#else
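
The new include is backed by the "generic-y += extable.h" line added to the
m68k Kbuild above; the duplicated exception-table definitions deleted from
uaccess_mm.h and uaccess_no.h below are now supplied by the shared header,
which looks roughly like this (a sketch of asm-generic/extable.h, not a
verbatim copy):

	struct exception_table_entry
	{
		unsigned long insn, fixup;	/* faulting insn, continuation address */
	};

	struct pt_regs;
	extern int fixup_exception(struct pt_regs *regs);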
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 14054a4e4216..ef856ffeffdf 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -31,24 +31,6 @@ static inline int access_ok(int type, const void __user *addr,
#define MOVES "move"
#endif
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
extern int __put_user_bad(void);
extern int __get_user_bad(void);
@@ -197,39 +179,55 @@ asm volatile ("\n" \
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
+#define __suffix0
+#define __suffix1 b
+#define __suffix2 w
+#define __suffix4 l
+
+#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
asm volatile ("\n" \
"1: "MOVES"."#s1" (%2)+,%3\n" \
" move."#s1" %3,(%1)+\n" \
+ " .ifnc \""#s2"\",\"\"\n" \
"2: "MOVES"."#s2" (%2)+,%3\n" \
" move."#s2" %3,(%1)+\n" \
" .ifnc \""#s3"\",\"\"\n" \
"3: "MOVES"."#s3" (%2)+,%3\n" \
" move."#s3" %3,(%1)+\n" \
" .endif\n" \
+ " .endif\n" \
"4:\n" \
" .section __ex_table,\"a\"\n" \
" .align 4\n" \
" .long 1b,10f\n" \
+ " .ifnc \""#s2"\",\"\"\n" \
" .long 2b,20f\n" \
" .ifnc \""#s3"\",\"\"\n" \
" .long 3b,30f\n" \
" .endif\n" \
+ " .endif\n" \
" .previous\n" \
"\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
- "10: clr."#s1" (%1)+\n" \
- "20: clr."#s2" (%1)+\n" \
+ "10: addq.l #"#n1",%0\n" \
+ " .ifnc \""#s2"\",\"\"\n" \
+ "20: addq.l #"#n2",%0\n" \
" .ifnc \""#s3"\",\"\"\n" \
- "30: clr."#s3" (%1)+\n" \
+ "30: addq.l #"#n3",%0\n" \
+ " .endif\n" \
" .endif\n" \
- " moveq.l #"#n",%0\n" \
" jra 4b\n" \
" .previous\n" \
: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
: : "memory")
+#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
+ ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
+#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \
+ ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \
+ __suffix##n1, __suffix##n2, __suffix##n3)
+
static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -237,37 +235,37 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
switch (n) {
case 1:
- __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
+ __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
break;
case 2:
- __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2);
+ __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
break;
case 3:
- __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
break;
case 4:
- __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
break;
case 5:
- __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
break;
case 6:
- __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
break;
case 7:
- __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
break;
case 8:
- __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
break;
case 9:
- __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
break;
case 10:
- __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
break;
case 12:
- __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
+ __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
break;
default:
/* we limit the inlined version to 3 moves */
@@ -358,21 +356,23 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
return res;
}
-#define __copy_from_user(to, from, n) \
-(__builtin_constant_p(n) ? \
- __constant_copy_from_user(to, from, n) : \
- __generic_copy_from_user(to, from, n))
-
-#define __copy_to_user(to, from, n) \
-(__builtin_constant_p(n) ? \
- __constant_copy_to_user(to, from, n) : \
- __generic_copy_to_user(to, from, n))
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (__builtin_constant_p(n))
+ return __constant_copy_from_user(to, from, n);
+ return __generic_copy_from_user(to, from, n);
+}
-#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
-#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (__builtin_constant_p(n))
+ return __constant_copy_to_user(to, from, n);
+ return __generic_copy_to_user(to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : TASK_SIZE)
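
Note that the rewritten fixup stubs above only add the size of the failed
step to the error count instead of clearing the destination; with
raw_copy_from_user() the return value is simply "bytes not copied", and
zero-filling the tail of the kernel buffer is done once by the generic
copy_from_user(). A simplified sketch of that generic wrapper as used in
this series (not m68k code):

	static inline unsigned long
	_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		might_fault();
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero the uncopied tail */
		return res;	/* bytes NOT copied */
	}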
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index e77ce66c14d5..e482c3899ff1 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -23,25 +23,6 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
}
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*/
@@ -120,13 +101,21 @@ extern int __get_user_bad(void);
: "=d" (x) \
: "m" (*__ptr(ptr)))
-#define copy_from_user(to, from, n) (memcpy(to, from, n), 0)
-#define copy_to_user(to, from, n) (memcpy(to, from, n), 0)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ memcpy(to, (__force const void *)from, n);
+ return 0;
+}
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((__force void *)to, from, n);
+ return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* Copy a null terminated string from userspace.
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index a857d82ec509..aab1edd0d4ba 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 379
+#define NR_syscalls 380
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 9fe674bf911f..25589f5b8669 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
#define __NR_copy_file_range 376
#define __NR_preadv2 377
#define __NR_pwritev2 378
+#define __NR_statx 379
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 093b7c42fb85..6f945bb5ffbd 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -88,7 +88,7 @@ static inline int frame_extra_sizes(int f)
return frame_size_change[f];
}
-int handle_kernel_fault(struct pt_regs *regs)
+int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
struct pt_regs *tregs;
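
The rename matches the prototype now provided by <asm/extable.h>, so the
generic fault path can call the handler by its common name. The overall
shape is the usual exception-table walk; a generic sketch for illustration
only (the real m68k version goes on to rebuild the saved exception frame
through tregs):

	int fixup_exception(struct pt_regs *regs)
	{
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(regs->pc);	/* insn that faulted */
		if (!fixup)
			return 0;	/* no fixup entry: genuinely bad kernel access */

		regs->pc = fixup->fixup;	/* resume at the out-of-line fixup code */
		return 1;
	}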
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index d6fd6d9ced24..8c9fcfafe0dd 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -399,3 +399,4 @@ ENTRY(sys_call_table)
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2
+ .long sys_statx
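
With __NR_statx reserved, NR_syscalls bumped to 380 and the table entry
added, statx(2) becomes reachable from m68k userspace. A hypothetical test
program invoking it by raw syscall number (379, matching the definition
added above; header availability depends on the installed libc and kernel
headers):

	#include <stdio.h>
	#include <fcntl.h>		/* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/stat.h>		/* struct statx, STATX_BASIC_STATS */

	#define NR_statx_m68k 379	/* matches the __NR_statx value added above */

	int main(void)
	{
		struct statx stx;

		if (syscall(NR_statx_m68k, AT_FDCWD, "/", AT_SYMLINK_NOFOLLOW,
			    STATX_BASIC_STATS, &stx) == -1) {
			perror("statx");
			return 1;
		}
		printf("mode=%o size=%llu\n", stx.stx_mode,
		       (unsigned long long)stx.stx_size);
		return 0;
	}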
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index a926d2c88898..c1cc4e99aa94 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1016,8 +1016,13 @@ asmlinkage void trap_c(struct frame *fp)
/* traced a trapping instruction on a 68020/30,
* real exception will be executed afterwards.
*/
- } else if (!handle_kernel_fault(&fp->ptregs))
- bad_super_trap(fp);
+ return;
+ }
+#ifdef CONFIG_MMU
+ if (fixup_exception(&fp->ptregs))
+ return;
+#endif
+ bad_super_trap(fp);
return;
}
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index a76b73abaf64..7646e461aa62 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -30,19 +30,13 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from,
"6:\n"
" .section .fixup,\"ax\"\n"
" .even\n"
- "10: move.l %0,%3\n"
- "7: clr.l (%2)+\n"
- " subq.l #1,%3\n"
- " jne 7b\n"
- " lsl.l #2,%0\n"
+ "10: lsl.l #2,%0\n"
" btst #1,%5\n"
" jeq 8f\n"
- "30: clr.w (%2)+\n"
- " addq.l #2,%0\n"
+ "30: addq.l #2,%0\n"
"8: btst #0,%5\n"
" jeq 6b\n"
- "50: clr.b (%2)+\n"
- " addq.l #1,%0\n"
+ "50: addq.l #1,%0\n"
" jra 6b\n"
" .previous\n"
"\n"
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index bd66a0b20c6b..2795e4ca09d7 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -32,7 +32,7 @@ int send_fault_sig(struct pt_regs *regs)
force_sig_info(siginfo.si_signo,
&siginfo, current);
} else {
- if (handle_kernel_fault(regs))
+ if (fixup_exception(regs))
return -1;
//if (siginfo.si_signo == SIGBUS)
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 5b7a45d99cfb..ecce0c5ec8e8 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -1,5 +1,6 @@
config METAG
def_bool y
+ select ARCH_HAS_RAW_COPY_USER
select EMBEDDED
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index f9b9df5d6de9..8f940553a579 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -8,6 +8,7 @@ generic-y += dma.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += futex.h
diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h
index ffa3a3a2ecad..0c151e5af079 100644
--- a/arch/metag/include/asm/pgtable.h
+++ b/arch/metag/include/asm/pgtable.h
@@ -6,6 +6,7 @@
#define _METAG_PGTABLE_H
#include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 83de7554d2b3..5ebc2850690e 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -47,28 +47,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
(unsigned long)(size))
-static inline int verify_area(int type, const void *addr, unsigned long size)
-{
- return access_ok(type, addr, size) ? 0 : -EFAULT;
-}
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
/*
* These are the main single-value transfer routines. They automatically
@@ -193,36 +172,10 @@ extern long __must_check strnlen_user(const char __user *src, long count);
#define strlen_user(str) strnlen_user(str, 32767)
-extern unsigned long __must_check __copy_user_zeroing(void *to,
- const void __user *from,
- unsigned long n);
-
-static inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n)))
- return __copy_user_zeroing(to, from, n);
- memset(to, 0, n);
- return n;
-}
-
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
-#define __copy_from_user_inatomic __copy_from_user
-
-extern unsigned long __must_check __copy_user(void __user *to,
- const void *from,
- unsigned long n);
-
-static inline unsigned long copy_to_user(void __user *to, const void *from,
- unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_user(to, from, n);
- return n;
-}
-
-#define __copy_to_user(to, from, n) __copy_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
+extern unsigned long raw_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
/*
* Zero Userspace
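
With metag now selecting ARCH_HAS_RAW_COPY_USER, these two extern routines
are all the architecture has to provide; copy_from_user()/copy_to_user()
are built on top of them by the generic layer. For the write direction no
zero-fill is needed, so the wrapper reduces to roughly the following
(simplified sketch, not metag code):

	static inline unsigned long
	_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		might_fault();
		if (access_ok(VERIFY_WRITE, to, n))
			n = raw_copy_to_user(to, from, n);
		return n;	/* bytes NOT copied */
	}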
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9c8e88..e8a4ea83cabb 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
@@ -260,27 +259,31 @@
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #32\n" \
"23:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "SUB %3, %3, #32\n" \
"24:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #32\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "25:\n" \
+ "27:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "28:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
"SUB %3, %3, #32\n" \
- "27:\n" \
+ "30:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "28:\n" \
+ "31:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %0, %0, #8\n" \
- "29:\n" \
+ "33:\n" \
"SETL [%0++], D0.7, D1.7\n" \
"SUB %3, %3, #32\n" \
"1:" \
@@ -312,11 +315,15 @@
" .long 26b,3b\n" \
" .long 27b,3b\n" \
" .long 28b,3b\n" \
- " .long 29b,4b\n" \
+ " .long 29b,3b\n" \
+ " .long 30b,3b\n" \
+ " .long 31b,3b\n" \
+ " .long 32b,3b\n" \
+ " .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -342,7 +349,7 @@
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"23:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "24:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
- "25:\n" \
+ "24:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "25:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #16\n" \
"27:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
+ "SUB %3, %3, #16\n" \
+ "30:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %3, %3, #16\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "29:\n" \
+ "33:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "30:\n" \
+ "34:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "35:\n" \
"SUB %3, %3, #16\n" \
- "31:\n" \
+ "36:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "32:\n" \
+ "37:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "38:\n" \
"SUB %3, %3, #16\n" \
- "33:\n" \
+ "39:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "34:\n" \
+ "40:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "41:\n" \
"SUB %3, %3, #16\n" \
- "35:\n" \
+ "42:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "36:\n" \
+ "43:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "44:\n" \
"SUB %0, %0, #4\n" \
- "37:\n" \
+ "45:\n" \
"SETD [%0++], D0.7\n" \
"SUB %3, %3, #16\n" \
"1:" \
@@ -483,11 +498,19 @@
" .long 34b,3b\n" \
" .long 35b,3b\n" \
" .long 36b,3b\n" \
- " .long 37b,4b\n" \
+ " .long 37b,3b\n" \
+ " .long 38b,3b\n" \
+ " .long 39b,3b\n" \
+ " .long 40b,3b\n" \
+ " .long 41b,3b\n" \
+ " .long 42b,3b\n" \
+ " .long 43b,3b\n" \
+ " .long 44b,3b\n" \
+ " .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -513,7 +536,7 @@
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -525,8 +548,8 @@
"SUB %1, %1, D0Ar2\n" \
"SUB %3, %3, D1Ar1\n")
-unsigned long __copy_user(void __user *pdst, const void *psrc,
- unsigned long n)
+unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
+ unsigned long n)
{
register char __user *dst asm ("A0.2") = pdst;
register const char *src asm ("A1.2") = psrc;
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
}
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
#endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
+ if (retn)
+ return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
+ if (retn)
+ return retn + n;
}
switch (n) {
@@ -609,24 +648,26 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
break;
}
+ /*
+ * If we get here, retn correctly reflects the number of failing
+ * bytes.
+ */
return retn;
}
-EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(raw_copy_to_user);
#define __asm_copy_from_user_1(to, from, ret) \
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
- "3: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
-#define __asm_copy_from_user_5(to, from, ret) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "4: SETW [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "4: SETD [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "6: SETW [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "6: SETD [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "8: SETW [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "10: SETB [%0++],D1Ar1\n", \
- "11: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "8: SETD [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
- " MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
- " SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 8 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*8 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ * and stored in D0Ar2
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
+ *	 LSM_STEP will be 0. I.e. we do 4 writes in our case; if
+ *	 a fault happens at the 4th write, LSM_STEP will be 0
+ *	 instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #8\n")
+ "LSR D0Ar2, D0Ar2, #5\n" \
+ "ANDS D0Ar2, D0Ar2, #0x38\n" \
+ "ADDZ D0Ar2, D0Ar2, #32\n" \
+ "SUB %1, %1, D0Ar2\n")
/* rewind 'from' pointer when a fault occurs
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 4 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*4 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ * and stored in D0Ar2
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
+ *	 LSM_STEP will be 0. I.e. we do 4 writes in our case; if
+ *	 a fault happens at the 4th write, LSM_STEP will be 0
+ *	 instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #4\n")
+ "LSR D0Ar2, D0Ar2, #6\n" \
+ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
+ "ADDZ D0Ar2, D0Ar2, #16\n" \
+ "SUB %1, %1, D0Ar2\n")
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
- inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
- unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+ unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
if ((unsigned long) src & 1) {
__asm_copy_from_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
- /* We only need one check after the unalignment-adjustments,
- because if both adjustments were done, either both or
- neither reference had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
-
#ifdef USE_RAPF
/* 64 bit copy loop */
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
#endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
/* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
-
- copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
- return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
asm volatile ( \
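
The new rewind fixups decode LSM_STEP (bits 10:8 of TXSTATUS, already held
in D0Ar2) into a byte count: LSM_STEP * 8 for the 64-bit RAPF loop and
LSM_STEP * 4 for the 32-bit one, with 0 treated as a full block per the
NOTE above (a fault at the last operation reports LSM_STEP as 0 rather
than 4). A small host-side model of the 64-bit arithmetic (hypothetical
helper, for illustration only):

	#include <stdio.h>

	/* Mirrors "LSR #5; ANDS #0x38; ADDZ #32" from the 64-bit fixup above. */
	static unsigned int rapf64_rewind_bytes(unsigned int txstatus)
	{
		unsigned int bytes = (txstatus >> 5) & 0x38;	/* LSM_STEP * 8 */

		return bytes ? bytes : 32;	/* ADDZ: step 0 means a whole 4-op block */
	}

	int main(void)
	{
		unsigned int step;

		for (step = 0; step <= 4; step++)
			printf("LSM_STEP=%u -> rewind %u bytes\n",
			       step, rapf64_rewind_bytes(step << 8));
		return 0;
	}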
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 85885a501dce..1aff3658a104 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -34,6 +34,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
+ select ARCH_HAS_RAW_COPY_USER
config SWAP
def_bool n
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 1732ec13b211..56830ff65333 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += barrier.h
generic-y += clkdev.h
generic-y += device.h
generic-y += exec.h
+generic-y += extable.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index fd850879854d..d506bb0893f9 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t;
# else /* CONFIG_MMU */
typedef struct { unsigned long ste[64]; } pmd_t;
typedef struct { pmd_t pue[1]; } pud_t;
-typedef struct { pud_t pge[1]; } pgd_t;
+typedef struct { pud_t p4e[1]; } p4d_t;
+typedef struct { p4d_t pge[1]; } pgd_t;
# endif /* CONFIG_MMU */
# define pte_val(x) ((x).pte)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index b132cd301e66..38f2c9ccef10 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -17,6 +17,7 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/extable.h>
#include <linux/string.h>
/*
@@ -47,22 +48,6 @@
# define segment_eq(a, b) ((a).seg == (b).seg)
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
#ifndef CONFIG_MMU
/* Check against bounds of physical memory */
@@ -351,39 +336,19 @@ extern long __user_bad(void);
__gu_err; \
})
-
-/* copy_to_from_user */
-#define __copy_from_user(to, from, n) \
- __copy_tofrom_user((__force void __user *)(to), \
- (void __user *)(from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
- __copy_from_user((to), (from), (n))
-
-static inline long copy_from_user(void *to,
- const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long res = n;
- might_fault();
- if (likely(access_ok(VERIFY_READ, from, n)))
- res = __copy_from_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
+ return __copy_tofrom_user((__force void __user *)to, from, n);
}
-#define __copy_to_user(to, from, n) \
- __copy_tofrom_user((void __user *)(to), \
- (__force const void __user *)(from), (n))
-#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n))
-
-static inline long copy_to_user(void __user *to,
- const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_fault();
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_to_user(to, from, n);
- return n;
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* Copy a null terminated string from userspace.
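Defining INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER asks the generic uaccess layer to generate copy_from_user()/copy_to_user() as inline wrappers around the raw routines instead of calling the out-of-line library versions. A simplified sketch of that switch (the generic header's exact structure, might_fault() and object-size checks are assumed, not quoted):

/*
 * Rough sketch of what the INLINE_COPY_* defines select in the generic
 * layer (copy_to_user() direction shown; the read direction additionally
 * zeroes the uncopied tail as sketched earlier).
 */
#ifdef INLINE_COPY_TO_USER
static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = raw_copy_to_user(to, from, n);
        return n;
}
#else
extern unsigned long copy_to_user(void __user *to, const void *from,
                                  unsigned long n);     /* out of line in lib/ */
#endif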
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a008a9f03072..aff5633bfe4d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -69,6 +69,7 @@ config MIPS
select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_ARCH_HARDENED_USERCOPY
+ select ARCH_HAS_RAW_COPY_USER
menu "Machine selection"
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
index a5b427909b5c..036d56cc4591 100644
--- a/arch/mips/cavium-octeon/cpu.c
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -10,7 +10,9 @@
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/prefetch.h>
+#include <linux/ptrace.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <asm/cop2.h>
#include <asm/current.h>
diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
index 4d22365844af..cfb4a146cf17 100644
--- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c
+++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c
@@ -9,6 +9,7 @@
#include <asm/cop2.h>
#include <linux/export.h>
#include <linux/interrupt.h>
+#include <linux/sched/task_stack.h>
#include "octeon-crypto.h"
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index cfd97f6448bb..0a7c9834b81c 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -140,15 +140,6 @@
.set noat
/*
- * t7 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
- b __copy_user_common
- li t7, 1
- END(__copy_user_inatomic)
-
-/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy)
__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
- li t7, 0 /* not inatomic */
-__copy_user_common:
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
@@ -414,25 +403,7 @@ l_exc:
LOAD t0, TI_TASK($28)
LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
SUB len, AT, t0 # len number of uncopied bytes
- bnez t7, 2f /* Skip the zeroing out part if inatomic */
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- beqz len, done
- SUB src, len, 1
-1: sb zero, 0(dst)
- ADD dst, dst, 1
- bnez src, 1b
- SUB src, src, 1
-2: jr ra
+ jr ra
nop
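With the zeroing loop gone, the l_exc fixup above only has to report the residual count. In C terms the arithmetic it performs is (register names taken from the asm comments; the helper name is illustrative):

/*
 * What the fault path computes: AT holds src + len from the copy prologue,
 * t0 is the address just past the last byte that was read successfully,
 * and their difference is the number of bytes left uncopied.  The
 * destination is no longer cleared here; the generic copy_from_user()
 * wrapper zeroes that tail instead.
 */
static unsigned long sketch_fault_residual(unsigned long src_end,   /* AT */
                                           unsigned long fault_addr) /* t0 */
{
        return src_end - fault_addr;
}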
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 4b94b7fbafa3..3de786545ded 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -12,6 +12,7 @@
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 321752bcbab6..f94455f964ec 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index d21f3da7bdb6..6f94bed571c4 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -16,6 +16,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
extern int temp_tlb_entry;
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 514cbc0a6a67..130a2a6c1531 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,6 +17,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
+#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
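__ARCH_USE_5LEVEL_HACK, defined before the asm-generic nop*d headers are pulled in, folds the newly introduced p4d page-table level into the pgd so walks written for the old depth keep compiling unchanged. Illustratively (a hypothetical helper, not the real fixup header):

/*
 * Illustration of the folded level: with the 5-level hack in effect a
 * "p4d" is simply the pgd entry it would otherwise hang off, so offsetting
 * into it is a no-op.
 */
static inline p4d_t *sketch_p4d_offset(pgd_t *pgd, unsigned long address)
{
        return (p4d_t *)pgd;    /* single folded entry, nothing to index */
}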
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 70ca8eee166a..99e629a590a5 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -128,23 +128,14 @@ static inline bool eva_kernel_access(void)
* this function, memory access functions may still return -EFAULT.
*/
-#define __access_mask get_fs().seg
-
-#define __access_ok(addr, size, mask) \
-({ \
- unsigned long __addr = (unsigned long) (addr); \
- unsigned long __size = size; \
- unsigned long __mask = mask; \
- unsigned long __ok; \
- \
- __chk_user_ptr(addr); \
- __ok = (signed long)(__mask & (__addr | (__addr + __size) | \
- __ua_size(__size))); \
- __ok == 0; \
-})
+static inline int __access_ok(const void __user *p, unsigned long size)
+{
+ unsigned long addr = (unsigned long)p;
+ return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+}
#define access_ok(type, addr, size) \
- likely(__access_ok((addr), (size), __access_mask))
+ likely(__access_ok((addr), (size)))
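The inline __access_ok() ORs the start address, the end address and a size-derived term, then masks the result with the current segment limit; anything above the limit leaves a bit set and the check fails (the __ua_size() term additionally catches sizes large enough to wrap). A worked example with an assumed 32-bit user limit of 0x80000000, size term omitted for brevity:

/*
 * Worked example of the range check with an assumed seg value of
 * 0x80000000 for user accesses: any start or end address with the top
 * bit set makes the AND non-zero and the access is rejected.  With
 * KERNEL_DS the seg value is 0, so everything passes.
 */
static inline int sketch_range_ok(unsigned long addr, unsigned long size)
{
        const unsigned long seg = 0x80000000UL; /* assumed USER_DS limit */

        return (seg & (addr | (addr + size))) == 0;
}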
/*
* put_user: - Write a simple value into user space.
@@ -806,157 +797,7 @@ extern void __put_user_unaligned_unknown(void);
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-#ifndef CONFIG_EVA
-#define __invoke_copy_to_user(to, from, n) \
-({ \
- register void __user *__cu_to_r __asm__("$4"); \
- register const void *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- __MODULE_JAL(__copy_user) \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_to_kernel(to, from, n) \
- __invoke_copy_to_user(to, from, n)
-
-#endif
-
-/*
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define __copy_to_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_from, __cu_len, true); \
- might_fault(); \
- \
- if (eva_kernel_access()) \
- __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
- __cu_len); \
- else \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
- __cu_len; \
-})
-
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
-
-#define __copy_to_user_inatomic(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_from, __cu_len, true); \
- \
- if (eva_kernel_access()) \
- __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
- __cu_len); \
- else \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
- __cu_len; \
-})
-
-#define __copy_from_user_inatomic(to, from, n) \
-({ \
- void *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_to, __cu_len, false); \
- \
- if (eva_kernel_access()) \
- __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
- __cu_from,\
- __cu_len);\
- else \
- __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
- __cu_from, \
- __cu_len); \
- __cu_len; \
-})
-
-/*
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-#define copy_to_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_from, __cu_len, true); \
- \
- if (eva_kernel_access()) { \
- __cu_len = __invoke_copy_to_kernel(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, \
- __cu_from, \
- __cu_len); \
- } \
- } \
- __cu_len; \
-})
-
-#ifndef CONFIG_EVA
-
-#define __invoke_copy_from_user(to, from, n) \
+#define __invoke_copy_from(func, to, from, n) \
({ \
register void *__cu_to_r __asm__("$4"); \
register const void __user *__cu_from_r __asm__("$5"); \
@@ -967,7 +808,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r = (n); \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
- __MODULE_JAL(__copy_user) \
+ __MODULE_JAL(func) \
".set\tnoat\n\t" \
__UA_ADDU "\t$1, %1, %2\n\t" \
".set\tat\n\t" \
@@ -979,33 +820,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \
})
-#define __invoke_copy_from_kernel(to, from, n) \
- __invoke_copy_from_user(to, from, n)
-
-/* For userland <-> userland operations */
-#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from_user(to, from, n)
-
-/* For kernel <-> kernel operations */
-#define ___invoke_copy_in_kernel(to, from, n) \
- __invoke_copy_from_user(to, from, n)
-
-#define __invoke_copy_from_user_inatomic(to, from, n) \
+#define __invoke_copy_to(func, to, from, n) \
({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
+ register void __user *__cu_to_r __asm__("$4"); \
+ register const void *__cu_from_r __asm__("$5"); \
register long __cu_len_r __asm__("$6"); \
\
__cu_to_r = (to); \
__cu_from_r = (from); \
__cu_len_r = (n); \
__asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- __MODULE_JAL(__copy_user_inatomic) \
- ".set\tnoat\n\t" \
- __UA_ADDU "\t$1, %1, %2\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
+ __MODULE_JAL(func) \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
@@ -1013,228 +838,79 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len_r; \
})
-#define __invoke_copy_from_kernel_inatomic(to, from, n) \
- __invoke_copy_from_user_inatomic(to, from, n) \
+#define __invoke_copy_from_kernel(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_kernel(to, from, n) \
+ __invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_kernel(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#ifndef CONFIG_EVA
+#define __invoke_copy_from_user(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
+
+#define __invoke_copy_to_user(to, from, n) \
+ __invoke_copy_to(__copy_user, to, from, n)
+
+#define ___invoke_copy_in_user(to, from, n) \
+ __invoke_copy_from(__copy_user, to, from, n)
#else
/* EVA specific functions */
-extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
- size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
-#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
-({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- __MODULE_JAL(func_ptr) \
- ".set\tnoat\n\t" \
- __UA_ADDU "\t$1, %1, %2\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
-({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- __MODULE_JAL(func_ptr) \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
/*
* Source or destination address is in userland. We need to go through
* the TLB
*/
#define __invoke_copy_from_user(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
-
-#define __invoke_copy_from_user_inatomic(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, \
- __copy_user_inatomic_eva)
+ __invoke_copy_from(__copy_from_user_eva, to, from, n)
#define __invoke_copy_to_user(to, from, n) \
- __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
+ __invoke_copy_to(__copy_to_user_eva, to, from, n)
#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
-
-/*
- * Source or destination address in the kernel. We are not going through
- * the TLB
- */
-#define __invoke_copy_from_kernel(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
-
-#define __invoke_copy_from_kernel_inatomic(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
-
-#define __invoke_copy_to_kernel(to, from, n) \
- __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
-
-#define ___invoke_copy_in_kernel(to, from, n) \
- __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
+ __invoke_copy_from(__copy_in_user_eva, to, from, n)
#endif /* CONFIG_EVA */
-/*
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define __copy_from_user(to, from, n) \
-({ \
- void *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_to, __cu_len, false); \
- \
- if (eva_kernel_access()) { \
- __cu_len = __invoke_copy_from_kernel(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
- } \
- __cu_len; \
-})
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return __invoke_copy_to_kernel(to, from, n);
+ else
+ return __invoke_copy_to_user(to, from, n);
+}
-/*
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-#define copy_from_user(to, from, n) \
-({ \
- void *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- \
- check_object_size(__cu_to, __cu_len, false); \
- \
- if (eva_kernel_access()) { \
- __cu_len = __invoke_copy_from_kernel(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, \
- __cu_from, \
- __cu_len); \
- } else { \
- memset(__cu_to, 0, __cu_len); \
- } \
- } \
- __cu_len; \
-})
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return __invoke_copy_from_kernel(to, from, n);
+ else
+ return __invoke_copy_from_user(to, from, n);
+}
-#define __copy_in_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- if (eva_kernel_access()) { \
- __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
- __cu_len); \
- } else { \
- might_fault(); \
- __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
- __cu_len); \
- } \
- __cu_len; \
-})
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
-#define copy_in_user(to, from, n) \
-({ \
- void __user *__cu_to; \
- const void __user *__cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- if (eva_kernel_access()) { \
- __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
- __cu_len); \
- } else { \
- if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
- access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
- might_fault(); \
- __cu_len = ___invoke_copy_in_user(__cu_to, \
- __cu_from, \
- __cu_len); \
- } \
- } \
- __cu_len; \
-})
+static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ if (eva_kernel_access())
+ return ___invoke_copy_in_kernel(to, from, n);
+ else
+ return ___invoke_copy_in_user(to, from, n);
+}
extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index d8f1cf1ec370..550e7d03090a 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1200,7 +1200,7 @@ fpu_emul:
case lwl_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1273,7 +1273,7 @@ fpu_emul:
case lwr_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1347,7 +1347,7 @@ fpu_emul:
case swl_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1417,7 +1417,7 @@ fpu_emul:
case swr_op:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1492,7 +1492,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1611,7 +1611,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1730,7 +1730,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1848,7 +1848,7 @@ fpu_emul:
rt = regs->regs[MIPSInst_RT(inst)];
vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
- if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGSEGV;
break;
@@ -1965,7 +1965,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
@@ -2021,7 +2021,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
@@ -2084,7 +2084,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
@@ -2145,7 +2145,7 @@ fpu_emul:
err = SIGBUS;
break;
}
- if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) {
current->thread.cp0_baduaddr = vaddr;
err = SIGBUS;
break;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 3daa2cae50b0..1b070a76fcdd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/smp.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e077ea3e11fb..e398cbc3d776 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/atomic.h>
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index f1d17ece4181..1dfa7f5796c7 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
if (unlikely(addr & 3))
return -EINVAL;
- if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
+ if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4)))
return -EINVAL;
if (cpu_has_llsc && R10000_LLSC_WAR) {
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c3031f18c572..3114a2ed1f4e 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -562,39 +562,9 @@
LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
- bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
- /*
- * Here's where we rely on src and dst being incremented in tandem,
- * See (3) above.
- * dst += (fault addr - src) to put dst at first byte to clear
- */
- ADD dst, t0 # compute start address in a1
- SUB dst, src
- /*
- * Clear len bytes starting at dst. Can't call __bzero because it
- * might modify len. An inefficient loop for these rare times...
- */
- .set reorder /* DADDI_WAR */
- SUB src, len, 1
- beqz len, .Ldone\@
- .set noreorder
-1: sb zero, 0(dst)
- ADD dst, dst, 1
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- bnez src, 1b
- SUB src, src, 1
-#else
- .set push
- .set noat
- li v1, 1
- bnez src, 1b
- SUB src, src, v1
- .set pop
-#endif
jr ra
nop
-
#define SEXC(n) \
.set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u\@: \
@@ -673,15 +643,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
END(__rmemcpy)
/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-EXPORT_SYMBOL(__copy_user_inatomic)
- b __copy_user_common
- li t6, 1
- END(__copy_user_inatomic)
-
-/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy)
.L__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
- li t6, 0 /* not inatomic */
-__copy_user_common:
/* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
@@ -708,20 +667,12 @@ __copy_user_common:
* space
*/
-LEAF(__copy_user_inatomic_eva)
-EXPORT_SYMBOL(__copy_user_inatomic_eva)
- b __copy_from_user_common
- li t6, 1
- END(__copy_user_inatomic_eva)
-
/*
* __copy_from_user (EVA)
*/
LEAF(__copy_from_user_eva)
EXPORT_SYMBOL(__copy_from_user_eva)
- li t6, 0 /* not inatomic */
-__copy_from_user_common:
__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
END(__copy_from_user_eva)
diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c
index ea13764d0a03..621d6af5f6eb 100644
--- a/arch/mips/loongson64/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson64/loongson-3/cop2-ex.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/notifier.h>
+#include <linux/ptrace.h>
#include <asm/fpu.h>
#include <asm/cop2.h>
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 10d86d54880a..bddf1ef553a4 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/irq.h>
diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c
index 52bc5de42005..21e439b3db70 100644
--- a/arch/mips/netlogic/xlp/cop2-ex.c
+++ b/arch/mips/netlogic/xlp/cop2-ex.c
@@ -9,11 +9,14 @@
* Copyright (C) 2009 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/notifier.h>
#include <linux/prefetch.h>
+#include <linux/ptrace.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <asm/cop2.h>
#include <asm/current.h>
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 5e645c9a3162..16ace558cd9d 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -18,7 +18,7 @@ struct stackframe {
static inline int get_mem(unsigned long addr, unsigned long *result)
{
unsigned long *address = (unsigned long *) addr;
- if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long)))
+ if (!access_ok(VERIFY_READ, address, sizeof(unsigned long)))
return -1;
if (__copy_from_user_inatomic(result, address, sizeof(unsigned long)))
return -3;
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 1f2a5bc4779e..75460e1e106b 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index d12879eb2b1f..83efe03d5c60 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -12,7 +12,9 @@
#include <linux/signal.h> /* for SIGBUS */
#include <linux/sched.h>	/* show_regs(), force_sig() */
#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <asm/ptrace.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn0/hub.h>
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f5ed45e8f442..4cd47d23d81a 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -8,10 +8,13 @@
*/
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
+
#include <asm/page.h>
#include <asm/processor.h>
+#include <asm/ptrace.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>
diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c
index 57d8c7486fe6..c1f12a9cf305 100644
--- a/arch/mips/sgi-ip32/ip32-berr.c
+++ b/arch/mips/sgi-ip32/ip32-berr.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 8bd415c8729f..b3b442def423 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/notifier.h>
#include <linux/delay.h>
#include <linux/rtc/ds1685.h>
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 38e3494bfb63..a96f3dcb0119 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -16,6 +16,7 @@ config MN10300
select OLD_SIGACTION
select HAVE_DEBUG_STACKOVERFLOW
select ARCH_NO_COHERENT_DMA_MMAP
+ select ARCH_HAS_RAW_COPY_USER
config AM33_2
def_bool n
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 97f64c723a0c..ed810e7206e8 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -2,6 +2,7 @@
generic-y += barrier.h
generic-y += clkdev.h
generic-y += exec.h
+generic-y += extable.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/mn10300/include/asm/page.h b/arch/mn10300/include/asm/page.h
index 3810a6f740fd..dfe730a5ede0 100644
--- a/arch/mn10300/include/asm/page.h
+++ b/arch/mn10300/include/asm/page.h
@@ -57,6 +57,7 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 2da7b0fed4aa..c6966474827f 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -66,26 +66,7 @@ static inline int ___range_ok(unsigned long addr, unsigned int size)
#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
@@ -294,170 +275,19 @@ do { \
} \
} while (0)
-#define __copy_user_zeroing(to, from, size) \
-do { \
- if (size) { \
- void *__to = to; \
- const void *__from = from; \
- int w; \
- asm volatile( \
- "0: movbu (%0),%3;\n" \
- "1: movbu %3,(%1);\n" \
- " inc %0;\n" \
- " inc %1;\n" \
- " add -1,%2;\n" \
- " bne 0b;\n" \
- "2:\n" \
- " .section .fixup,\"ax\"\n" \
- "3:\n" \
- " mov %2,%0\n" \
- " clr %3\n" \
- "4: movbu %3,(%1);\n" \
- " inc %1;\n" \
- " add -1,%2;\n" \
- " bne 4b;\n" \
- " mov %0,%2\n" \
- " jmp 2b\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .balign 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,3b\n" \
- " .previous\n" \
- : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
- : "0"(__from), "1"(__to), "2"(size) \
- : "cc", "memory"); \
- } \
-} while (0)
-
-/* We let the __ versions of copy_from/to_user inline, because they're often
- * used in fast paths and have only a small space overhead.
- */
-static inline
-unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
- unsigned long n)
-{
- __copy_user_zeroing(to, from, n);
- return n;
-}
-
-static inline
-unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
- unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
__copy_user(to, from, n);
return n;
}
-
-#if 0
-#error "don't use - these macros don't increment to & from pointers"
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user(to, from, size) \
-do { \
- asm volatile( \
- " mov %0,a0;\n" \
- "0: movbu (%1),d3;\n" \
- "1: movbu d3,(%2);\n" \
- " add -1,a0;\n" \
- " bne 0b;\n" \
- "2:;" \
- ".section .fixup,\"ax\"\n" \
- "3: jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .balign 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : \
- : "d"(size), "d"(to), "d"(from) \
- : "d3", "a0"); \
-} while (0)
-
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user_zeroing(to, from, size) \
-do { \
- asm volatile( \
- " mov %0,a0;\n" \
- "0: movbu (%1),d3;\n" \
- "1: movbu d3,(%2);\n" \
- " add -1,a0;\n" \
- " bne 0b;\n" \
- "2:;" \
- ".section .fixup,\"ax\"\n" \
- "3: jmp 2b\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .balign 4\n" \
- " .long 0b,3b\n" \
- " .long 1b,3b\n" \
- ".previous" \
- : \
- : "d"(size), "d"(to), "d"(from) \
- : "d3", "a0"); \
-} while (0)
-
-static inline
-unsigned long __constant_copy_to_user(void *to, const void *from,
- unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- __constant_copy_user(to, from, n);
- return n;
-}
-
-static inline
-unsigned long __constant_copy_from_user(void *to, const void *from,
- unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- __constant_copy_user_zeroing(to, from, n);
- return n;
-}
-
-static inline
-unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
- unsigned long n)
-{
- __constant_copy_user(to, from, n);
- return n;
-}
-
-static inline
-unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
- unsigned long n)
-{
- __constant_copy_user_zeroing(to, from, n);
+ __copy_user(to, from, n);
return n;
}
-#endif
-
-extern unsigned long __generic_copy_to_user(void __user *, const void *,
- unsigned long);
-extern unsigned long __generic_copy_from_user(void *, const void __user *,
- unsigned long);
-
-#define __copy_to_user_inatomic(to, from, n) \
- __generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
- __generic_copy_from_user_nocheck((to), (from), (n))
-
-#define __copy_to_user(to, from, n) \
-({ \
- might_fault(); \
- __copy_to_user_inatomic((to), (from), (n)); \
-})
-
-#define __copy_from_user(to, from, n) \
-({ \
- might_fault(); \
- __copy_from_user_inatomic((to), (from), (n)); \
-})
-
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
diff --git a/arch/mn10300/kernel/mn10300_ksyms.c b/arch/mn10300/kernel/mn10300_ksyms.c
index ec6c4f8f93a6..5e9f919635f0 100644
--- a/arch/mn10300/kernel/mn10300_ksyms.c
+++ b/arch/mn10300/kernel/mn10300_ksyms.c
@@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__generic_copy_from_user);
-EXPORT_SYMBOL(__generic_copy_to_user);
EXPORT_SYMBOL(strnlen_user);
extern u64 __ashrdi3(u64, unsigned);
diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
index ce8899e5e171..cece1799cc32 100644
--- a/arch/mn10300/lib/usercopy.c
+++ b/arch/mn10300/lib/usercopy.c
@@ -11,24 +11,6 @@
*/
#include <linux/uaccess.h>
-unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- __copy_user(to, from, n);
- return n;
-}
-
-unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- __copy_user_zeroing(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-
/*
* Copy a null terminated string from userspace.
*/
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 51a56c8b04b4..45b4727e3136 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -16,6 +16,7 @@ config NIOS2
select SPARSE_IRQ
select USB_ARCH_HAS_HCD if USB_SUPPORT
select CPU_NO_EFFICIENT_FFS
+ select ARCH_HAS_RAW_COPY_USER
config GENERIC_CSUM
def_bool y
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index aaa3c218b56c..87e70f2b463f 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += dma.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 298393c3cb42..db4f7d179220 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -22,6 +22,7 @@
#include <asm/tlbflush.h>
#include <asm/pgtable-bits.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#define FIRST_USER_ADDRESS 0UL
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index 198bbf15f644..727bd9504899 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -17,24 +17,7 @@
#include <asm/page.h>
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
- unsigned long insn;
- unsigned long fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
/*
* Segment stuff
@@ -90,36 +73,17 @@ static inline unsigned long __must_check clear_user(void __user *to,
return __clear_user(to, n);
}
-extern long __copy_from_user(void *to, const void __user *from,
- unsigned long n);
-extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
-
-static inline long copy_from_user(void *to, const void __user *from,
- unsigned long n)
-{
- unsigned long res = n;
- if (access_ok(VERIFY_READ, from, n))
- res = __copy_from_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
-}
-
-static inline long copy_to_user(void __user *to, const void *from,
- unsigned long n)
-{
- if (!access_ok(VERIFY_WRITE, to, n))
- return n;
- return __copy_to_user(to, from, n);
-}
+extern unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
extern long strncpy_from_user(char *__to, const char __user *__from,
long __len);
extern long strnlen_user(const char __user *s, long n);
-#define __copy_from_user_inatomic __copy_from_user
-#define __copy_to_user_inatomic __copy_to_user
-
/* Optimized macros */
#define __get_user_asm(val, insn, addr, err) \
{ \
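The hand-rolled exception-table declarations deleted above are replaced by the asm-generic <asm/extable.h>, which carries the same two-address layout the removed comments describe: the address of an instruction allowed to fault, paired with the address where execution should continue. For reference:

/* Layout picked up from asm-generic <asm/extable.h>. */
struct exception_table_entry {
        unsigned long insn;     /* instruction allowed to fault */
        unsigned long fixup;    /* where to continue afterwards */
};

struct pt_regs;
int fixup_exception(struct pt_regs *regs);      /* 0 if no matching entry */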
diff --git a/arch/nios2/mm/uaccess.c b/arch/nios2/mm/uaccess.c
index 7663e156ff4f..804983317766 100644
--- a/arch/nios2/mm/uaccess.c
+++ b/arch/nios2/mm/uaccess.c
@@ -10,9 +10,9 @@
#include <linux/export.h>
#include <linux/uaccess.h>
-asm(".global __copy_from_user\n"
- " .type __copy_from_user, @function\n"
- "__copy_from_user:\n"
+asm(".global raw_copy_from_user\n"
+ " .type raw_copy_from_user, @function\n"
+ "raw_copy_from_user:\n"
" movi r2,7\n"
" mov r3,r4\n"
" bge r2,r6,1f\n"
@@ -65,12 +65,12 @@ asm(".global __copy_from_user\n"
".word 7b,13b\n"
".previous\n"
);
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
asm(
- " .global __copy_to_user\n"
- " .type __copy_to_user, @function\n"
- "__copy_to_user:\n"
+ " .global raw_copy_to_user\n"
+ " .type raw_copy_to_user, @function\n"
+ "raw_copy_to_user:\n"
" movi r2,7\n"
" mov r3,r4\n"
" bge r2,r6,1f\n"
@@ -127,7 +127,7 @@ asm(
".word 11b,13b\n"
".word 12b,13b\n"
".previous\n");
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
long strncpy_from_user(char *__to, const char __user *__from, long __len)
{
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 1e95920b0737..38954181fa96 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -28,6 +28,7 @@ config OPENRISC
select OR1K_PIC
select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
select NO_BOOTMEM
+ select ARCH_HAS_RAW_COPY_USER
config MMU
def_bool y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index fb01873a5aad..df8e2f7bc7dd 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += dma.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += ftrace.h
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 5fcb9ac72693..f0a5d8b844d6 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
return val;
}
-#define xchg(ptr, with) \
- ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, with) \
+ ({ \
+ (__typeof__(*(ptr))) __xchg((unsigned long)(with), \
+ (ptr), \
+ sizeof(*(ptr))); \
+ })
#endif /* __ASM_OPENRISC_CMPXCHG_H */
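The statement-expression form of xchg() evaluates to the same cast result as before. A typical call site (illustrative names) looks like:

/*
 * Illustrative use of xchg(): atomically publish a new value and get the
 * previous contents back in the pointed-to type.
 */
static int sketch_test_and_mark_busy(int *state)
{
        int old = xchg(state, 1);       /* previous value of *state */

        return old;                     /* non-zero: it was already busy */
}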
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 3567aa7be555..ff97374ca069 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -25,6 +25,7 @@
#ifndef __ASM_OPENRISC_PGTABLE_H
#define __ASM_OPENRISC_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index 227af1acb8bd..a557a7cd0232 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -25,6 +25,7 @@
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/page.h>
+#include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should be
@@ -61,23 +62,6 @@
__range_ok((unsigned long)addr, (unsigned long)size)
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
@@ -206,7 +190,7 @@ do { \
case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
- case 8: __get_user_asm2(x, ptr, retval); \
+ case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
} \
} while (0)
@@ -252,34 +236,18 @@ do { \
extern unsigned long __must_check
__copy_tofrom_user(void *to, const void *from, unsigned long size);
-
-#define __copy_from_user(to, from, size) \
- __copy_tofrom_user(to, from, size)
-#define __copy_to_user(to, from, size) \
- __copy_tofrom_user(to, from, size)
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
static inline unsigned long
-copy_from_user(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
- unsigned long res = n;
-
- if (likely(access_ok(VERIFY_READ, from, n)))
- res = __copy_tofrom_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
+ return __copy_tofrom_user(to, (__force const void *)from, size);
}
-
static inline unsigned long
-copy_to_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
- n = __copy_tofrom_user(to, from, n);
- return n;
+ return __copy_tofrom_user((__force void *)to, from, size);
}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
extern unsigned long __clear_user(void *addr, unsigned long size);
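One detail worth calling out in the hunks above is the break added to the 8-byte case of the __get_user size switch: without it, the valid 64-bit case fell straight through into the default, error arm. A stripped-down illustration of that fallthrough (hypothetical function, not the kernel macro):

/*
 * The bug pattern fixed above: case 8 used to fall through, and the value
 * just fetched was replaced by the failure path.
 */
static long sketch_get(int size, long fetched)
{
        long x;

        switch (size) {
        case 8:
                x = fetched;
                break;          /* previously missing: fell into 'default' */
        default:
                x = -1;         /* stands in for __get_user_bad() */
        }
        return x;
}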
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index 5c4695d13542..ee3e604959e1 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -30,6 +30,7 @@
#include <asm/hardirq.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
@@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
DECLARE_EXPORT(__ashrdi3);
DECLARE_EXPORT(__ashldi3);
DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ucmpdi2);
+EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(memset);
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 828a29110459..f8da545854f9 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -90,6 +90,7 @@ void arch_cpu_idle(void)
}
void (*pm_power_off) (void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
/*
* When a process does an "exec", machine state like FPU and debug
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index ad294b3fb90b..15b7b279a169 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -41,6 +41,7 @@ config PARISC
select GENERIC_CLOCKEVENTS
select ARCH_NO_COHERENT_DMA_MMAP
select CPU_NO_EFFICIENT_FFS
+ select ARCH_HAS_RAW_COPY_USER
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 19c9c3c5f267..c7e15cc5c668 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
-
- flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
- void *cursor = vaddr;
- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(cursor);
-
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- flush_kernel_dcache_page(page);
- }
- flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index a0b461336b6a..852322f2d1f9 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -27,7 +27,8 @@
* that put_user is the same as __put_user, etc.
*/
-#define access_ok(type, uaddr, size) (1)
+#define access_ok(type, uaddr, size) \
+ ( (uaddr) == (uaddr) )
#define put_user __put_user
#define get_user __get_user
@@ -59,6 +60,15 @@ struct exception_table_entry {
".previous\n"
/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
+/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
*/
@@ -85,7 +95,7 @@ struct exception_data {
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
- register long __gu_val __asm__ ("r9") = 0; \
+ register long __gu_val; \
\
load_sr2(); \
switch (sizeof(*(ptr))) { \
@@ -101,22 +111,23 @@ struct exception_data {
})
#define __get_user_asm(ldx, ptr) \
- __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+ __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#if !defined(CONFIG_64BIT)
#define __get_user_asm64(ptr) \
- __asm__("\n1:\tldw 0(%%sr2,%2),%0" \
- "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+ __asm__(" copy %%r0,%R0\n" \
+ "1: ldw 0(%%sr2,%2),%0\n" \
+ "2: ldw 4(%%sr2,%2),%R0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#endif /* !defined(CONFIG_64BIT) */
@@ -142,32 +153,31 @@ struct exception_data {
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
* gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
- "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+ "1: " stx " %2,0(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err) \
- : "r1")
+ : "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
- "\n1:\tstw %2,0(%%sr2,%1)" \
- "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+ "1: stw %2,0(%%sr2,%1)\n" \
+ "2: stw %R2,4(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err) \
- : "r1"); \
+ : "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
@@ -177,9 +187,6 @@ struct exception_data {
* Complex access routines -- external declarations
*/
-extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
-extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
-extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
@@ -193,59 +200,14 @@ extern long lstrnlen_user(const char __user *, long);
#define clear_user lclear_user
#define __clear_user lclear_user
-unsigned long __must_check __copy_to_user(void __user *dst, const void *src,
- unsigned long len);
-unsigned long __must_check __copy_from_user(void *dst, const void __user *src,
- unsigned long len);
-unsigned long copy_in_user(void __user *dst, const void __user *src,
- unsigned long len);
-#define __copy_in_user copy_in_user
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- int sz = __compiletime_object_size(to);
- unsigned long ret = n;
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(to, n, false);
- ret = __copy_from_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
- if (unlikely(ret))
- memset(to + (n - ret), 0, ret);
-
- return ret;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- int sz = __compiletime_object_size(from);
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(from, n, true);
- n = __copy_to_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
- return n;
-}
+unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
+ unsigned long len);
+unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
+ unsigned long len);
+unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
+ unsigned long len);
+#define INLINE_COPY_TO_USER
+#define INLINE_COPY_FROM_USER
struct pt_regs;
int fixup_exception(struct pt_regs *regs);
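A minimal sketch (not part of the diff) of what the new fixup semantics look like from a caller's side: on a faulting access, get_user() leaves -EFAULT in the error register and the destination variable has already been zeroed by fixup_exception(), so no out-of-line fixup_get_user_skip_* stubs are needed any more. The helper name below is hypothetical.

static int read_user_word(const unsigned int __user *uptr, unsigned int *out)
{
	unsigned int val;

	/* On a fault: get_user() returns -EFAULT and val is zeroed. */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}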
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 6b0741e7a7ed..667c99421003 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -362,8 +362,9 @@
#define __NR_copy_file_range (__NR_Linux + 346)
#define __NR_preadv2 (__NR_Linux + 347)
#define __NR_pwritev2 (__NR_Linux + 348)
+#define __NR_statx (__NR_Linux + 349)
-#define __NR_Linux_syscalls (__NR_pwritev2 + 1)
+#define __NR_Linux_syscalls (__NR_statx + 1)
#define __IGNORE_select /* newselect */
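As a hedged userspace illustration of the new syscall number (assuming a libc that does not yet wrap statx(2) and kernel headers that provide struct statx in <linux/stat.h>), the call can be exercised directly through syscall(); the path and mask are arbitrary examples.

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/stat.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct statx stx;

	/* __NR_statx is __NR_Linux + 349 on parisc after this change. */
	if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
		    STATX_SIZE, &stx) == 0)
		printf("size: %llu bytes\n",
		       (unsigned long long)stx.stx_size);
	return 0;
}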
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 0dc72d5de861..c32a09095216 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
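A rough sketch (not from this diff) of the calling convention these two exports serve: code doing I/O on a vmap()ed buffer flushes the kernel alias before a device reads it by DMA, and invalidates it before the CPU reads data the device wrote. The DMA steps are elided as comments.

static void vmap_io_sketch(struct page **pages, int nr_pages, int len)
{
	void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

	/* Write path: the CPU filled buf through the vmap alias; flush it
	 * so the device sees the data when it reads memory. */
	flush_kernel_vmap_range(buf, len);
	/* ... start DMA, device reads buf ... */

	/* Read path: the device wrote into buf; invalidate the stale
	 * CPU-side alias before looking at the new data. */
	invalidate_kernel_vmap_range(buf, len);
	/* ... CPU parses the freshly written data ... */

	vunmap(buf);
}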
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index a0ecdb4abcc8..c66c943d9322 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
+ case R_PARISC_SECREL32:
+ /* 32-bit section relative address. */
+ *loc = fsel(val, addend);
+ break;
case R_PARISC_DPREL21L:
/* left 21 bit of relative address */
val = lrsel(val - dp, addend);
@@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
*/
*loc = fsel(val, addend);
break;
+ case R_PARISC_SECREL32:
+ /* 32-bit section relative address. */
+ *loc = fsel(val, addend);
+ break;
case R_PARISC_FPTR64:
/* 64-bit function address */
if(in_local(me, (void *)(val + addend))) {
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7484b3d11e0d..c6d6272a934f 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
EXPORT_SYMBOL(lclear_user);
EXPORT_SYMBOL(lstrnlen_user);
-/* Global fixups - defined as int to avoid creation of function pointers */
-extern int fixup_get_user_skip_1;
-extern int fixup_get_user_skip_2;
-extern int fixup_put_user_skip_1;
-extern int fixup_put_user_skip_2;
-EXPORT_SYMBOL(fixup_get_user_skip_1);
-EXPORT_SYMBOL(fixup_get_user_skip_2);
-EXPORT_SYMBOL(fixup_put_user_skip_1);
-EXPORT_SYMBOL(fixup_put_user_skip_2);
-
#ifndef CONFIG_64BIT
/* Needed so insmod can set dp value */
extern int $global$;
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index e282a5131d77..6017a5af2e6e 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -39,7 +39,7 @@
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
* in various PDC revisions. The code is much more maintainable
* and reliable this way vs having to debug on every version of PDC
- * on every box.
+ * on every box.
*/
#include <linux/capability.h>
@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
/*
* configure:
*
- * Configure the cpu with a given data image. First turn off the counters,
+ * Configure the cpu with a given data image. First turn off the counters,
* then download the image, then turn the counters back on.
*/
static int perf_config(uint32_t *image_ptr)
@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
error = perf_stop_counters(raddr);
if (error != 0) {
printk("perf_config: perf_stop_counters = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to write image\n");
@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
error = perf_write_image((uint64_t *)image_ptr);
if (error != 0) {
printk("perf_config: DOWNLOAD = %ld\n", error);
- return -EINVAL;
+ return -EINVAL;
}
printk("Preparing to start counters\n");
@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
}
/*
- * Open the device and initialize all of its memory. The device is only
+ * Open the device and initialize all of its memory. The device is only
* opened once, but can be "queried" by multiple processes that know its
* file descriptor.
*/
@@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
* called on the processor that the download should happen
* on.
*/
-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t perf_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
size_t image_size;
uint32_t image_type;
uint32_t interface_type;
uint32_t test;
- if (perf_processor_interface == ONYX_INTF)
+ if (perf_processor_interface == ONYX_INTF)
image_size = PCXU_IMAGE_SIZE;
- else if (perf_processor_interface == CUDA_INTF)
+ else if (perf_processor_interface == CUDA_INTF)
image_size = PCXW_IMAGE_SIZE;
- else
+ else
return -EFAULT;
if (!capable(CAP_SYS_ADMIN))
@@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
/* First check the machine type is correct for
the requested image */
- if (((perf_processor_interface == CUDA_INTF) &&
- (interface_type != CUDA_INTF)) ||
- ((perf_processor_interface == ONYX_INTF) &&
- (interface_type != ONYX_INTF)))
+ if (((perf_processor_interface == CUDA_INTF) &&
+ (interface_type != CUDA_INTF)) ||
+ ((perf_processor_interface == ONYX_INTF) &&
+ (interface_type != ONYX_INTF)))
return -EINVAL;
/* Next check to make sure the requested image
is valid */
- if (((interface_type == CUDA_INTF) &&
+ if (((interface_type == CUDA_INTF) &&
(test >= MAX_CUDA_IMAGES)) ||
- ((interface_type == ONYX_INTF) &&
- (test >= MAX_ONYX_IMAGES)))
+ ((interface_type == ONYX_INTF) &&
+ (test >= MAX_ONYX_IMAGES)))
return -EINVAL;
/* Copy the image into the processor */
- if (interface_type == CUDA_INTF)
+ if (interface_type == CUDA_INTF)
return perf_config(cuda_images[test]);
else
return perf_config(onyx_images[test]);
@@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
static void perf_patch_images(void)
{
#if 0 /* FIXME!! */
-/*
+/*
* NOTE: this routine is VERY specific to the current TLB image.
* If the image is changed, this routine might also need to be changed.
*/
@@ -367,9 +367,9 @@ static void perf_patch_images(void)
extern void $i_dtlb_miss_2_0();
extern void PA2_0_iva();
- /*
+ /*
* We can only use the lower 32-bits, the upper 32-bits should be 0
- * anyway given this is in the kernel
+ * anyway given this is in the kernel
*/
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
@@ -377,21 +377,21 @@ static void perf_patch_images(void)
if (perf_processor_interface == ONYX_INTF) {
/* clear last 2 bytes */
- onyx_images[TLBMISS][15] &= 0xffffff00;
+ onyx_images[TLBMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ onyx_images[TLBHANDMISS][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
onyx_images[TLBHANDMISS][17] = itlb_addr;
/* clear last 2 bytes */
- onyx_images[BIG_CPI][15] &= 0xffffff00;
+ onyx_images[BIG_CPI][15] &= 0xffffff00;
/* set 2 bytes */
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
@@ -404,24 +404,24 @@ static void perf_patch_images(void)
} else if (perf_processor_interface == CUDA_INTF) {
/* Cuda interface */
- cuda_images[TLBMISS][16] =
+ cuda_images[TLBMISS][16] =
(cuda_images[TLBMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBMISS][17] =
+ cuda_images[TLBMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[TLBHANDMISS][16] =
+ cuda_images[TLBHANDMISS][16] =
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[TLBHANDMISS][17] =
+ cuda_images[TLBHANDMISS][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
- cuda_images[BIG_CPI][16] =
+ cuda_images[BIG_CPI][16] =
(cuda_images[BIG_CPI][16]&0xffff0000) |
((dtlb_addr >> 8)&0x0000ffff);
- cuda_images[BIG_CPI][17] =
+ cuda_images[BIG_CPI][17] =
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
} else {
@@ -433,7 +433,7 @@ static void perf_patch_images(void)
/*
* ioctl routine
- * All routines effect the processor that they are executed on. Thus you
+ * All routines effect the processor that they are executed on. Thus you
* must be running on the processor that you wish to change.
*/
@@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
/* copy out the Counters */
- if (copy_to_user((void __user *)arg, raddr,
+ if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
error = -EFAULT;
break;
@@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
.open = perf_open,
.release = perf_release
};
-
+
static struct miscdevice perf_dev = {
MISC_DYNAMIC_MINOR,
PA_PERF_DEV,
@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
/* OR sticky2 (bit 1496) to counter2 bit 32 */
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
raddr[2] = (uint32_t)tmp64;
-
+
/* Counter3 is bits 1497 to 1528 */
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
/* OR sticky3 (bit 1529) to counter3 bit 32 */
@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
userbuf[22] = 0;
userbuf[23] = 0;
- /*
+ /*
* Write back the zeroed bytes + the image given
* the read was destructive.
*/
@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
} else {
/*
- * Read RDR-15 which contains the counters and sticky bits
+ * Read RDR-15 which contains the counters and sticky bits
*/
if (!perf_rdr_read_ubuf(15, userbuf)) {
return -13;
}
- /*
+ /*
* Clear out the counters
*/
perf_rdr_clear(15);
@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
}
-
+
return 0;
}
@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
i = tentry->num_words;
while (i--) {
buffer[i] = 0;
- }
+ }
/* Check for bits an even number of 64 */
if ((xbits = width & 0x03f) != 0) {
@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
}
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
+ if (!runway) {
+ pr_err("perf_write_image: ioremap failed!\n");
+ return -ENOMEM;
+ }
/* Merge intrigue bits into Runway STATUS 0 */
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
- __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+ __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
runway + RUNWAY_STATUS);
-
+
/* Write RUNWAY DEBUG registers */
for (i = 0; i < 8; i++) {
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
}
- return 0;
+ return 0;
}
/*
@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
perf_rdr_shift_out_U(rdr_num, buffer[i]);
} else {
perf_rdr_shift_out_W(rdr_num, buffer[i]);
- }
+ }
}
printk("perf_rdr_write done\n");
}
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 06f7ca7fe70b..4516a5b53f38 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -142,6 +142,10 @@ void machine_power_off(void)
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
+
+ /* prevent soft lockup/stalled CPU messages for the endless loop. */
+ rcu_sysrq_start();
+ for (;;);
}
void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 3cfef1de8061..44aeaa9c039f 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -444,6 +444,7 @@
ENTRY_SAME(copy_file_range)
ENTRY_COMP(preadv2)
ENTRY_COMP(pwritev2)
+ ENTRY_SAME(statx)
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 8fa92b8d839a..f2dac4d73b1b 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for parisc-specific library files
#
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o
obj-y := iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644
index a5b72f22c7a6..000000000000
--- a/arch/parisc/lib/fixup.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *
- * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Fixup routines for kernel exception handling.
- */
-#include <asm/asm-offsets.h>
-#include <asm/assembly.h>
-#include <asm/errno.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_SMP
- .macro get_fault_ip t1 t2
- loadgp
- addil LT%__per_cpu_offset,%r27
- LDREG RT%__per_cpu_offset(%r1),\t1
- /* t2 = smp_processor_id() */
- mfctl 30,\t2
- ldw TI_CPU(\t2),\t2
-#ifdef CONFIG_64BIT
- extrd,u \t2,63,32,\t2
-#endif
- /* t2 = &__per_cpu_offset[smp_processor_id()]; */
- LDREGX \t2(\t1),\t2
- addil LT%exception_data,%r27
- LDREG RT%exception_data(%r1),\t1
- /* t1 = this_cpu_ptr(&exception_data) */
- add,l \t1,\t2,\t1
- /* %r27 = t1->fault_gp - restore gp */
- LDREG EXCDATA_GP(\t1), %r27
- /* t1 = t1->fault_ip */
- LDREG EXCDATA_IP(\t1), \t1
- .endm
-#else
- .macro get_fault_ip t1 t2
- loadgp
- /* t1 = this_cpu_ptr(&exception_data) */
- addil LT%exception_data,%r27
- LDREG RT%exception_data(%r1),\t2
- /* %r27 = t2->fault_gp - restore gp */
- LDREG EXCDATA_GP(\t2), %r27
- /* t1 = t2->fault_ip */
- LDREG EXCDATA_IP(\t2), \t1
- .endm
-#endif
-
- .level LEVEL
-
- .text
- .section .fixup, "ax"
-
- /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY_CFI(fixup_get_user_skip_1)
- get_fault_ip %r1,%r8
- ldo 4(%r1), %r1
- ldi -EFAULT, %r8
- bv %r0(%r1)
- copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_1)
-
-ENTRY_CFI(fixup_get_user_skip_2)
- get_fault_ip %r1,%r8
- ldo 8(%r1), %r1
- ldi -EFAULT, %r8
- bv %r0(%r1)
- copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_2)
-
- /* put_user() fixups, store -EFAULT in r8 */
-ENTRY_CFI(fixup_put_user_skip_1)
- get_fault_ip %r1,%r8
- ldo 4(%r1), %r1
- bv %r0(%r1)
- ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_1)
-
-ENTRY_CFI(fixup_put_user_skip_2)
- get_fault_ip %r1,%r8
- ldo 8(%r1), %r1
- bv %r0(%r1)
- ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_2)
-
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 56845de6b5df..f01188c044ee 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -5,6 +5,8 @@
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
* Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
*
*
* This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,320 @@ ENDPROC_CFI(lstrnlen_user)
.procend
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * This code is based on a C-implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from the glibc.
+ *
+ * Several strategies are tried to try to get the best performance for various
+ * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes
+ * at a time using general registers. Unaligned copies are handled either by
+ * aligning the destination and then using shift-and-write method, or in a few
+ * cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends further
+ * credibility to the idea that gcc can generate very good code as long as we
+ * are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ * additional interlocks. The assumption is that these were only efficient
+ * on older (pre-PA8000) machines.
+ */
+
+ dst = arg0
+ src = arg1
+ len = arg2
+ end = arg3
+ t1 = r19
+ t2 = r20
+ t3 = r21
+ t4 = r22
+ srcspc = sr1
+ dstspc = sr2
+
+ t0 = r1
+ a1 = t1
+ a2 = t2
+ a3 = t3
+ a0 = t4
+
+ save_src = ret0
+ save_dst = ret1
+ save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ /* Last destination address */
+ add dst,len,end
+
+ /* short copy with less than 16 bytes? */
+ cmpib,>>=,n 15,len,.Lbyte_loop
+
+ /* same alignment? */
+ xor src,dst,t0
+ extru t0,31,2,t1
+ cmpib,<>,n 0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+ /* only do 64-bit copies if we can get aligned. */
+ extru t0,31,3,t1
+ cmpib,<>,n 0,t1,.Lalign_loop32
+
+ /* loop until we are 64-bit aligned */
+.Lalign_loop64:
+ extru dst,31,3,t1
+ cmpib,=,n 0,t1,.Lcopy_loop_16
+20: ldb,ma 1(srcspc,src),t1
+21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop64
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+ ldi 31,t0
+.Lcopy_loop_16:
+ cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10: ldd 0(srcspc,src),t1
+11: ldd 8(srcspc,src),t2
+ ldo 16(src),src
+12: std,ma t1,8(dstspc,dst)
+13: std,ma t2,8(dstspc,dst)
+14: ldd 0(srcspc,src),t1
+15: ldd 8(srcspc,src),t2
+ ldo 16(src),src
+16: std,ma t1,8(dstspc,dst)
+17: std,ma t2,8(dstspc,dst)
+
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+ b .Lcopy_loop_16
+ ldo -32(len),len
+
+.Lword_loop:
+ cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20: ldw,ma 4(srcspc,src),t1
+21: stw,ma t1,4(dstspc,dst)
+ b .Lword_loop
+ ldo -4(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+ /* loop until we are 32-bit aligned */
+.Lalign_loop32:
+ extru dst,31,2,t1
+ cmpib,=,n 0,t1,.Lcopy_loop_4
+20: ldb,ma 1(srcspc,src),t1
+21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop32
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_4:
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10: ldw 0(srcspc,src),t1
+11: ldw 4(srcspc,src),t2
+12: stw,ma t1,4(dstspc,dst)
+13: stw,ma t2,4(dstspc,dst)
+14: ldw 8(srcspc,src),t1
+15: ldw 12(srcspc,src),t2
+ ldo 16(src),src
+16: stw,ma t1,4(dstspc,dst)
+17: stw,ma t2,4(dstspc,dst)
+
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+ ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+ b .Lcopy_loop_4
+ ldo -16(len),len
+
+.Lbyte_loop:
+ cmpclr,COND(<>) len,%r0,%r0
+ b,n .Lcopy_done
+20: ldb 0(srcspc,src),t1
+ ldo 1(src),src
+21: stb,ma t1,1(dstspc,dst)
+ b .Lbyte_loop
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+ bv %r0(%r2)
+ sub end,dst,ret0
+
+
+ /* src and dst are not aligned the same way. */
+ /* need to go the hard way */
+.Lunaligned_copy:
+ /* align until dst is 32bit-word-aligned */
+ extru dst,31,2,t1
+ cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
+20: ldb 0(srcspc,src),t1
+ ldo 1(src),src
+21: stb,ma t1,1(dstspc,dst)
+ b .Lunaligned_copy
+ ldo -1(len),len
+
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+ /* store src, dst and len in a safe place */
+ copy src,save_src
+ copy dst,save_dst
+ copy len,save_len
+
+ /* len now needs to give the number of words to copy */
+ SHRREG len,2,len
+
+ /*
+ * Copy from a not-aligned src to an aligned dst using shifts.
+ * Handles 4 words per loop.
+ */
+
+ depw,z src,28,2,t0
+ subi 32,t0,t0
+ mtsar t0
+ extru len,31,2,t0
+ cmpib,= 2,t0,.Lcase2
+ /* Make src aligned by rounding it down. */
+ depi 0,31,2,src
+
+ cmpiclr,<> 3,t0,%r0
+ b,n .Lcase3
+ cmpiclr,<> 1,t0,%r0
+ b,n .Lcase1
+.Lcase0:
+ cmpb,= %r0,len,.Lcda_finish
+ nop
+
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b,n .Ldo3
+.Lcase1:
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ ldo -1(len),len
+ cmpb,=,n %r0,len,.Ldo0
+.Ldo4:
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a2, a3, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a3, a0, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a0, a1, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ shrpw a1, a2, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+ ldo -4(len),len
+ cmpb,<> %r0,len,.Ldo4
+ nop
+.Ldo0:
+ shrpw a2, a3, %sar, t0
+1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+ /* calculate new src, dst and len and jump to byte-copy loop */
+ sub dst,save_dst,t0
+ add save_src,t0,src
+ b .Lbyte_loop
+ sub save_len,t0,len
+
+.Lcase3:
+1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b .Ldo2
+ ldo 1(len),len
+.Lcase2:
+1: ldw,ma 4(srcspc,src), a1
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1: ldw,ma 4(srcspc,src), a2
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ b .Ldo1
+ ldo 2(len),len
+
+
+ /* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+10: b .Lcopy_done
+ std,ma t1,8(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+10: b .Lcopy_done
+ stw,ma t1,4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+ .exit
+ENDPROC_CFI(pa_memcpy)
+ .procend
+
.end
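For readers following the .Lcopy_dstaligned path above, a rough C model (an illustration, not part of the diff) of the shift-and-merge trick the shrpw instructions implement: the source pointer is rounded down to a word boundary, two adjacent aligned words are loaded, and the misalignment held in %sar selects the unaligned word that straddles them.

/* Assumes 0 < misalign < 4; the aligned path handles misalign == 0. */
static unsigned int merge_words(unsigned int w0, unsigned int w1,
				unsigned int misalign)
{
	unsigned int sh1 = 8 * misalign;	/* bits of w0 already consumed */
	unsigned int sh2 = 32 - sh1;		/* bits still needed from w1 */

	return (w0 << sh1) | (w1 >> sh2);	/* big-endian word merge */
}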
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 66f5160136c2..99115cd9e790 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,7 +2,7 @@
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- * Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ * Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,493 +21,40 @@
* Portions derived from the GNU C Library
* Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
*
- * Several strategies are tried to try to get the best performance for various
- * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
- * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
- * general registers. Unaligned copies are handled either by aligning the
- * destination and then using shift-and-write method, or in a few cases by
- * falling back to a byte-at-a-time copy.
- *
- * I chose to implement this in C because it is easier to maintain and debug,
- * and in my experiments it appears that the C code generated by gcc (3.3/3.4
- * at the time of writing) is fairly optimal. Unfortunately some of the
- * semantics of the copy routine (exception handling) is difficult to express
- * in C, so we have to play some tricks to get it to work.
- *
- * All the loads and stores are done via explicit asm() code in order to use
- * the right space registers.
- *
- * Testing with various alignments and buffer sizes shows that this code is
- * often >10x faster than a simple byte-at-a-time copy, even for strangely
- * aligned operands. It is interesting to note that the glibc version
- * of memcpy (written in C) is actually quite fast already. This routine is
- * able to beat it by 30-40% for aligned copies because of the loop unrolling,
- * but in some cases the glibc version is still slightly faster. This lends
- * more credibility that gcc can generate very good code as long as we are
- * careful.
- *
- * TODO:
- * - cache prefetching needs more experimentation to get optimal settings
- * - try not to use the post-increment address modifiers; they create additional
- * interlocks
- * - replace byte-copy loops with stybs sequences
*/
-#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
-#define s_space "%%sr1"
-#define d_space "%%sr2"
-#else
-#include "memcpy.h"
-#define s_space "%%sr0"
-#define d_space "%%sr0"
-#define pa_memcpy new2_copy
-#endif
DECLARE_PER_CPU(struct exception_data, exception_data);
-#define preserve_branch(label) do { \
- volatile int dummy = 0; \
- /* The following branch is never taken, it's just here to */ \
- /* prevent gcc from optimizing away our exception code. */ \
- if (unlikely(dummy != dummy)) \
- goto label; \
-} while (0)
-
#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3))
#define get_kernel_space() (0)
-#define MERGE(w0, sh_1, w1, sh_2) ({ \
- unsigned int _r; \
- asm volatile ( \
- "mtsar %3\n" \
- "shrpw %1, %2, %%sar, %0\n" \
- : "=r"(_r) \
- : "r"(w0), "r"(w1), "r"(sh_2) \
- ); \
- _r; \
-})
-#define THRESHOLD 16
-
-#ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
-#else
-#define DPRINTF(fmt, args...)
-#endif
-
-#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : _tt(_t), "+r"(_a) \
- : \
- : "r8")
-
-#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : "+r"(_a) \
- : _tt(_t) \
- : "r8")
-
-#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
-#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
-#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
-#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
-#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
-#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
-
-#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : _tt(_t) \
- : "r"(_a) \
- : "r8")
-
-#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
- __asm__ __volatile__ ( \
- "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
- : \
- : _tt(_t), "r"(_a) \
- : "r8")
-
-#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
-#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
-
-#ifdef CONFIG_PREFETCH
-static inline void prefetch_src(const void *addr)
-{
- __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
-}
-
-static inline void prefetch_dst(const void *addr)
-{
- __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
-}
-#else
-#define prefetch_src(addr) do { } while(0)
-#define prefetch_dst(addr) do { } while(0)
-#endif
-
-#define PA_MEMCPY_OK 0
-#define PA_MEMCPY_LOAD_ERROR 1
-#define PA_MEMCPY_STORE_ERROR 2
-
-/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
- * per loop. This code is derived from glibc.
- */
-static noinline unsigned long copy_dstaligned(unsigned long dst,
- unsigned long src, unsigned long len)
-{
- /* gcc complains that a2 and a3 may be uninitialized, but actually
- * they cannot be. Initialize a2/a3 to shut gcc up.
- */
- register unsigned int a0, a1, a2 = 0, a3 = 0;
- int sh_1, sh_2;
-
- /* prefetch_src((const void *)src); */
-
- /* Calculate how to shift a word read at the memory operation
- aligned srcp to make it aligned for copy. */
- sh_1 = 8 * (src % sizeof(unsigned int));
- sh_2 = 8 * sizeof(unsigned int) - sh_1;
-
- /* Make src aligned by rounding it down. */
- src &= -sizeof(unsigned int);
-
- switch (len % 4)
- {
- case 2:
- /* a1 = ((unsigned int *) src)[0];
- a2 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a1, cda_ldw_exc);
- ldw(s_space, 4, src, a2, cda_ldw_exc);
- src -= 1 * sizeof(unsigned int);
- dst -= 3 * sizeof(unsigned int);
- len += 2;
- goto do1;
- case 3:
- /* a0 = ((unsigned int *) src)[0];
- a1 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a0, cda_ldw_exc);
- ldw(s_space, 4, src, a1, cda_ldw_exc);
- src -= 0 * sizeof(unsigned int);
- dst -= 2 * sizeof(unsigned int);
- len += 1;
- goto do2;
- case 0:
- if (len == 0)
- return PA_MEMCPY_OK;
- /* a3 = ((unsigned int *) src)[0];
- a0 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a3, cda_ldw_exc);
- ldw(s_space, 4, src, a0, cda_ldw_exc);
- src -=-1 * sizeof(unsigned int);
- dst -= 1 * sizeof(unsigned int);
- len += 0;
- goto do3;
- case 1:
- /* a2 = ((unsigned int *) src)[0];
- a3 = ((unsigned int *) src)[1]; */
- ldw(s_space, 0, src, a2, cda_ldw_exc);
- ldw(s_space, 4, src, a3, cda_ldw_exc);
- src -=-2 * sizeof(unsigned int);
- dst -= 0 * sizeof(unsigned int);
- len -= 1;
- if (len == 0)
- goto do0;
- goto do4; /* No-op. */
- }
-
- do
- {
- /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
-do4:
- /* a0 = ((unsigned int *) src)[0]; */
- ldw(s_space, 0, src, a0, cda_ldw_exc);
- /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
- stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-do3:
- /* a1 = ((unsigned int *) src)[1]; */
- ldw(s_space, 4, src, a1, cda_ldw_exc);
- /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
- stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
-do2:
- /* a2 = ((unsigned int *) src)[2]; */
- ldw(s_space, 8, src, a2, cda_ldw_exc);
- /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
- stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
-do1:
- /* a3 = ((unsigned int *) src)[3]; */
- ldw(s_space, 12, src, a3, cda_ldw_exc);
- /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
- stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
-
- src += 4 * sizeof(unsigned int);
- dst += 4 * sizeof(unsigned int);
- len -= 4;
- }
- while (len != 0);
-
-do0:
- /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
- stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-
- preserve_branch(handle_load_error);
- preserve_branch(handle_store_error);
-
- return PA_MEMCPY_OK;
-
-handle_load_error:
- __asm__ __volatile__ ("cda_ldw_exc:\n");
- return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
- __asm__ __volatile__ ("cda_stw_exc:\n");
- return PA_MEMCPY_STORE_ERROR;
-}
-
-
-/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
- * In case of an access fault the faulty address can be read from the per_cpu
- * exception data struct. */
-static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
- unsigned long len)
-{
- register unsigned long src, dst, t1, t2, t3;
- register unsigned char *pcs, *pcd;
- register unsigned int *pws, *pwd;
- register double *pds, *pdd;
- unsigned long ret;
-
- src = (unsigned long)srcp;
- dst = (unsigned long)dstp;
- pcs = (unsigned char *)srcp;
- pcd = (unsigned char *)dstp;
-
- /* prefetch_src((const void *)srcp); */
-
- if (len < THRESHOLD)
- goto byte_copy;
-
- /* Check alignment */
- t1 = (src ^ dst);
- if (unlikely(t1 & (sizeof(double)-1)))
- goto unaligned_copy;
-
- /* src and dst have same alignment. */
-
- /* Copy bytes till we are double-aligned. */
- t2 = src & (sizeof(double) - 1);
- if (unlikely(t2 != 0)) {
- t2 = sizeof(double) - t2;
- while (t2 && len) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- len--;
- stbma(d_space, t3, pcd, pmc_store_exc);
- t2--;
- }
- }
-
- pds = (double *)pcs;
- pdd = (double *)pcd;
-
-#if 0
- /* Copy 8 doubles at a time */
- while (len >= 8*sizeof(double)) {
- register double r1, r2, r3, r4, r5, r6, r7, r8;
- /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
- flddma(s_space, pds, r1, pmc_load_exc);
- flddma(s_space, pds, r2, pmc_load_exc);
- flddma(s_space, pds, r3, pmc_load_exc);
- flddma(s_space, pds, r4, pmc_load_exc);
- fstdma(d_space, r1, pdd, pmc_store_exc);
- fstdma(d_space, r2, pdd, pmc_store_exc);
- fstdma(d_space, r3, pdd, pmc_store_exc);
- fstdma(d_space, r4, pdd, pmc_store_exc);
-
-#if 0
- if (L1_CACHE_BYTES <= 32)
- prefetch_src((char *)pds + L1_CACHE_BYTES);
-#endif
- flddma(s_space, pds, r5, pmc_load_exc);
- flddma(s_space, pds, r6, pmc_load_exc);
- flddma(s_space, pds, r7, pmc_load_exc);
- flddma(s_space, pds, r8, pmc_load_exc);
- fstdma(d_space, r5, pdd, pmc_store_exc);
- fstdma(d_space, r6, pdd, pmc_store_exc);
- fstdma(d_space, r7, pdd, pmc_store_exc);
- fstdma(d_space, r8, pdd, pmc_store_exc);
- len -= 8*sizeof(double);
- }
-#endif
-
- pws = (unsigned int *)pds;
- pwd = (unsigned int *)pdd;
-
-word_copy:
- while (len >= 8*sizeof(unsigned int)) {
- register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
- /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
- ldwma(s_space, pws, r1, pmc_load_exc);
- ldwma(s_space, pws, r2, pmc_load_exc);
- ldwma(s_space, pws, r3, pmc_load_exc);
- ldwma(s_space, pws, r4, pmc_load_exc);
- stwma(d_space, r1, pwd, pmc_store_exc);
- stwma(d_space, r2, pwd, pmc_store_exc);
- stwma(d_space, r3, pwd, pmc_store_exc);
- stwma(d_space, r4, pwd, pmc_store_exc);
-
- ldwma(s_space, pws, r5, pmc_load_exc);
- ldwma(s_space, pws, r6, pmc_load_exc);
- ldwma(s_space, pws, r7, pmc_load_exc);
- ldwma(s_space, pws, r8, pmc_load_exc);
- stwma(d_space, r5, pwd, pmc_store_exc);
- stwma(d_space, r6, pwd, pmc_store_exc);
- stwma(d_space, r7, pwd, pmc_store_exc);
- stwma(d_space, r8, pwd, pmc_store_exc);
- len -= 8*sizeof(unsigned int);
- }
-
- while (len >= 4*sizeof(unsigned int)) {
- register unsigned int r1,r2,r3,r4;
- ldwma(s_space, pws, r1, pmc_load_exc);
- ldwma(s_space, pws, r2, pmc_load_exc);
- ldwma(s_space, pws, r3, pmc_load_exc);
- ldwma(s_space, pws, r4, pmc_load_exc);
- stwma(d_space, r1, pwd, pmc_store_exc);
- stwma(d_space, r2, pwd, pmc_store_exc);
- stwma(d_space, r3, pwd, pmc_store_exc);
- stwma(d_space, r4, pwd, pmc_store_exc);
- len -= 4*sizeof(unsigned int);
- }
-
- pcs = (unsigned char *)pws;
- pcd = (unsigned char *)pwd;
-
-byte_copy:
- while (len) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- }
-
- return PA_MEMCPY_OK;
-
-unaligned_copy:
- /* possibly we are aligned on a word, but not on a double... */
- if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
- t2 = src & (sizeof(unsigned int) - 1);
-
- if (unlikely(t2 != 0)) {
- t2 = sizeof(unsigned int) - t2;
- while (t2) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- t2--;
- }
- }
-
- pws = (unsigned int *)pcs;
- pwd = (unsigned int *)pcd;
- goto word_copy;
- }
-
- /* Align the destination. */
- if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
- t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
- while (t2) {
- /* *pcd++ = *pcs++; */
- ldbma(s_space, pcs, t3, pmc_load_exc);
- stbma(d_space, t3, pcd, pmc_store_exc);
- len--;
- t2--;
- }
- dst = (unsigned long)pcd;
- src = (unsigned long)pcs;
- }
-
- ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
- if (ret)
- return ret;
-
- pcs += (len & -sizeof(unsigned int));
- pcd += (len & -sizeof(unsigned int));
- len %= sizeof(unsigned int);
-
- preserve_branch(handle_load_error);
- preserve_branch(handle_store_error);
-
- goto byte_copy;
-
-handle_load_error:
- __asm__ __volatile__ ("pmc_load_exc:\n");
- return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
- __asm__ __volatile__ ("pmc_store_exc:\n");
- return PA_MEMCPY_STORE_ERROR;
-}
-
-
/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
-{
- unsigned long ret, fault_addr, reference;
- struct exception_data *d;
-
- ret = pa_memcpy_internal(dstp, srcp, len);
- if (likely(ret == PA_MEMCPY_OK))
- return 0;
-
- /* if a load or store fault occured we can get the faulty addr */
- d = this_cpu_ptr(&exception_data);
- fault_addr = d->fault_addr;
-
- /* error in load or store? */
- if (ret == PA_MEMCPY_LOAD_ERROR)
- reference = (unsigned long) srcp;
- else
- reference = (unsigned long) dstp;
-
- DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
- ret, len, fault_addr, reference);
+extern unsigned long pa_memcpy(void *dst, const void *src,
+ unsigned long len);
- if (fault_addr >= reference)
- return len - (fault_addr - reference);
- else
- return len;
-}
-
-#ifdef __KERNEL__
-unsigned long __copy_to_user(void __user *dst, const void *src,
- unsigned long len)
+unsigned long raw_copy_to_user(void __user *dst, const void *src,
+ unsigned long len)
{
mtsp(get_kernel_space(), 1);
mtsp(get_user_space(), 2);
return pa_memcpy((void __force *)dst, src, len);
}
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
-unsigned long __copy_from_user(void *dst, const void __user *src,
+unsigned long raw_copy_from_user(void *dst, const void __user *src,
unsigned long len)
{
mtsp(get_user_space(), 1);
mtsp(get_kernel_space(), 2);
return pa_memcpy(dst, (void __force *)src, len);
}
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
-unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len)
{
mtsp(get_user_space(), 1);
mtsp(get_user_space(), 2);
@@ -523,7 +70,7 @@ void * memcpy(void * dst,const void *src, size_t count)
return dst;
}
-EXPORT_SYMBOL(copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
EXPORT_SYMBOL(memcpy);
long probe_kernel_read(void *dst, const void *src, size_t size)
@@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
return __probe_kernel_read(dst, src, size);
}
-
-#endif
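With INLINE_COPY_FROM_USER now selected in uaccess.h, the zero-padding that the old parisc copy_from_user() did by hand happens in the generic wrapper instead. A simplified model of that behaviour (the real code lives in include/linux/uaccess.h; this is a sketch, not a copy of it):

static inline unsigned long
copy_from_user_model(void *to, const void __user *from, unsigned long n)
{
	unsigned long left = raw_copy_from_user(to, from, n);

	/* Bytes that faulted are zero-filled so callers never see
	 * uninitialized kernel memory. */
	if (unlikely(left))
		memset(to + (n - left), 0, left);
	return left;
}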
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index deab89a8915a..32ec22146141 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs)
d->fault_space = regs->isr;
d->fault_addr = regs->ior;
+ /*
+ * Fix up get_user() and put_user().
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+ * bit in the relative address of the fixup routine to indicate
+ * that %r8 should be loaded with -EFAULT to report a userspace
+ * access error.
+ */
+ if (fix->fixup & 1) {
+ regs->gr[8] = -EFAULT;
+
+ /* zero target register for get_user() */
+ if (parisc_acctyp(0, regs->iir) == VM_READ) {
+ int treg = regs->iir & 0x1f;
+ regs->gr[treg] = 0;
+ }
+ }
+
regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
regs->iaoq[0] &= ~3;
/*
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 97a8bc8a095c..839f08887269 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -87,6 +87,7 @@ config PPC
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_RAW_COPY_USER
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
+ . = ALIGN(256);
.got :
{
__toc_start = .;
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d56eba..411994551afc 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
- *key = 0;
+ *key = ~0;
return 0;
}
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 73eb794d6163..bc5fdfd22788 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,6 +51,10 @@
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
+ ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
#include <asm/barrier.h>
/* Macro for generating the ***_bits() functions */
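A worked example of the new macro (assuming the usual IBM bit numbering, where PPC_BITLSHIFT(be) is 63 - be): gathering four scattered SRR1 bits into a small ifetch error code, which mirrors the P9_SRR1_MC_IFETCH() definition added further down in mce.h.

static unsigned int p9_ifetch_code(unsigned long srr1)
{
	return PPC_BITEXTRACT(srr1, 45, 0) |	/* IBM bit 45 -> bit 0 */
	       PPC_BITEXTRACT(srr1, 44, 1) |	/* IBM bit 44 -> bit 1 */
	       PPC_BITEXTRACT(srr1, 43, 2) |	/* IBM bit 43 -> bit 2 */
	       PPC_BITEXTRACT(srr1, 36, 3);	/* IBM bit 36 -> bit 3 */
}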
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 012223638815..26ed228d4dc6 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/book3s/32/hash.h>
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 1eeeb72c7015..8f4d41936e5a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1,9 +1,12 @@
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#include <asm-generic/5level-fixup.h>
+
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#endif
+
/*
* Common bits between hash and Radix page table
*/
@@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
__r; \
})
+static inline int __pte_write(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ /*
+ * Saved write ptes are prot none ptes that don't have the
+ * privileged bit set. We mark prot none as one which has the
+ * present and privileged bits set and RWX cleared. To mark
+ * protnone which used to have _PAGE_WRITE set we clear
+ * the privileged bit.
+ */
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+}
+#else
+#define pte_savedwrite pte_savedwrite
+static inline bool pte_savedwrite(pte_t pte)
+{
+ return false;
+}
+#endif
+
+static inline int pte_write(pte_t pte)
+{
+ return __pte_write(pte) || pte_savedwrite(pte);
+}
+
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+ /*
+ * We should not find protnone for hugetlb, but this completes the
+ * interface.
+ */
+ if (__pte_write(*ptep))
+ pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
+ else if (unlikely(pte_savedwrite(*ptep)))
+ pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
@@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
-static inline int pte_write(pte_t pte)
-{
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
-}
-
static inline int pte_dirty(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
@@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
VM_BUG_ON(!pte_protnone(pte));
return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
}
-
-#define pte_savedwrite pte_savedwrite
-static inline bool pte_savedwrite(pte_t pte)
+#else
+#define pte_clear_savedwrite pte_clear_savedwrite
+static inline pte_t pte_clear_savedwrite(pte_t pte)
{
- /*
- * Saved write ptes are prot none ptes that doesn't have
- * privileged bit sit. We mark prot none as one which has
- * present and pviliged bit set and RWX cleared. To mark
- * protnone which used to have _PAGE_WRITE set we clear
- * the privileged bit.
- */
- VM_BUG_ON(!pte_protnone(pte));
- return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
+ VM_WARN_ON(1);
+ return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
+ if (unlikely(pte_savedwrite(pte)))
+ return pte_clear_savedwrite(pte);
return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
@@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
-
- if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
- return;
-
- pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+ if (__pmd_write((*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
+ else if (unlikely(pmd_savedwrite(*pmdp)))
+ pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}
static inline int pmd_trans_huge(pmd_t pmd)
diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h
new file mode 100644
index 000000000000..07cc45cd86d9
--- /dev/null
+++ b/arch/powerpc/include/asm/extable.h
@@ -0,0 +1,29 @@
+#ifndef _ARCH_POWERPC_EXTABLE_H
+#define _ARCH_POWERPC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
+ *
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path. This means when everything is well, we don't even
+ * have to jump over them. Further, they do not intrude on our cache or tlb
+ * entries.
+ */
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+ int insn;
+ int fixup;
+};
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->fixup + x->fixup;
+}
+
+#endif
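For reference, the faulting-instruction side of a relative entry resolves the same way as extable_fixup() above; the generic search code in lib/extable.c does the equivalent of this sketch:

static inline unsigned long
extable_insn(const struct exception_table_entry *x)
{
	/* Both fields are offsets relative to their own address. */
	return (unsigned long)&x->insn + x->insn;
}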
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index f97d8cb6bdf6..ed62efe01e49 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -66,6 +66,55 @@
#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1) ( \
+ PPC_BITEXTRACT(srr1, 45, 0) | \
+ PPC_BITEXTRACT(srr1, 44, 1) | \
+ PPC_BITEXTRACT(srr1, 43, 2) | \
+ PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE 1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA 11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE (PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
+ P9_DSISR_MC_SLB_PARITY_MFSLB | \
+ P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
+
enum MCE_Version {
MCE_V1 = 1,
};
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
MCE_ERROR_TYPE_SLB = 2,
MCE_ERROR_TYPE_ERAT = 3,
MCE_ERROR_TYPE_TLB = 4,
+ MCE_ERROR_TYPE_USER = 5,
+ MCE_ERROR_TYPE_RA = 6,
+ MCE_ERROR_TYPE_LINK = 7,
};
enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
MCE_TLB_ERROR_MULTIHIT = 2,
};
+enum MCE_UserErrorType {
+ MCE_USER_ERROR_INDETERMINATE = 0,
+ MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+ MCE_RA_ERROR_INDETERMINATE = 0,
+ MCE_RA_ERROR_IFETCH = 1,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
+ MCE_RA_ERROR_LOAD = 4,
+ MCE_RA_ERROR_STORE = 5,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
+ MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+};
+
+enum MCE_LinkErrorType {
+ MCE_LINK_ERROR_INDETERMINATE = 0,
+ MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+ MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+ MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+ MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
struct machine_check_event {
enum MCE_Version version:8; /* 0x00 */
uint8_t in_use; /* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
uint64_t effective_address;
uint8_t reserved_2[16];
} tlb_error;
+
+ struct {
+ enum MCE_UserErrorType user_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } user_error;
+
+ struct {
+ enum MCE_RaErrorType ra_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } ra_error;
+
+ struct {
+ enum MCE_LinkErrorType link_error_type:8;
+ uint8_t effective_address_provided;
+ uint8_t reserved_1[6];
+ uint64_t effective_address;
+ uint8_t reserved_2[16];
+ } link_error;
} u;
};
@@ -176,8 +278,12 @@ struct mce_error_info {
enum MCE_SlbErrorType slb_error_type:8;
enum MCE_EratErrorType erat_error_type:8;
enum MCE_TlbErrorType tlb_error_type:8;
+ enum MCE_UserErrorType user_error_type:8;
+ enum MCE_RaErrorType ra_error_type:8;
+ enum MCE_LinkErrorType link_error_type:8;
} u;
- uint8_t reserved[2];
+ enum MCE_Severity severity:8;
+ enum MCE_Initiator initiator:8;
};
#define MAX_MC_EVT 100
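
The new P9_SRR1_MC_* and P9_DSISR_MC_* masks above use IBM (big-endian) bit numbering via PPC_BIT(), where bit 0 is the most significant bit of the 64-bit register. A minimal sketch of how such a mask is formed and tested, assuming the usual PPC_BIT() definition from asm/bitops.h (names prefixed "sketch_" are illustrative, not part of the patch):

/* Sketch only: PPC_BIT(be) is 1UL << (63 - be) in IBM bit numbering. */
#define SKETCH_PPC_BIT(be)      (1UL << (63 - (be)))
#define SKETCH_DSISR_MC_UE      SKETCH_PPC_BIT(48)   /* IBM bit 48 == 1UL << 15 */

static int sketch_is_ue(unsigned long dsisr)
{
        return (dsisr & SKETCH_DSISR_MC_UE) != 0;
}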
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index ba9921bf202e..5134ade2e850 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index d0db98793dd8..9f4de0a1035e 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -1,5 +1,8 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+
+#include <asm-generic/5level-fixup.h>
+
/*
* Entries per page directory level. The PTE level must use a 64b record
* for each page table entry. The PMD and PGD level use a 32b record for
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 55b28ef3409a..1facb584dd29 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 4b369d83fe9c..1c9470881c4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
COMPAT_SYS_SPU(preadv2)
COMPAT_SYS_SPU(pwritev2)
SYSCALL(kexec_file_load)
+SYSCALL(statx)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 3904040a3542..5c0d8a8cdae5 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -5,6 +5,7 @@
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should be
@@ -56,31 +57,6 @@
__access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
- * The exception table consists of pairs of relative addresses: the first is
- * the address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out what
- * to do.
- *
- * All the routines below use bits of fixup code that are out of line with the
- * main instruction path. This means when everything is well, we don't even
- * have to jump over them. Further, they do not intrude on our cache or tlb
- * entries.
- */
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-struct exception_table_entry {
- int insn;
- int fixup;
-};
-
-static inline unsigned long extable_fixup(const struct exception_table_entry *x)
-{
- return (unsigned long)&x->fixup + x->fixup;
-}
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
@@ -293,42 +269,19 @@ extern unsigned long __copy_tofrom_user(void __user *to,
#ifndef __powerpc64__
-static inline unsigned long copy_from_user(void *to,
- const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n))) {
- check_object_size(to, n, false);
- return __copy_tofrom_user((__force void __user *)to, from, n);
- }
- memset(to, 0, n);
- return n;
-}
-
-static inline unsigned long copy_to_user(void __user *to,
- const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n)) {
- check_object_size(from, n, true);
- return __copy_tofrom_user(to, (__force void __user *)from, n);
- }
- return n;
-}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
- __copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
- unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
- unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n);
-
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ return __copy_tofrom_user(to, from, n);
+}
#endif /* __powerpc64__ */
-static inline unsigned long __copy_from_user_inatomic(void *to,
+static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
@@ -352,12 +305,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
return 0;
}
- check_object_size(to, n, false);
-
return __copy_tofrom_user((__force void __user *)to, from, n);
}
-static inline unsigned long __copy_to_user_inatomic(void __user *to,
+static inline unsigned long raw_copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
@@ -381,25 +332,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
return 0;
}
- check_object_size(from, n, true);
-
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
-static inline unsigned long __copy_from_user(void *to,
- const void __user *from, unsigned long size)
-{
- might_fault();
- return __copy_from_user_inatomic(to, from, size);
-}
-
-static inline unsigned long __copy_to_user(void __user *to,
- const void *from, unsigned long size)
-{
- might_fault();
- return __copy_to_user_inatomic(to, from, size);
-}
-
extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
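
With the raw_copy_* conversion, the architecture only supplies raw_copy_{to,from}_user() returning the number of bytes not copied; access_ok(), might_fault(), object-size checking and zero-filling of the uncopied tail move into the generic copy_from_user()/copy_to_user() wrappers. A rough sketch of the from-user side, under the assumption that the generic wrapper behaves as just described (simplified, not the actual include/linux/uaccess.h code):

/* Illustrative only -- the real wrapper lives in include/linux/uaccess.h. */
static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        might_fault();
        if (likely(access_ok(VERIFY_READ, from, n)))
                res = raw_copy_from_user(to, from, n);
        if (unlikely(res))
                memset(to + (n - res), 0, res);  /* zero the uncopied tail */
        return res;
}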
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index eb1acee91a20..9ba11dbcaca9 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 383
+#define NR_syscalls 384
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 2f26335a3c42..b85f14228857 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -393,5 +393,6 @@
#define __NR_preadv2 380
#define __NR_pwritev2 381
#define __NR_kexec_file_load 382
+#define __NR_statx 383
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index bb7a1890aeb7..e79b9daa873c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
extern void __flush_tlb_power9(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_E500)
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
.platform = "power9",
},
{ /* Cell Broadband Engine */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 995728736677..6fd08219248d 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
_GLOBAL(pnv_wakeup_tb_loss)
ld r1,PACAR1(r13)
/*
- * Before entering any idle state, the NVGPRs are saved in the stack
- * and they are restored before switching to the process context. Hence
- * until they are restored, they are free to be used.
+ * Before entering any idle state, the NVGPRs are saved in the stack.
+ * If there was a state loss, or PACA_NAPSTATELOST was set, then the
+ * NVGPRs are restored. If we are here, it is likely that state is lost,
+	 * but not guaranteed -- neither the ISA207 nor the ISA300 test used to
+	 * reach here is the same as the test used to restore NVGPRs:
+	 * the PACA_THREAD_IDLE_STATE test for ISA207, the PSSCR test for ISA300,
+	 * and the SRR1 test for restoring NVGPRs.
+ *
+ * We are about to clobber NVGPRs now, so set NAPSTATELOST to
+ * guarantee they will always be restored. This might be tightened
+ * with careful reading of specs (particularly for ISA300) but this
+ * is already a slow wakeup path and it's simpler to be safe.
+ */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
+
+ /*
*
* Save SRR1 and LR in NVGPRs as they might be clobbered in
* opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index c6923ff45131..a1475e6aef3a 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
case MCE_ERROR_TYPE_TLB:
mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
break;
+ case MCE_ERROR_TYPE_USER:
+ mce->u.user_error.user_error_type = mce_err->u.user_error_type;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ mce->u.link_error.link_error_type = mce_err->u.link_error_type;
+ break;
case MCE_ERROR_TYPE_UNKNOWN:
default:
break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->gpr3 = regs->gpr[3];
mce->in_use = 1;
- mce->initiator = MCE_INITIATOR_CPU;
/* Mark it recovered if we have handled it and MSR(RI=1). */
if (handled && (regs->msr & MSR_RI))
mce->disposition = MCE_DISPOSITION_RECOVERED;
else
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
- mce->severity = MCE_SEV_ERROR_SYNC;
+
+ mce->initiator = mce_err->initiator;
+ mce->severity = mce_err->severity;
/*
* Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
mce->u.erat_error.effective_address_provided = true;
mce->u.erat_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
+ mce->u.user_error.effective_address_provided = true;
+ mce->u.user_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
+ mce->u.ra_error.effective_address_provided = true;
+ mce->u.ra_error.effective_address = addr;
+ } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
+ mce->u.link_error.effective_address_provided = true;
+ mce->u.link_error.effective_address = addr;
} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
mce->u.ue_error.effective_address_provided = true;
mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
"Parity",
"Multihit",
};
+ static const char *mc_user_types[] = {
+ "Indeterminate",
+ "tlbie(l) invalid",
+ };
+ static const char *mc_ra_types[] = {
+ "Indeterminate",
+ "Instruction fetch (bad)",
+ "Page table walk ifetch (bad)",
+ "Page table walk ifetch (foreign)",
+ "Load (bad)",
+ "Store (bad)",
+ "Page table walk Load/Store (bad)",
+ "Page table walk Load/Store (foreign)",
+ "Load/Store (foreign)",
+ };
+ static const char *mc_link_types[] = {
+ "Indeterminate",
+ "Instruction fetch (timeout)",
+ "Page table walk ifetch (timeout)",
+ "Load (timeout)",
+ "Store (timeout)",
+ "Page table walk Load/Store (timeout)",
+ };
/* Print things out */
if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
printk("%s Effective address: %016llx\n",
level, evt->u.tlb_error.effective_address);
break;
+ case MCE_ERROR_TYPE_USER:
+ subtype = evt->u.user_error.user_error_type <
+ ARRAY_SIZE(mc_user_types) ?
+ mc_user_types[evt->u.user_error.user_error_type]
+ : "Unknown";
+ printk("%s Error type: User [%s]\n", level, subtype);
+ if (evt->u.user_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.user_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_RA:
+ subtype = evt->u.ra_error.ra_error_type <
+ ARRAY_SIZE(mc_ra_types) ?
+ mc_ra_types[evt->u.ra_error.ra_error_type]
+ : "Unknown";
+ printk("%s Error type: Real address [%s]\n", level, subtype);
+ if (evt->u.ra_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.ra_error.effective_address);
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ subtype = evt->u.link_error.link_error_type <
+ ARRAY_SIZE(mc_link_types) ?
+ mc_link_types[evt->u.link_error.link_error_type]
+ : "Unknown";
+ printk("%s Error type: Link [%s]\n", level, subtype);
+ if (evt->u.link_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt->u.link_error.effective_address);
+ break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
printk("%s Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
if (evt->u.tlb_error.effective_address_provided)
return evt->u.tlb_error.effective_address;
break;
+ case MCE_ERROR_TYPE_USER:
+ if (evt->u.user_error.effective_address_provided)
+ return evt->u.user_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_RA:
+ if (evt->u.ra_error.effective_address_provided)
+ return evt->u.ra_error.effective_address;
+ break;
+ case MCE_ERROR_TYPE_LINK:
+ if (evt->u.link_error.effective_address_provided)
+ return evt->u.link_error.effective_address;
+ break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
break;
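
Because machine_check_event keeps the per-class data in a union, consumers must switch on error_type before touching any member, as both the print helper and get_mce_fault_addr() above do for the new USER/RA/LINK classes. A minimal example of the same pattern, using only the fields added to the header earlier in this patch (the helper name is illustrative):

/* Sketch: pull an effective address out of the new RA class, if provided. */
static bool sketch_ra_addr(const struct machine_check_event *evt, uint64_t *ea)
{
        if (evt->error_type != MCE_ERROR_TYPE_RA)
                return false;
        if (!evt->u.ra_error.effective_address_provided)
                return false;
        *ea = evt->u.ra_error.effective_address;
        return true;
}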
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 7353991c4ece..763d6f58caa8 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
}
#endif
+static void flush_erat(void)
+{
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+}
+
+#define MCE_FLUSH_SLB 1
+#define MCE_FLUSH_TLB 2
+#define MCE_FLUSH_ERAT 3
+
+static int mce_flush(int what)
+{
+#ifdef CONFIG_PPC_STD_MMU_64
+ if (what == MCE_FLUSH_SLB) {
+ flush_and_reload_slb();
+ return 1;
+ }
+#endif
+ if (what == MCE_FLUSH_ERAT) {
+ flush_erat();
+ return 1;
+ }
+ if (what == MCE_FLUSH_TLB) {
+ if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
+{
+ if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
+ dsisr &= ~slb;
+ if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
+ dsisr &= ~erat;
+ if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
+ dsisr &= ~tlb;
+ /* Any other errors we don't understand? */
+ if (dsisr)
+ return 0;
+ return 1;
+}
+
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
{
long handled = 1;
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
+ mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+ mce_error_info.initiator = MCE_INITIATOR_CPU;
+
srr1 = regs->msr;
nip = regs->nip;
@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
long handled = 1;
struct mce_error_info mce_error_info = { 0 };
+ mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+ mce_error_info.initiator = MCE_INITIATOR_CPU;
+
srr1 = regs->msr;
nip = regs->nip;
@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
save_mce_event(regs, handled, &mce_error_info, nip, addr);
return handled;
}
+
+static int mce_handle_derror_p9(struct pt_regs *regs)
+{
+ uint64_t dsisr = regs->dsisr;
+
+ return mce_handle_flush_derrors(dsisr,
+ P9_DSISR_MC_SLB_PARITY_MFSLB |
+ P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
+
+ P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
+
+ P9_DSISR_MC_ERAT_MULTIHIT);
+}
+
+static int mce_handle_ierror_p9(struct pt_regs *regs)
+{
+ uint64_t srr1 = regs->msr;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_SLB_PARITY:
+ case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ return mce_flush(MCE_FLUSH_SLB);
+ case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ return mce_flush(MCE_FLUSH_TLB);
+ case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+ return mce_flush(MCE_FLUSH_ERAT);
+ default:
+ return 0;
+ }
+}
+
+static void mce_get_derror_p9(struct pt_regs *regs,
+ struct mce_error_info *mce_err, uint64_t *addr)
+{
+ uint64_t dsisr = regs->dsisr;
+
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ mce_err->initiator = MCE_INITIATOR_CPU;
+
+ if (dsisr & P9_DSISR_MC_USER_TLBIE)
+ *addr = regs->nip;
+ else
+ *addr = regs->dar;
+
+ if (dsisr & P9_DSISR_MC_UE) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
+ } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
+ } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
+ mce_err->error_type = MCE_ERROR_TYPE_USER;
+ mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
+ } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ } else if (dsisr & P9_DSISR_MC_RA_LOAD) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
+ } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+ } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+ } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+ }
+}
+
+static void mce_get_ierror_p9(struct pt_regs *regs,
+ struct mce_error_info *mce_err, uint64_t *addr)
+{
+ uint64_t srr1 = regs->msr;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+ case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+ mce_err->severity = MCE_SEV_FATAL;
+ break;
+ default:
+ mce_err->severity = MCE_SEV_ERROR_SYNC;
+ break;
+ }
+
+ mce_err->initiator = MCE_INITIATOR_CPU;
+
+ *addr = regs->nip;
+
+ switch (P9_SRR1_MC_IFETCH(srr1)) {
+ case P9_SRR1_MC_IFETCH_UE:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_SLB_PARITY:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+ break;
+ case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_SLB;
+ mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+ mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+ mce_err->error_type = MCE_ERROR_TYPE_TLB;
+ mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+ break;
+ case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+ mce_err->error_type = MCE_ERROR_TYPE_UE;
+ mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_RA:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
+ break;
+ case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+ mce_err->error_type = MCE_ERROR_TYPE_LINK;
+ mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
+ break;
+ case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
+ mce_err->error_type = MCE_ERROR_TYPE_RA;
+ mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
+ break;
+ default:
+ break;
+ }
+}
+
+long __machine_check_early_realmode_p9(struct pt_regs *regs)
+{
+ uint64_t nip, addr;
+ long handled;
+ struct mce_error_info mce_error_info = { 0 };
+
+ nip = regs->nip;
+
+ if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
+ handled = mce_handle_derror_p9(regs);
+ mce_get_derror_p9(regs, &mce_error_info, &addr);
+ } else {
+ handled = mce_handle_ierror_p9(regs);
+ mce_get_ierror_p9(regs, &mce_error_info, &addr);
+ }
+
+ /* Handle UE error. */
+ if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+ handled = mce_handle_ue_error(regs);
+
+ save_mce_event(regs, handled, &mce_error_info, nip, addr);
+ return handled;
+}
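
The new __machine_check_early_realmode_p9() is reached through the machine_check_early cpu_spec hook wired up in cputable.c earlier in this patch. A hedged sketch of how that dispatch is expected to look, assuming the usual cur_cpu_spec indirection (the actual caller lives in traps.c and is not part of this hunk):

/* Sketch only: how the per-CPU-family handler is expected to be invoked. */
static long sketch_machine_check_early(struct pt_regs *regs)
{
        long handled = 0;

        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);
        return handled;
}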
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3158fb16de3..8c68145ba1bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hva, NULL, NULL);
if (ptep) {
pte = kvmppc_read_update_linux_pte(ptep, 1);
- if (pte_write(pte))
+ if (__pte_write(pte))
write_ok = 1;
}
local_irq_restore(flags);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6fca970373ee..ce6f2121fffe 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
}
pte = kvmppc_read_update_linux_pte(ptep, writing);
if (pte_present(pte) && !pte_protnone(pte)) {
- if (writing && !pte_write(pte))
+ if (writing && !__pte_write(pte))
/* make the actual HPTE be read-only */
ptel = hpte_make_readonly(ptel);
is_ci = pte_ci(pte);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 2b5e09020cfe..ed7dfce331e0 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -14,7 +14,7 @@ obj-y += string.o alloc.o crtsavres.o code-patching.o \
obj-$(CONFIG_PPC32) += div64.o copy_32.o
-obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
+obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
memcpy_64.o memcmp_64.o
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index ff0d894d7ff9..8aedbb5f4b86 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -477,18 +477,6 @@ _GLOBAL(__copy_tofrom_user)
bdnz 130b
/* then clear out the destination: r3 bytes starting at 4(r6) */
132: mfctr r3
- srwi. r0,r3,2
- li r9,0
- mtctr r0
- beq 113f
-112: stwu r9,4(r6)
- bdnz 112b
-113: andi. r0,r3,3
- mtctr r0
- beq 120f
-114: stb r9,4(r6)
- addi r6,r6,1
- bdnz 114b
120: blr
EX_TABLE(30b,108b)
@@ -497,7 +485,5 @@ _GLOBAL(__copy_tofrom_user)
EX_TABLE(41b,111b)
EX_TABLE(130b,132b)
EX_TABLE(131b,120b)
- EX_TABLE(112b,120b)
- EX_TABLE(114b,120b)
EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index aee6e24e81ab..08da06e1bd72 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -319,32 +319,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
/*
- * here we have trapped again, need to clear ctr bytes starting at r3
+ * here we have trapped again, amount remaining is in ctr.
*/
-143: mfctr r5
- li r0,0
- mr r4,r3
- mr r3,r5 /* return the number of bytes not copied */
-1: andi. r9,r4,7
- beq 3f
-90: stb r0,0(r4)
- addic. r5,r5,-1
- addi r4,r4,1
- bne 1b
- blr
-3: cmpldi cr1,r5,8
- srdi r9,r5,3
- andi. r5,r5,7
- blt cr1,93f
- mtctr r9
-91: std r0,0(r4)
- addi r4,r4,8
- bdnz 91b
-93: beqlr
- mtctr r5
-92: stb r0,0(r4)
- addi r4,r4,1
- bdnz 92b
+143: mfctr r3
blr
/*
@@ -389,10 +366,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
ld r5,-8(r1)
add r6,r6,r5
subf r3,r3,r6 /* #bytes not copied */
-190:
-191:
-192:
- blr /* #bytes not copied in r3 */
+ blr
EX_TABLE(20b,120b)
EX_TABLE(220b,320b)
@@ -451,9 +425,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
EX_TABLE(88b,188b)
EX_TABLE(43b,143b)
EX_TABLE(89b,189b)
- EX_TABLE(90b,190b)
- EX_TABLE(91b,191b)
- EX_TABLE(92b,192b)
/*
* Routine to copy a whole page of data, optimized for POWER4.
diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
deleted file mode 100644
index 9bd3a3dad78d..000000000000
--- a/arch/powerpc/lib/usercopy_64.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Functions which are too large to be inlined.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/uaccess.h>
-
-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n)))
- n = __copy_from_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-
-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
- n = __copy_to_user(to, from, n);
- return n;
-}
-
-unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, from, n) &&
- access_ok(VERIFY_WRITE, to, n)))
- n =__copy_tofrom_user(to, from, n);
- return n;
-}
-
-EXPORT_SYMBOL(copy_from_user);
-EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_in_user);
-
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9be992083d2a..c22f207aa656 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -397,8 +397,7 @@ static void early_check_vec5(void)
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
- /* We don't yet have the machinery to do radix as a guest. */
- if (disable_radix || !(mfmsr() & MSR_HV))
+ if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
/*
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 595dd718ea87..2ff13249f87a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
sdsync = POWER7P_MMCRA_SDAR_VALID;
else if (ppmu->flags & PPMU_ALT_SIPR)
sdsync = POWER6_MMCRA_SDSYNC;
+ else if (ppmu->flags & PPMU_NO_SIAR)
+ sdsync = MMCRA_SAMPLE_ENABLE;
else
sdsync = MMCRA_SDSYNC;
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index e79fb5fb817d..cd951fd231c4 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
return !(event & ~valid_mask);
}
-static u64 mmcra_sdar_mode(u64 event)
+static inline bool is_event_marked(u64 event)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
- return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+ if (event & EVENT_IS_MARKED)
+ return true;
+
+ return false;
+}
- return MMCRA_SDAR_MODE_TLB;
+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
+{
+ /*
+	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+	 * continuous sampling mode.
+	 *
+	 * In case of Power8:
+	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling
+	 * mode and will be unchanged when setting MMCRA[63] (Marked events).
+	 *
+	 * In case of Power9:
+	 * Marked events: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates')
+	 * if the event is marked or the group already has a marked event.
+	 * Non-marked events (for DD1):
+	 * MMCRA[SDAR_MODE] will be set to 0b01.
+	 * For the rest:
+	 * MMCRA[SDAR_MODE] will be set from the event code.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
+ *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
+ else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+ *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+ else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ *mmcra |= MMCRA_SDAR_MODE_TLB;
+ } else
+ *mmcra |= MMCRA_SDAR_MODE_TLB;
}
static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
value |= CNST_L1_QUAL_VAL(cache);
}
- if (event & EVENT_IS_MARKED) {
+ if (is_event_marked(event)) {
mask |= CNST_SAMPLE_MASK;
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
}
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
}
/* In continuous sampling mode, update SDAR on TLB miss */
- mmcra |= mmcra_sdar_mode(event[i]);
+ mmcra_sdar_mode(event[i], &mmcra);
if (event[i] & EVENT_IS_L1) {
cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
}
- if (event[i] & EVENT_IS_MARKED) {
+ if (is_event_marked(event[i])) {
mmcra |= MMCRA_SAMPLE_ENABLE;
val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index cf9bd8990159..899210f14ee4 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -246,6 +246,7 @@
#define MMCRA_THR_CMP_SHIFT 32
#define MMCRA_SDAR_MODE_SHIFT 42
#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
#define MMCRA_IFM_SHIFT 30
/* MMCR1 Threshold Compare bit constant for power9 */
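
Note that MMCRA_SDAR_MODE_NO_UPDATES is defined with a leading ~, so unlike MMCRA_SDAR_MODE_TLB it is an AND-mask that clears the two-bit SDAR_MODE field rather than a value to OR in; that is why mmcra_sdar_mode() uses &= for the marked-event case and |= otherwise. A small sketch of the distinction (simplified from the patch, not a replacement for it):

/* Illustrative: clearing vs. setting MMCRA[SDAR_MODE] (two bits at shift 42). */
static void sketch_sdar_mode(unsigned long *mmcra, bool marked)
{
        if (marked)
                *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;  /* AND-mask: clears the field */
        else
                *mmcra |= MMCRA_SDAR_MODE_TLB;         /* OR-value: sets mode 0b01 */
}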
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 86d9fde93c17..e0f856bfbfe8 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
struct machine_check_event *evt)
{
int recovered = 0;
- uint64_t ea = get_mce_fault_addr(evt);
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
/* Platform corrected itself */
recovered = 1;
- } else if (ea && !is_kernel_addr(ea)) {
+ } else if (evt->severity == MCE_SEV_FATAL) {
+ /* Fatal machine check */
+ pr_err("Machine check interrupt is fatal\n");
+ recovered = 0;
+ } else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
+ (user_mode(regs) && !is_global_init(current))) {
/*
- * Faulting address is not in kernel text. We should be fine.
- * We need to find which process uses this address.
* For now, kill the task if we have received exception when
* in userspace.
*
	 * TODO: Queue up this address for hwpoisoning later.
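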
*/
- if (user_mode(regs) && !is_global_init(current)) {
- _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
- recovered = 1;
- } else
- recovered = 0;
- } else if (user_mode(regs) && !is_global_init(current) &&
- evt->severity == MCE_SEV_ERROR_SYNC) {
- /*
- * If we have received a synchronous error when in userspace
- * kill the task.
- */
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
recovered = 1;
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6901a06da2f9..e36738291c32 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
- struct pci_bus *bus)
+ struct pci_bus *bus,
+ bool add_to_group)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
set_dma_offset(&dev->dev, pe->tce_bypass_base);
- iommu_add_device(&dev->dev);
+ if (add_to_group)
+ iommu_add_device(&dev->dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate,
+ add_to_group);
}
}
@@ -2191,7 +2194,7 @@ found:
set_iommu_table_base(&pe->pdev->dev, tbl);
iommu_add_device(&pe->pdev->dev);
} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
return;
fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_set_bypass(pe, false);
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+ if (pe->pbus)
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
pnv_ioda2_table_free(tbl);
}
@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
table_group);
pnv_pci_ioda2_setup_default_config(pe);
+ if (pe->pbus)
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
}
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
+ if ((level_shift - 3) * levels + page_shift >= 60)
+ return -EINVAL;
+
/* Allocate TCE table */
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
if (pe->flags & PNV_IODA_PE_DEV)
iommu_add_device(&pe->pdev->dev);
else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
}
#ifdef CONFIG_PCI_MSI
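
The new bound in pnv_pci_ioda2_table_alloc_pages() rejects TCE table geometries whose total addressable span would need 60 or more bits: with 8-byte entries each indirect level resolves (level_shift - 3) index bits, there are `levels` such levels, and page_shift bits address within the final page. A worked example under that reading (the numbers are illustrative, not taken from the patch):

/* Illustrative check mirroring the new bound. */
static bool sketch_tce_geometry_ok(unsigned int level_shift,
                                   unsigned int levels,
                                   unsigned int page_shift)
{
        /* 8-byte entries: each level resolves (level_shift - 3) index bits. */
        return (level_shift - 3) * levels + page_shift < 60;
}

/* e.g. level_shift=16, levels=5, page_shift=12 gives 13*5+12 = 77, rejected. */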
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 251060cf1713..8b1fe895daa3 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
- mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
+
+ if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+ mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
}
void radix_init_pseries(void)
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S
index f9760ccf4032..3696ea6c4826 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline.S
@@ -116,13 +116,13 @@ dt_offset:
.data
.balign 8
-.globl sha256_digest
-sha256_digest:
+.globl purgatory_sha256_digest
+purgatory_sha256_digest:
.skip 32
- .size sha256_digest, . - sha256_digest
+ .size purgatory_sha256_digest, . - purgatory_sha256_digest
.balign 8
-.globl sha_regions
-sha_regions:
+.globl purgatory_sha_regions
+purgatory_sha_regions:
.skip 8 * 2 * 16
- .size sha_regions, . - sha_regions
+ .size purgatory_sha_regions, . - purgatory_sha_regions
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ada29eaed6e2..f523ac883150 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -274,7 +274,9 @@ failed:
if (bank->disk->major > 0)
unregister_blkdev(bank->disk->major,
bank->disk->disk_name);
- del_gendisk(bank->disk);
+ if (bank->disk->flags & GENHD_FL_UP)
+ del_gendisk(bank->disk);
+ put_disk(bank->disk);
}
device->dev.platform_data = NULL;
if (bank->io_addr != 0)
@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
device_remove_file(&device->dev, &dev_attr_ecc);
free_irq(bank->irq_id, device);
del_gendisk(bank->disk);
+ put_disk(bank->disk);
iounmap((void __iomem *) bank->io_addr);
kfree(bank);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a2dcef0aacc7..0724dbf2d8f8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -178,6 +178,7 @@ config S390
select ARCH_HAS_SCALED_CPUTIME
select VIRT_TO_BUS
select HAVE_NMI
+ select ARCH_HAS_RAW_COPY_USER
config SCHED_OMIT_FRAME_POINTER
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index d69ea495c4d7..716b17238599 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
if (k < n) {
- if (__ctr_paes_set_key(ctx) != 0)
+ if (__ctr_paes_set_key(ctx) != 0) {
+ if (locked)
+ spin_unlock(&ctrblk_lock);
return blkcipher_walk_done(desc, walk, -EIO);
+ }
}
}
if (locked)
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index d1c407ddf703..9072bf63a846 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -8,31 +8,27 @@
#define _S390_CPUTIME_H
#include <linux/types.h>
-#include <asm/div64.h>
+#include <asm/timex.h>
#define CPUTIME_PER_USEC 4096ULL
#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
-typedef unsigned long long __nocast cputime_t;
-typedef unsigned long long __nocast cputime64_t;
-
#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
-static inline unsigned long __div(unsigned long long n, unsigned long base)
-{
- return n / base;
-}
-
/*
- * Convert cputime to microseconds and back.
+ * Convert cputime to microseconds.
*/
-static inline unsigned int cputime_to_usecs(const cputime_t cputime)
+static inline u64 cputime_to_usecs(const u64 cputime)
{
- return (__force unsigned long long) cputime >> 12;
+ return cputime >> 12;
}
+/*
+ * Convert cputime to nanoseconds.
+ */
+#define cputime_to_nsecs(cputime) tod_to_ns(cputime)
u64 arch_cpu_idle_time(int cpu);
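
With the cputime_t wrapper gone, cputime_to_usecs() is a plain shift, since CPUTIME_PER_USEC is 4096 == 2^12, and cputime_to_nsecs() can simply reuse tod_to_ns() because the CPU timer ticks in the same units as the TOD clock. A tiny worked check under those constants (sketch only):

/* Illustrative check: 1 second of CPU-timer ticks is 4096 * 10^6 units. */
static void sketch_cputime_units(void)
{
        u64 one_second = 4096ULL * 1000000;   /* CPUTIME_PER_SEC */
        u64 usecs = one_second >> 12;         /* cputime_to_usecs() -> 1000000 */
        u64 nsecs = tod_to_ns(one_second);    /* cputime_to_nsecs() -> 1000000000 */
}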
diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h
new file mode 100644
index 000000000000..16cfe2d62eeb
--- /dev/null
+++ b/arch/s390/include/asm/extable.h
@@ -0,0 +1,28 @@
+#ifndef __S390_EXTABLE_H
+#define __S390_EXTABLE_H
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ int insn, fixup;
+};
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->fixup + x->fixup;
+}
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#endif
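
With ARCH_HAS_RELATIVE_EXTABLE each table entry stores two 32-bit offsets relative to the entry's own fields, so the table is position independent and half the size of an absolute-address table. extable_fixup() above resolves the continuation address; resolving the faulting-instruction address works the same way. A minimal sketch (the helper name is hypothetical; the generic search code provides its own equivalent):

/* Sketch: recover the absolute faulting-instruction address from the entry. */
static inline unsigned long
sketch_extable_insn(const struct exception_table_entry *x)
{
        return (unsigned long)&x->insn + x->insn;
}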
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7ed1972b1920..93e37b12e882 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -24,6 +24,7 @@
* the S390 page table tree.
*/
#ifndef __ASSEMBLY__
+#include <asm-generic/5level-fixup.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 354344dcc198..118535123f34 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void)
* ns = (todval * 125) >> 9;
*
* In order to avoid an overflow with the multiplication we can rewrite this.
- * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
+ * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits)
* we end up with
*
- * ns = ((2^32 * th + tl) * 125 ) >> 9;
- * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
+ * ns = ((2^9 * th + tl) * 125 ) >> 9;
+ * -> ns = (th * 125) + ((tl * 125) >> 9);
*
*/
static inline unsigned long long tod_to_ns(unsigned long long todval)
{
- unsigned long long ns;
-
- ns = ((todval >> 32) << 23) * 125;
- ns += ((todval & 0xffffffff) * 125) >> 9;
- return ns;
+ return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}
#endif
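
Both the old and the new split exist to avoid the naive (todval * 125) >> 9, which the surrounding comment notes could overflow the 64-bit multiplication; splitting at bit 9 instead of bit 32 computes the same value with fewer operations, since the high part times 2^9 * 125 is already a multiple of 2^9. A quick sketch comparing the two forms (helper names are illustrative, not kernel code; both return the same result for valid TOD values):

/* Both forms compute (todval * 125) >> 9 without overflowing 64 bits. */
static unsigned long long old_tod_to_ns(unsigned long long todval)
{
        return (((todval >> 32) << 23) * 125) +
               (((todval & 0xffffffff) * 125) >> 9);
}

static unsigned long long new_tod_to_ns(unsigned long long todval)
{
        return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}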
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 7228ed8da67d..72d69b9585c2 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -14,6 +14,7 @@
*/
#include <asm/processor.h>
#include <asm/ctl_reg.h>
+#include <asm/extable.h>
/*
@@ -59,72 +60,14 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
#define access_ok(type, addr, size) __access_ok(addr, size)
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- int insn, fixup;
-};
-
-static inline unsigned long extable_fixup(const struct exception_table_entry *x)
-{
- return (unsigned long)&x->fixup + x->fixup;
-}
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long __must_check __copy_from_user(void *to, const void __user *from,
- unsigned long n);
+unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long __must_check __copy_to_user(void __user *to, const void *from,
- unsigned long n);
+unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
@@ -213,13 +156,13 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
- size = __copy_to_user(ptr, x, size);
+ size = raw_copy_to_user(ptr, x, size);
return size ? -EFAULT : 0;
}
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
- size = __copy_from_user(x, ptr, size);
+ size = raw_copy_from_user(x, ptr, size);
return size ? -EFAULT : 0;
}
@@ -309,77 +252,8 @@ int __get_user_bad(void) __attribute__((noreturn));
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- might_fault();
- return __copy_to_user(to, from, n);
-}
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned int sz = __compiletime_object_size(to);
-
- might_fault();
- if (unlikely(sz != -1 && sz < n)) {
- if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
- return n;
- }
- return __copy_from_user(to, from, n);
-}
-
unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- might_fault();
- return __copy_in_user(to, from, n);
-}
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
/*
* Copy a null terminated string from userspace.
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 4384bc797a54..152de9b796e1 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -313,7 +313,9 @@
#define __NR_copy_file_range 375
#define __NR_preadv2 376
#define __NR_pwritev2 377
-#define NR_syscalls 378
+/* Number 378 is reserved for guarded storage */
+#define __NR_statx 379
+#define NR_syscalls 380
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index ae2cda5eee5a..e89cc2e71db1 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index dff2152350a7..6a7d737d514c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -490,7 +490,7 @@ ENTRY(pgm_check_handler)
jnz .Lpgm_svcper # -> single stepped svc
1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 3f
+ j 4f
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
lgr %r14,%r12
@@ -499,8 +499,8 @@ ENTRY(pgm_check_handler)
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
jz 3f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-3: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stg %r10,__THREAD_last_break(%r14)
+3: stg %r10,__THREAD_last_break(%r14)
+4: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@@ -509,14 +509,14 @@ ENTRY(pgm_check_handler)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 4f
+ jz 5f
tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-4: REENABLE_IRQS
+5: REENABLE_IRQS
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index b67dafb7b7cf..e545ffe5155a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,6 +564,8 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
+ if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
+ diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
diag308(DIAG308_LOAD_CLEAR, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 20cd339e11ae..f29e41c5e2ec 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -124,7 +124,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
p->thread.user_timer = 0;
+ p->thread.guest_timer = 0;
p->thread.system_timer = 0;
+ p->thread.hardirq_timer = 0;
+ p->thread.softirq_timer = 0;
frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9b59e6212d8f..2659b5cfeddb 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2)
SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
+NI_SYSCALL
+SYSCALL(sys_statx,compat_sys_statx)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c14fc9029912..072d84ba42a3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime)
}
static void account_system_index_scaled(struct task_struct *p,
- cputime_t cputime, cputime_t scaled,
+ u64 cputime, u64 scaled,
enum cpu_usage_stat index)
{
p->stimescaled += cputime_to_nsecs(scaled);
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index f481fcde067b..1e5bb2b86c42 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -26,7 +26,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
tmp1 = -4096UL;
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
- "9: jz 7f\n"
+ "6: jz 4f\n"
"1: algr %0,%3\n"
" slgr %1,%3\n"
" slgr %2,%3\n"
@@ -35,23 +35,13 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
- " jnh 4f\n"
+ " jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
- "10:slgr %0,%4\n"
- " algr %2,%4\n"
- "4: lghi %4,-1\n"
- " algr %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,6f\n" /* memset loop */
- " xc 0(1,%2),0(%2)\n"
- "5: xc 0(256,%2),0(%2)\n"
- " la %2,256(%2)\n"
- "6: aghi %4,-256\n"
- " jnm 5b\n"
- " ex %4,0(%3)\n"
- " j 8f\n"
- "7: slgr %0,%0\n"
- "8:\n"
- EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+ "7: slgr %0,%4\n"
+ " j 5f\n"
+ "4: slgr %0,%0\n"
+ "5:\n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: "d" (reg0) : "cc", "memory");
return size;
@@ -67,49 +57,38 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
asm volatile(
" sacf 0\n"
"0: mvcp 0(%0,%2),0(%1),%3\n"
- "10:jz 8f\n"
+ "7: jz 5f\n"
"1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%3\n"
- "11:jnz 1b\n"
- " j 8f\n"
+ "8: jnz 1b\n"
+ " j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
" lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
" slgr %4,%1\n"
" clgr %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
+ " jnh 6f\n"
"4: mvcp 0(%4,%2),0(%1),%3\n"
- "12:slgr %0,%4\n"
- " algr %2,%4\n"
- "5: lghi %4,-1\n"
- " algr %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,7f\n" /* memset loop */
- " xc 0(1,%2),0(%2)\n"
- "6: xc 0(256,%2),0(%2)\n"
- " la %2,256(%2)\n"
- "7: aghi %4,-256\n"
- " jnm 6b\n"
- " ex %4,0(%3)\n"
- " j 9f\n"
- "8: slgr %0,%0\n"
- "9: sacf 768\n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
- EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+ "9: slgr %0,%4\n"
+ " j 6f\n"
+ "5: slgr %0,%0\n"
+ "6: sacf 768\n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
: : "cc", "memory");
return size;
}
-unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- check_object_size(to, n, false);
if (static_branch_likely(&have_mvcos))
return copy_from_user_mvcos(to, from, n);
return copy_from_user_mvcp(to, from, n);
}
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
@@ -176,14 +155,13 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
return size;
}
-unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- check_object_size(from, n, true);
if (static_branch_likely(&have_mvcos))
return copy_to_user_mvcos(to, from, n);
return copy_to_user_mvcs(to, from, n);
}
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
unsigned long size)
@@ -240,13 +218,13 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
return size;
}
-unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
if (static_branch_likely(&have_mvcos))
return copy_in_user_mvcos(to, from, n);
return copy_in_user_mvc(to, from, n);
}
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b48dc5f1900b..463e5ef02304 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
spinlock_t *ptl;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
pgste_t pgste;
pte_t *ptep;
pte_t pte;
bool dirty;
- ptep = get_locked_pte(mm, addr, &ptl);
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (!pud)
+ return false;
+ pmd = pmd_alloc(mm, pud, addr);
+ if (!pmd)
+ return false;
+ /* We can't run guests backed by huge pages, but userspace can
+ * still set them up and then try to migrate them without any
+ * migration support.
+ */
+ if (pmd_large(*pmd))
+ return true;
+
+ ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (unlikely(!ptep))
return false;
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 507d63181389..4d241cfedc3b 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -15,6 +15,7 @@ config SCORE
select MODULES_USE_ELF_REL
select CLONE_BACKWARDS
select CPU_NO_EFFICIENT_FFS
+ select ARCH_HAS_RAW_COPY_USER
choice
prompt "System type"
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 926943a49ea5..e3a8d0f96652 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -4,6 +4,7 @@ header-y +=
generic-y += barrier.h
generic-y += clkdev.h
generic-y += current.h
+generic-y += extable.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/score/include/asm/extable.h b/arch/score/include/asm/extable.h
deleted file mode 100644
index c4423ccf830d..000000000000
--- a/arch/score/include/asm/extable.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_SCORE_EXTABLE_H
-#define _ASM_SCORE_EXTABLE_H
-
-struct exception_table_entry {
- unsigned long insn;
- unsigned long fixup;
-};
-
-struct pt_regs;
-extern int fixup_exception(struct pt_regs *regs);
-#endif
diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h
index 0553e5cd5985..46ff8fd678a7 100644
--- a/arch/score/include/asm/pgtable.h
+++ b/arch/score/include/asm/pgtable.h
@@ -2,6 +2,7 @@
#define _ASM_SCORE_PGTABLE_H
#include <linux/const.h>
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/fixmap.h>
diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
index 7a6c6982420a..916e5dbf0bfd 100644
--- a/arch/score/include/asm/uaccess.h
+++ b/arch/score/include/asm/uaccess.h
@@ -295,61 +295,19 @@ extern void __put_user_unknown(void);
extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long
-copy_from_user(void *to, const void *from, unsigned long len)
+raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
- unsigned long res = len;
-
- if (likely(access_ok(VERIFY_READ, from, len)))
- res = __copy_tofrom_user(to, from, len);
-
- if (unlikely(res))
- memset(to + (len - res), 0, res);
-
- return res;
+ return __copy_tofrom_user(to, (__force const void *)from, len);
}
static inline unsigned long
-copy_to_user(void *to, const void *from, unsigned long len)
+raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
- if (likely(access_ok(VERIFY_WRITE, to, len)))
- len = __copy_tofrom_user(to, from, len);
-
- return len;
+ return __copy_tofrom_user((__force void *)to, from, len);
}
-static inline unsigned long
-__copy_from_user(void *to, const void *from, unsigned long len)
-{
- unsigned long left = __copy_tofrom_user(to, from, len);
- if (unlikely(left))
- memset(to + (len - left), 0, left);
- return left;
-}
-
-#define __copy_to_user(to, from, len) \
- __copy_tofrom_user((to), (from), (len))
-
-static inline unsigned long
-__copy_to_user_inatomic(void *to, const void *from, unsigned long len)
-{
- return __copy_to_user(to, from, len);
-}
-
-static inline unsigned long
-__copy_from_user_inatomic(void *to, const void *from, unsigned long len)
-{
- return __copy_tofrom_user(to, from, len);
-}
-
-#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len)
-
-static inline unsigned long
-copy_in_user(void *to, const void *from, unsigned long len)
-{
- if (access_ok(VERIFY_READ, from, len) &&
- access_ok(VERFITY_WRITE, to, len))
- return __copy_tofrom_user(to, from, len);
-}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
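Defining INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER asks the generic uaccess code to emit the _copy_from_user()/_copy_to_user() helpers inline in the header rather than as out-of-line functions in lib/usercopy.c. The to-user side, roughly (a simplified sketch of the generic header's selection logic; no zero-padding is needed on this path):

#ifdef INLINE_COPY_TO_USER
static inline unsigned long __must_check
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = raw_copy_to_user(to, from, n);
	return n;
}
#else
extern unsigned long __must_check
_copy_to_user(void __user *to, const void *from, unsigned long n);
#endif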
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index e359ec675869..12daf45369b4 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -24,6 +24,7 @@
*/
#include <linux/extable.h>
+#include <linux/ptrace.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c
index ec871355fc2d..6736a3ad6286 100644
--- a/arch/score/mm/extable.c
+++ b/arch/score/mm/extable.c
@@ -24,6 +24,8 @@
*/
#include <linux/extable.h>
+#include <linux/ptrace.h>
+#include <asm/extable.h>
int fixup_exception(struct pt_regs *regs)
{
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ee086958b2b2..c689645eb8b7 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -48,6 +48,7 @@ config SUPERH
select HAVE_ARCH_AUDITSYSCALL
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_NMI
+ select ARCH_HAS_RAW_COPY_USER
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c
index 340fd40b381d..9c292c27e0d7 100644
--- a/arch/sh/boards/mach-cayman/setup.c
+++ b/arch/sh/boards/mach-cayman/setup.c
@@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void)
SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX);
SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX);
-#ifdef CONFIG_IDE
/*
* Only IDE1 exists on the Cayman
*/
@@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void)
SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */
SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */
SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */
-#endif
/* Exit the configuration state */
outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR);
diff --git a/arch/sh/include/asm/extable.h b/arch/sh/include/asm/extable.h
new file mode 100644
index 000000000000..df2ee2fcb8d3
--- /dev/null
+++ b/arch/sh/include/asm/extable.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_SH_EXTABLE_H
+#define __ASM_SH_EXTABLE_H
+
+#include <asm-generic/extable.h>
+
+#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
+#define ARCH_HAS_SEARCH_EXTABLE
+#endif
+
+#endif
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
index 19bd89db17e7..f75cf4387257 100644
--- a/arch/sh/include/asm/pgtable-2level.h
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -1,6 +1,7 @@
#ifndef __ASM_SH_PGTABLE_2LEVEL_H
#define __ASM_SH_PGTABLE_2LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/*
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 249a985d9648..9b1e776eca31 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -1,6 +1,7 @@
#ifndef __ASM_SH_PGTABLE_3LEVEL_H
#define __ASM_SH_PGTABLE_3LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/*
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 89a28dfbabfa..2722b61b2283 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -2,6 +2,7 @@
#define __ASM_SH_UACCESS_H
#include <asm/segment.h>
+#include <asm/extable.h>
#define __addr_ok(addr) \
((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
@@ -107,19 +108,18 @@ extern __must_check long strnlen_user(const char __user *str, long n);
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __copy_user(to, (__force void *)from, n);
}
static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __copy_user((__force void *)to, from, n);
}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* Clear the area and return remaining number of bytes
@@ -139,55 +139,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
__cl_size; \
})
-static inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned long __copy_from = (unsigned long) from;
- __kernel_size_t __copy_size = (__kernel_size_t) n;
-
- if (__copy_size && __access_ok(__copy_from, __copy_size))
- __copy_size = __copy_user(to, from, __copy_size);
-
- if (unlikely(__copy_size))
- memset(to + (n - __copy_size), 0, __copy_size);
-
- return __copy_size;
-}
-
-static inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- unsigned long __copy_to = (unsigned long) to;
- __kernel_size_t __copy_size = (__kernel_size_t) n;
-
- if (__copy_size && __access_ok(__copy_to, __copy_size))
- return __copy_user(to, from, __copy_size);
-
- return __copy_size;
-}
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
-#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
-#define ARCH_HAS_SEARCH_EXTABLE
-#endif
-
-int fixup_exception(struct pt_regs *regs);
-
extern void *set_exception_table_vec(unsigned int vec, void *handler);
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
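The exception-table definitions dropped from sh's uaccess.h are not lost: the new asm/extable.h added above pulls them from asm-generic/extable.h, which provides essentially the same two-word entry (a sketch of the generic header):

struct exception_table_entry {
	unsigned long insn, fixup;	/* faulting insn address, fixup address */
};

struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);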
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 68ac5c7cd982..68353048f24e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -45,6 +45,7 @@ config SPARC
select HAVE_ARCH_HARDENED_USERCOPY
select PROVE_LOCKING_SMALL if PROVE_LOCKING
select ARCH_WANT_RELAX_ORDER
+ select ARCH_HAS_RAW_COPY_USER
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 56e49c8f770d..8a598528ec1f 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -12,6 +12,7 @@
* the SpitFire page tables.
*/
+#include <asm-generic/5level-fixup.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index a59a1e81986d..12ebee2d97c7 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -71,8 +71,6 @@ struct exception_table_entry
/* Returns 0 if exception not found and fixup otherwise. */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-void __ret_efault(void);
-
/* Uh, these should become the main single-value transfer routines..
* They automatically use the right size if we just have the right
* pointer type..
@@ -237,39 +235,18 @@ int __get_user_bad(void);
unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
-static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (n && __access_ok((unsigned long) to, n)) {
- check_object_size(from, n, true);
- return __copy_user(to, (__force void __user *) from, n);
- } else
- return n;
-}
-
-static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
}
-static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (n && __access_ok((unsigned long) from, n)) {
- check_object_size(to, n, false);
- return __copy_user((__force void __user *) to, from, n);
- } else {
- memset(to, 0, n);
- return n;
- }
-}
-
-static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __copy_user((__force void __user *) to, from, n);
}
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 7afb4f64553f..6096d671aa63 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -176,39 +176,19 @@ __asm__ __volatile__( \
int __get_user_bad(void);
-unsigned long __must_check ___copy_from_user(void *to,
+unsigned long __must_check raw_copy_from_user(void *to,
const void __user *from,
unsigned long size);
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long size)
-{
- check_object_size(to, size, false);
-
- return ___copy_from_user(to, from, size);
-}
-#define __copy_from_user copy_from_user
-unsigned long __must_check ___copy_to_user(void __user *to,
+unsigned long __must_check raw_copy_to_user(void __user *to,
const void *from,
unsigned long size);
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long size)
-{
- check_object_size(from, size, true);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
- return ___copy_to_user(to, from, size);
-}
-#define __copy_to_user copy_to_user
-
-unsigned long __must_check ___copy_in_user(void __user *to,
+unsigned long __must_check raw_copy_in_user(void __user *to,
const void __user *from,
unsigned long size);
-static inline unsigned long __must_check
-copy_in_user(void __user *to, void __user *from, unsigned long size)
-{
- return ___copy_in_user(to, from, size);
-}
-#define __copy_in_user copy_in_user
unsigned long __must_check __clear_user(void __user *, unsigned long);
@@ -217,9 +197,6 @@ unsigned long __must_check __clear_user(void __user *, unsigned long);
__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
unsigned int insn,
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 7bb317b87dde..7274e43ff9be 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -809,10 +809,3 @@ lvl14_save:
.word 0
.word 0
.word t_irq14
-
- .section ".fixup",#alloc,#execinstr
- .globl __ret_efault
-__ret_efault:
- ret
- restore %g0, -EFAULT, %o0
-EXPORT_SYMBOL(__ret_efault)
diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S
index 69a439fa2fc1..8aa16ef113f2 100644
--- a/arch/sparc/lib/GENcopy_from_user.S
+++ b/arch/sparc/lib/GENcopy_from_user.S
@@ -23,7 +23,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S
index 9947427ce354..311c8fa5e98e 100644
--- a/arch/sparc/lib/GENcopy_to_user.S
+++ b/arch/sparc/lib/GENcopy_to_user.S
@@ -27,7 +27,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/GENpatch.S b/arch/sparc/lib/GENpatch.S
index fab9e89f16bd..95e2f1f9e477 100644
--- a/arch/sparc/lib/GENpatch.S
+++ b/arch/sparc/lib/GENpatch.S
@@ -26,8 +26,8 @@
.type generic_patch_copyops,#function
generic_patch_copyops:
GEN_DO_PATCH(memcpy, GENmemcpy)
- GEN_DO_PATCH(___copy_from_user, GENcopy_from_user)
- GEN_DO_PATCH(___copy_to_user, GENcopy_to_user)
+ GEN_DO_PATCH(raw_copy_from_user, GENcopy_from_user)
+ GEN_DO_PATCH(raw_copy_to_user, GENcopy_to_user)
retl
nop
.size generic_patch_copyops,.-generic_patch_copyops
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index b79a6998d87c..0d8a018118c2 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -36,7 +36,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index dcec55f254ab..a7a0ea0d8a0b 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -45,7 +45,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NG2patch.S b/arch/sparc/lib/NG2patch.S
index 28c36f06a6d1..56ccc19adde8 100644
--- a/arch/sparc/lib/NG2patch.S
+++ b/arch/sparc/lib/NG2patch.S
@@ -26,8 +26,8 @@
.type niagara2_patch_copyops,#function
niagara2_patch_copyops:
NG_DO_PATCH(memcpy, NG2memcpy)
- NG_DO_PATCH(___copy_from_user, NG2copy_from_user)
- NG_DO_PATCH(___copy_to_user, NG2copy_to_user)
+ NG_DO_PATCH(raw_copy_from_user, NG2copy_from_user)
+ NG_DO_PATCH(raw_copy_to_user, NG2copy_to_user)
retl
nop
.size niagara2_patch_copyops,.-niagara2_patch_copyops
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index 16a286c1a528..5bb506bd61fa 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -31,7 +31,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index 6b0276ffc858..a82d4d45fc1c 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -40,7 +40,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NG4patch.S b/arch/sparc/lib/NG4patch.S
index a114cbcf2a48..3cc0f8cc95df 100644
--- a/arch/sparc/lib/NG4patch.S
+++ b/arch/sparc/lib/NG4patch.S
@@ -26,8 +26,8 @@
.type niagara4_patch_copyops,#function
niagara4_patch_copyops:
NG_DO_PATCH(memcpy, NG4memcpy)
- NG_DO_PATCH(___copy_from_user, NG4copy_from_user)
- NG_DO_PATCH(___copy_to_user, NG4copy_to_user)
+ NG_DO_PATCH(raw_copy_from_user, NG4copy_from_user)
+ NG_DO_PATCH(raw_copy_to_user, NG4copy_to_user)
retl
nop
.size niagara4_patch_copyops,.-niagara4_patch_copyops
diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S
index 9cd42fcbc781..2333b6f3e824 100644
--- a/arch/sparc/lib/NGcopy_from_user.S
+++ b/arch/sparc/lib/NGcopy_from_user.S
@@ -25,7 +25,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S
index 5c358afd464e..07ba20bc4ea4 100644
--- a/arch/sparc/lib/NGcopy_to_user.S
+++ b/arch/sparc/lib/NGcopy_to_user.S
@@ -28,7 +28,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop
#endif
diff --git a/arch/sparc/lib/NGpatch.S b/arch/sparc/lib/NGpatch.S
index 3b0674fc3366..62ccda7e7b38 100644
--- a/arch/sparc/lib/NGpatch.S
+++ b/arch/sparc/lib/NGpatch.S
@@ -26,8 +26,8 @@
.type niagara_patch_copyops,#function
niagara_patch_copyops:
NG_DO_PATCH(memcpy, NGmemcpy)
- NG_DO_PATCH(___copy_from_user, NGcopy_from_user)
- NG_DO_PATCH(___copy_to_user, NGcopy_to_user)
+ NG_DO_PATCH(raw_copy_from_user, NGcopy_from_user)
+ NG_DO_PATCH(raw_copy_to_user, NGcopy_to_user)
retl
nop
.size niagara_patch_copyops,.-niagara_patch_copyops
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index bb6ff73229e3..9a6e68a9bf4a 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -19,7 +19,7 @@
.text; \
.align 4;
-#define FUNC_NAME ___copy_from_user
+#define FUNC_NAME raw_copy_from_user
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest
#define EX_RETVAL(x) 0
@@ -31,7 +31,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop; \
#include "U1memcpy.S"
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index ed92ce739558..d7b28491eddf 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -19,7 +19,7 @@
.text; \
.align 4;
-#define FUNC_NAME ___copy_to_user
+#define FUNC_NAME raw_copy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
#define EX_RETVAL(x) 0
@@ -31,7 +31,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop; \
#include "U1memcpy.S"
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index c4ee858e352a..f48fb87fe9f2 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -31,7 +31,7 @@
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
- bne,pn %icc, ___copy_in_user; \
+ bne,pn %icc, raw_copy_in_user; \
nop; \
#include "U3memcpy.S"
diff --git a/arch/sparc/lib/U3patch.S b/arch/sparc/lib/U3patch.S
index ecc302619a6e..91cd6539b6e1 100644
--- a/arch/sparc/lib/U3patch.S
+++ b/arch/sparc/lib/U3patch.S
@@ -26,8 +26,8 @@
.type cheetah_patch_copyops,#function
cheetah_patch_copyops:
ULTRA3_DO_PATCH(memcpy, U3memcpy)
- ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user)
- ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user)
+ ULTRA3_DO_PATCH(raw_copy_from_user, U3copy_from_user)
+ ULTRA3_DO_PATCH(raw_copy_to_user, U3copy_to_user)
retl
nop
.size cheetah_patch_copyops,.-cheetah_patch_copyops
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 0252b218de45..1b73bb80aeb0 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -44,7 +44,7 @@ __retl_o2_plus_1:
* to copy register windows around during thread cloning.
*/
-ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
+ENTRY(raw_copy_in_user) /* %o0=dst, %o1=src, %o2=len */
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
@@ -105,5 +105,5 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
add %o0, 1, %o0
retl
clr %o0
-ENDPROC(___copy_in_user)
-EXPORT_SYMBOL(___copy_in_user)
+ENDPROC(raw_copy_in_user)
+EXPORT_SYMBOL(raw_copy_in_user)
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
index cea644dc67a6..bc243ee807cc 100644
--- a/arch/sparc/lib/copy_user.S
+++ b/arch/sparc/lib/copy_user.S
@@ -364,21 +364,7 @@ short_aligned_end:
97:
mov %o2, %g3
fixupretl:
- sethi %hi(PAGE_OFFSET), %g1
- cmp %o0, %g1
- blu 1f
- cmp %o1, %g1
- bgeu 1f
- ld [%g6 + TI_PREEMPT], %g1
- cmp %g1, 0
- bne 1f
- nop
- save %sp, -64, %sp
- mov %i0, %o0
- call __bzero
- mov %g3, %o1
- restore
-1: retl
+ retl
mov %g3, %o0
/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4583c0320059..27808d86eb57 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -33,6 +33,7 @@ config TILE
select USER_STACKTRACE_SUPPORT
select USE_PMC if PERF_EVENTS
select VIRT_TO_BUS
+ select ARCH_HAS_RAW_COPY_USER
config MMU
def_bool y
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index aa48b6eaff2d..24c44e93804d 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += clkdev.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += fb.h
generic-y += fcntl.h
generic-y += hw_irq.h
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index d26a42279036..5f8c615cb5e9 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
#define MAXMEM (_VMALLOC_START - PAGE_OFFSET)
/* We have no pmd or pud since we are strictly a two-level page table */
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline int pud_huge_page(pud_t pud) { return 0; }
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index e96cec52f6d8..96fe58b45118 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -59,6 +59,7 @@
#ifndef __ASSEMBLY__
/* We have no pud since we are a three-level page table. */
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/*
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 14ea3d1ca2c7..a803f6bb4d92 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -98,24 +98,7 @@ int __range_ok(unsigned long addr, unsigned long size);
likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/extable.h>
/*
* This is a type: either unsigned long, if the argument fits into
@@ -330,145 +313,16 @@ extern int __put_user_bad(void)
((x) = 0, -EFAULT); \
})
-/**
- * __copy_to_user() - copy data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * An alternate version - __copy_to_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable().
- */
-extern unsigned long __must_check __copy_to_user_inatomic(
- void __user *to, const void *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- might_fault();
- return __copy_to_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
- return n;
-}
-
-/**
- * __copy_from_user() - copy data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - is designed
- * to be called from atomic context, typically bracketed by calls
- * to pagefault_disable() and pagefault_enable(). This version
- * does *NOT* pad with zeros.
- */
-extern unsigned long __must_check __copy_from_user_inatomic(
- void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_from_user_zeroing(
- void *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- might_fault();
- return __copy_from_user_zeroing(to, from, n);
-}
-
-static inline unsigned long __must_check
-_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
-{
- int sz = __compiletime_object_size(to);
-
- if (likely(sz == -1 || sz >= n))
- n = _copy_from_user(to, from, n);
- else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
- return n;
-}
+extern unsigned long __must_check
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#ifdef __tilegx__
-/**
- * __copy_in_user() - copy data within user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to user space. Caller must check
- * the specified blocks with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-extern unsigned long __copy_in_user_inatomic(
+extern unsigned long raw_copy_in_user(
void __user *to, const void __user *from, unsigned long n);
-
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- might_fault();
- return __copy_in_user_inatomic(to, from, n);
-}
-
-static inline unsigned long __must_check
-copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
- n = __copy_in_user(to, from, n);
- return n;
-}
#endif
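Only tilegx provides raw_copy_in_user(); the access_ok() checks and might_fault() that the removed tile wrappers used to do now live once in the generic copy_in_user(). Roughly (a simplified sketch of the generic wrapper introduced by this series):

static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}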
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index c5369fe643c7..ecce8e177e3f 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount);
/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(__copy_to_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_inatomic);
-EXPORT_SYMBOL(__copy_from_user_zeroing);
+EXPORT_SYMBOL(raw_copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
#ifdef __tilegx__
-EXPORT_SYMBOL(__copy_in_user_inatomic);
+EXPORT_SYMBOL(raw_copy_in_user);
#endif
/* hypervisor glue */
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index a2771ae5da53..270f1267cd18 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -24,7 +24,6 @@
#define IS_MEMCPY 0
#define IS_COPY_FROM_USER 1
-#define IS_COPY_FROM_USER_ZEROING 2
#define IS_COPY_TO_USER -1
.section .text.memcpy_common, "ax"
@@ -42,40 +41,31 @@
9
-/* __copy_from_user_inatomic takes the kernel target address in r0,
+/* raw_copy_from_user takes the kernel target address in r0,
* the user source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
*/
-ENTRY(__copy_from_user_inatomic)
-.type __copy_from_user_inatomic, @function
- FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
+ENTRY(raw_copy_from_user)
+.type raw_copy_from_user, @function
+ FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \
.text.memcpy_common, \
- .Lend_memcpy_common - __copy_from_user_inatomic)
+ .Lend_memcpy_common - raw_copy_from_user)
{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
- .size __copy_from_user_inatomic, . - __copy_from_user_inatomic
+ .size raw_copy_from_user, . - raw_copy_from_user
-/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
- * any uncopiable bytes are zeroed in the target.
- */
-ENTRY(__copy_from_user_zeroing)
-.type __copy_from_user_zeroing, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
- { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
- .size __copy_from_user_zeroing, . - __copy_from_user_zeroing
-
-/* __copy_to_user_inatomic takes the user target address in r0,
+/* raw_copy_to_user takes the user target address in r0,
* the kernel source in r1, and the bytes to copy in r2.
* It returns the number of uncopiable bytes (hopefully zero) in r0.
*/
-ENTRY(__copy_to_user_inatomic)
-.type __copy_to_user_inatomic, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ENTRY(raw_copy_to_user)
+.type raw_copy_to_user, @function
+ FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_COPY_TO_USER; j memcpy_common }
- .size __copy_to_user_inatomic, . - __copy_to_user_inatomic
+ .size raw_copy_to_user, . - raw_copy_to_user
ENTRY(memcpy)
.type memcpy, @function
- FEEDBACK_REENTER(__copy_from_user_inatomic)
+ FEEDBACK_REENTER(raw_copy_from_user)
{ movei r29, IS_MEMCPY }
.size memcpy, . - memcpy
/* Fall through */
@@ -520,12 +510,7 @@ copy_from_user_fixup_loop:
{ bnzt r2, copy_from_user_fixup_loop }
.Lcopy_from_user_fixup_zero_remainder:
- { bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */
- /* byte-at-a-time loop faulted, so zero the rest. */
- { move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
-1: { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
- { bnzt r3, 1b }
-2: move lr, r27
+ move lr, r27
{ move r0, r2; jrp lr }
copy_to_user_fixup_loop:
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 97bbb6060b25..a3fea9fd973e 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -51,7 +51,7 @@
__v; \
})
-#define USERCOPY_FUNC __copy_to_user_inatomic
+#define USERCOPY_FUNC raw_copy_to_user
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
@@ -62,7 +62,7 @@
#define LD8 LD
#include "memcpy_64.c"
-#define USERCOPY_FUNC __copy_from_user_inatomic
+#define USERCOPY_FUNC raw_copy_from_user
#define ST1 ST
#define ST2 ST
#define ST4 ST
@@ -73,7 +73,7 @@
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"
-#define USERCOPY_FUNC __copy_in_user_inatomic
+#define USERCOPY_FUNC raw_copy_in_user
#define ST1(p, v) _ST((p), st1, (v))
#define ST2(p, v) _ST((p), st2, (v))
#define ST4(p, v) _ST((p), st4, (v))
@@ -83,12 +83,3 @@
#define LD4(p) _LD((p), ld4u)
#define LD8(p) _LD((p), ld)
#include "memcpy_64.c"
-
-unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
- unsigned long n)
-{
- unsigned long rc = __copy_from_user_inatomic(to, from, n);
- if (unlikely(rc))
- memset(to + n - rc, 0, rc);
- return rc;
-}
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index fd443852103c..e13f763849b1 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -13,6 +13,7 @@ config UML
select GENERIC_CLOCKEVENTS
select HAVE_GCC_PLUGINS
select TTY # Needed for line.c
+ select ARCH_HAS_RAW_COPY_USER
config MMU
bool
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index cfbe59752469..179c0ea87a0c 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -8,6 +8,7 @@
#ifndef __UM_PGTABLE_2LEVEL_H
#define __UM_PGTABLE_2LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index bae8523a162f..c4d876dfb9ac 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -7,6 +7,7 @@
#ifndef __UM_PGTABLE_3LEVEL_H
#define __UM_PGTABLE_3LEVEL_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
/* PGDIR_SHIFT determines what a third-level page table entry can map */
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
index 9396c8a4e920..cc00fc50768f 100644
--- a/arch/um/include/asm/uaccess.h
+++ b/arch/um/include/asm/uaccess.h
@@ -21,8 +21,8 @@
#define __addr_range_nowrap(addr, size) \
((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
-extern long __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strnlen_user(const void __user *str, long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
@@ -33,8 +33,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size);
#define __clear_user __clear_user
#define __strnlen_user __strnlen_user
#define __strncpy_from_user __strncpy_from_user
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#include <asm-generic/uaccess.h>
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 22c9f79db8e6..d450797a3a7c 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -139,7 +139,7 @@ static int copy_chunk_from_user(unsigned long from, int len, void *arg)
return 0;
}
-long __copy_from_user(void *to, const void __user *from, unsigned long n)
+unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (uaccess_kernel()) {
memcpy(to, (__force void*)from, n);
@@ -148,7 +148,7 @@ long __copy_from_user(void *to, const void __user *from, unsigned long n)
return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
@@ -159,7 +159,7 @@ static int copy_chunk_to_user(unsigned long to, int len, void *arg)
return 0;
}
-long __copy_to_user(void __user *to, const void *from, unsigned long n)
+unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (uaccess_kernel()) {
memcpy((__force void *) to, from, n);
@@ -168,7 +168,7 @@ long __copy_to_user(void __user *to, const void *from, unsigned long n)
return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
}
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 0769066929c6..319519eaed24 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -18,6 +18,7 @@ config UNICORE32
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
+ select ARCH_HAS_RAW_COPY_USER
help
UniCore-32 is 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
index 818d0f5598e3..a4f2bef37e70 100644
--- a/arch/unicore32/include/asm/pgtable.h
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -12,6 +12,7 @@
#ifndef __UNICORE_PGTABLE_H__
#define __UNICORE_PGTABLE_H__
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/cpu-single.h>
diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h
index b2bcab69dfee..1d55f2f83759 100644
--- a/arch/unicore32/include/asm/uaccess.h
+++ b/arch/unicore32/include/asm/uaccess.h
@@ -24,15 +24,17 @@
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
extern unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n);
+raw_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n);
+raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long
__strnlen_user(const char __user *s, long n);
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#include <asm-generic/uaccess.h>
diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c
index 0323528a80fd..dcc72ee1fcdb 100644
--- a/arch/unicore32/kernel/ksyms.c
+++ b/arch/unicore32/kernel/ksyms.c
@@ -46,8 +46,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_from_user);
+EXPORT_SYMBOL(raw_copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__ashldi3);
diff --git a/arch/unicore32/lib/copy_from_user.S b/arch/unicore32/lib/copy_from_user.S
index ab0767ea5dbd..5f80fcbe8631 100644
--- a/arch/unicore32/lib/copy_from_user.S
+++ b/arch/unicore32/lib/copy_from_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
- * size_t __copy_from_user(void *to, const void *from, size_t n)
+ * size_t raw_copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -87,22 +87,18 @@
.text
-ENTRY(__copy_from_user)
+ENTRY(raw_copy_from_user)
#include "copy_template.S"
-ENDPROC(__copy_from_user)
+ENDPROC(raw_copy_from_user)
.pushsection .fixup,"ax"
.align 0
copy_abort_preamble
- ldm.w (r1, r2), [sp]+
- sub r3, r0, r1
- rsub r2, r3, r2
- stw r2, [sp]
- mov r1, #0
- b.l memset
- ldw.w r0, [sp]+, #4
+ ldm.w (r1, r2, r3), [sp]+
+ sub r0, r0, r1
+ rsub r0, r0, r2
copy_abort_end
.popsection
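The new unicore32 fixup no longer zero-fills the tail that could not be copied; it only works out how many bytes remain, in line with the raw_copy contract (the generic wrapper zero-pads, as sketched earlier). In C terms the three new instructions amount to (illustrative names only; r0 is the destination at the fault, r1/r2 the saved original destination and length):

static unsigned long fixup_retval(char *fault_dst, char *orig_dst, unsigned long n)
{
	unsigned long copied = fault_dst - orig_dst;

	return n - copied;	/* bytes left uncopied */
}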
diff --git a/arch/unicore32/lib/copy_to_user.S b/arch/unicore32/lib/copy_to_user.S
index 6e22151c840d..857c6816ffe7 100644
--- a/arch/unicore32/lib/copy_to_user.S
+++ b/arch/unicore32/lib/copy_to_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
- * size_t __copy_to_user(void *to, const void *from, size_t n)
+ * size_t raw_copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -79,11 +79,11 @@
.text
-WEAK(__copy_to_user)
+WEAK(raw_copy_to_user)
#include "copy_template.S"
-ENDPROC(__copy_to_user)
+ENDPROC(raw_copy_to_user)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cc98d5a294ee..5f59fc388063 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -175,6 +175,7 @@ config X86
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
select X86_FEATURE_NAMES if PROC_FS
+ select ARCH_HAS_RAW_COPY_USER
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 349d4d17aa7f..2aa1ad194db2 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
static void refresh_pce(void *ignored)
{
- if (current->mm)
- load_mm_cr4(current->mm);
+ if (current->active_mm)
+ load_mm_cr4(current->active_mm);
}
static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
+ /*
+ * This function relies on not being called concurrently in two
+ * tasks in the same mm. Otherwise one task could observe
+ * perf_rdpmc_allowed > 1 and return all the way back to
+ * userspace with CR4.PCE clear while another task is still
+ * doing on_each_cpu_mask() to propagate CR4.PCE.
+ *
+ * For now, this can't happen because all callers hold mmap_sem
+ * for write. If this changes, we'll need a different solution.
+ */
+ lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
}
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 4e7772387c6e..b04bb6dfed7f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -289,7 +289,8 @@
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
-#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */
+#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index d74747b031ec..c4eda791f877 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node {
};
void kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 72277b1028a5..50d35e3185f5 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
*(tmp + 1) = 0;
}
-#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
- defined(CONFIG_PARAVIRT))
static inline void native_pud_clear(pud_t *pudp)
{
}
-#endif
static inline void pud_clear(pud_t *pudp)
{
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1cfb36b8c024..585ee0d42d18 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif
-#ifndef __PAGETABLE_PMD_FOLDED
+#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8b4de22d6429..62484333673d 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
}
#if CONFIG_PGTABLE_LEVELS > 3
+#include <asm-generic/5level-fixup.h>
+
typedef struct { pudval_t pud; } pud_t;
static inline pud_t native_make_pud(pmdval_t val)
@@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud)
return pud.pud;
}
#else
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
static inline pudval_t native_pud_val(pud_t pud)
@@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
return pmd.pmd;
}
#else
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
static inline pmdval_t native_pmd_val(pmd_t pmd)
diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h
new file mode 100644
index 000000000000..d7da2729903d
--- /dev/null
+++ b/arch/x86/include/asm/purgatory.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_X86_PURGATORY_H
+#define _ASM_X86_PURGATORY_H
+
+#ifndef __ASSEMBLY__
+#include <linux/purgatory.h>
+
+extern void purgatory(void);
+/*
+ * These forward declarations serve two purposes:
+ *
+ * 1) Make sparse happy when checking arch/purgatory
+ * 2) Document that these are required to be global so the symbol
+ * lookup in kexec works
+ */
+extern unsigned long purgatory_backup_dest;
+extern unsigned long purgatory_backup_src;
+extern unsigned long purgatory_backup_sz;
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_PURGATORY_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6fa85944af83..fc5abff9b7fd 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
static inline void __flush_tlb_all(void)
{
- if (static_cpu_has(X86_FEATURE_PGE))
+ if (boot_cpu_has(X86_FEATURE_PGE))
__flush_tlb_global();
else
__flush_tlb();
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 0522d88a7f90..68766b276d9e 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -379,6 +379,18 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
+#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
+ asm volatile("\n" \
+ "1: mov"itype" %2,%"rtype"1\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (err), ltype(x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
/*
* This doesn't do __uaccess_begin/end - the exception handling
* around it must do that.
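__get_user_asm_nozero(), added above, differs from __get_user_asm() only in its fixup path: on a fault it sets the error code and leaves the destination alone, where the old macro also cleared the destination register. The clearing is no longer wanted because zero-padding now happens once in the generic copy_from_user() wrapper. In C terms (fault_on() and load_word() are hypothetical stand-ins for the asm, not real kernel APIs):

static int get_user_word_zeroing(u32 *dst, const u32 __user *src)
{
	if (fault_on(src)) {
		*dst = 0;		/* __get_user_asm: destination cleared on fault */
		return -EFAULT;
	}
	*dst = load_word(src);
	return 0;
}

static int get_user_word_nozero(u32 *dst, const u32 __user *src)
{
	if (fault_on(src))
		return -EFAULT;		/* __get_user_asm_nozero: destination untouched */
	*dst = load_word(src);
	return 0;
}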
@@ -670,59 +682,6 @@ extern struct movsl_mask {
# include <asm/uaccess_64.h>
#endif
-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
- unsigned n);
-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
- unsigned n);
-
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
-static __always_inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- int sz = __compiletime_object_size(to);
-
- might_fault();
-
- kasan_check_write(to, n);
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(to, n, false);
- n = _copy_from_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
- return n;
-}
-
-static __always_inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- int sz = __compiletime_object_size(from);
-
- kasan_check_read(from, n);
-
- might_fault();
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(from, n, true);
- n = _copy_to_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
- return n;
-}
-
/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 5268ecceea96..aeda9bb8af50 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -8,143 +8,48 @@
#include <asm/asm.h>
#include <asm/page.h>
-unsigned long __must_check __copy_to_user_ll
- (void __user *to, const void *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll
- (void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nozero
- (void *to, const void __user *from, unsigned long n);
-unsigned long __must_check __copy_from_user_ll_nocache
- (void *to, const void __user *from, unsigned long n);
+unsigned long __must_check __copy_user_ll
+ (void *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
(void *to, const void __user *from, unsigned long n);
-/**
- * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- * The caller should also make sure he pins the user space address
- * so that we don't result in page fault and sleep.
- */
-static __always_inline unsigned long __must_check
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
-{
- check_object_size(from, n, true);
- return __copy_to_user_ll(to, from, n);
-}
-
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_fault();
- return __copy_to_user_inatomic(to, from, n);
+ return __copy_user_ll((__force void *)to, from, n);
}
static __always_inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
- return __copy_from_user_ll_nozero(to, from, n);
-}
-
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- *
- * An alternate version - __copy_from_user_inatomic() - may be called from
- * atomic context and will fail rather than sleep. In this case the
- * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
- * for explanation of why this is needed.
- */
-static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- might_fault();
- check_object_size(to, n, false);
- if (__builtin_constant_p(n)) {
- unsigned long ret;
-
- switch (n) {
- case 1:
- __uaccess_begin();
- __get_user_size(*(u8 *)to, from, 1, ret, 1);
- __uaccess_end();
- return ret;
- case 2:
- __uaccess_begin();
- __get_user_size(*(u16 *)to, from, 2, ret, 2);
- __uaccess_end();
- return ret;
- case 4:
- __uaccess_begin();
- __get_user_size(*(u32 *)to, from, 4, ret, 4);
- __uaccess_end();
- return ret;
- }
- }
- return __copy_from_user_ll(to, from, n);
-}
-
-static __always_inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- might_fault();
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
+ ret = 0;
__uaccess_begin();
- __get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __get_user_asm_nozero(*(u8 *)to, from, ret,
+ "b", "b", "=q", 1);
__uaccess_end();
return ret;
case 2:
+ ret = 0;
__uaccess_begin();
- __get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __get_user_asm_nozero(*(u16 *)to, from, ret,
+ "w", "w", "=r", 2);
__uaccess_end();
return ret;
case 4:
+ ret = 0;
__uaccess_begin();
- __get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __get_user_asm_nozero(*(u32 *)to, from, ret,
+ "l", "k", "=r", 4);
__uaccess_end();
return ret;
}
}
- return __copy_from_user_ll_nocache(to, from, n);
+ return __copy_user_ll(to, (__force const void *)from, n);
}
static __always_inline unsigned long
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 8ddadd93639e..c5504b9a472e 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -45,58 +45,54 @@ copy_user_generic(void *to, const void *from, unsigned len)
return ret;
}
-__must_check unsigned long
-copy_in_user(void __user *to, const void __user *from, unsigned len);
-
-static __always_inline __must_check
-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
int ret = 0;
- check_object_size(dst, size, false);
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
case 1:
__uaccess_begin();
- __get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
ret, "b", "b", "=q", 1);
__uaccess_end();
return ret;
case 2:
__uaccess_begin();
- __get_user_asm(*(u16 *)dst, (u16 __user *)src,
+ __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
ret, "w", "w", "=r", 2);
__uaccess_end();
return ret;
case 4:
__uaccess_begin();
- __get_user_asm(*(u32 *)dst, (u32 __user *)src,
+ __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
ret, "l", "k", "=r", 4);
__uaccess_end();
return ret;
case 8:
__uaccess_begin();
- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 8);
__uaccess_end();
return ret;
case 10:
__uaccess_begin();
- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 10);
if (likely(!ret))
- __get_user_asm(*(u16 *)(8 + (char *)dst),
+ __get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
(u16 __user *)(8 + (char __user *)src),
ret, "w", "w", "=r", 2);
__uaccess_end();
return ret;
case 16:
__uaccess_begin();
- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16);
if (likely(!ret))
- __get_user_asm(*(u64 *)(8 + (char *)dst),
+ __get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
(u64 __user *)(8 + (char __user *)src),
ret, "q", "", "=r", 8);
__uaccess_end();
@@ -106,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
}
}
-static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
-{
- might_fault();
- kasan_check_write(dst, size);
- return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
+static __always_inline __must_check unsigned long
+raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
int ret = 0;
- check_object_size(src, size, true);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
@@ -175,100 +162,16 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
}
static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
-{
- might_fault();
- kasan_check_read(src, size);
- return __copy_to_user_nocheck(dst, src, size);
-}
-
-static __always_inline __must_check
-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
-{
- int ret = 0;
-
- might_fault();
- if (!__builtin_constant_p(size))
- return copy_user_generic((__force void *)dst,
- (__force void *)src, size);
- switch (size) {
- case 1: {
- u8 tmp;
- __uaccess_begin();
- __get_user_asm(tmp, (u8 __user *)src,
- ret, "b", "b", "=q", 1);
- if (likely(!ret))
- __put_user_asm(tmp, (u8 __user *)dst,
- ret, "b", "b", "iq", 1);
- __uaccess_end();
- return ret;
- }
- case 2: {
- u16 tmp;
- __uaccess_begin();
- __get_user_asm(tmp, (u16 __user *)src,
- ret, "w", "w", "=r", 2);
- if (likely(!ret))
- __put_user_asm(tmp, (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
- __uaccess_end();
- return ret;
- }
-
- case 4: {
- u32 tmp;
- __uaccess_begin();
- __get_user_asm(tmp, (u32 __user *)src,
- ret, "l", "k", "=r", 4);
- if (likely(!ret))
- __put_user_asm(tmp, (u32 __user *)dst,
- ret, "l", "k", "ir", 4);
- __uaccess_end();
- return ret;
- }
- case 8: {
- u64 tmp;
- __uaccess_begin();
- __get_user_asm(tmp, (u64 __user *)src,
- ret, "q", "", "=r", 8);
- if (likely(!ret))
- __put_user_asm(tmp, (u64 __user *)dst,
- ret, "q", "", "er", 8);
- __uaccess_end();
- return ret;
- }
- default:
- return copy_user_generic((__force void *)dst,
- (__force void *)src, size);
- }
-}
-
-static __must_check __always_inline int
-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
-{
- kasan_check_write(dst, size);
- return __copy_from_user_nocheck(dst, src, size);
-}
-
-static __must_check __always_inline int
-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
+unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
- kasan_check_read(src, size);
- return __copy_to_user_nocheck(dst, src, size);
+ return copy_user_generic((__force void *)dst,
+ (__force void *)src, size);
}
extern long __copy_user_nocache(void *dst, const void __user *src,
unsigned size, int zerorest);
static inline int
-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
-{
- might_fault();
- kasan_check_write(dst, size);
- return __copy_user_nocache(dst, src, size, 1);
-}
-
-static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size)
{
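The 64-bit raw_copy_from_user() above keeps the __builtin_constant_p() dispatch: sizes known at compile time go through fixed-size __get_user_asm_nozero() sequences, everything else falls back to copy_user_generic(). A hedged userspace model of that dispatch follows; both paths are plain memcpy() here, and the constant-size branch only resolves once the compiler optimizes the inlined call.

#include <stdio.h>
#include <string.h>

static inline unsigned long copy_model(void *dst, const void *src,
                                       unsigned long size, const char **path)
{
        if (!__builtin_constant_p(size)) {
                *path = "generic";              /* kernel: copy_user_generic() */
                memcpy(dst, src, size);
                return 0;
        }

        switch (size) {
        case 1: case 2: case 4: case 8:
                *path = "fixed-size";           /* kernel: __get_user_asm_nozero() */
                memcpy(dst, src, size);
                return 0;
        default:
                *path = "generic";
                memcpy(dst, src, size);
                return 0;
        }
}

int main(void)
{
        unsigned long dst = 0, src = 42;
        const char *path = "";

        copy_model(&dst, &src, sizeof(dst), &path);     /* size is a constant 8 */
        printf("%s copy, dst = %lu\n", path, dst);
        return 0;
}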
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ae32838cac5f..b2879cc23db4 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
return -EINVAL;
}
+ if (!enabled) {
+ ++disabled_cpus;
+ return -EINVAL;
+ }
+
if (boot_cpu_physical_apicid != -1U)
ver = boot_cpu_apic_version;
- cpu = __generic_processor_info(id, ver, enabled);
+ cpu = generic_processor_info(id, ver);
if (cpu >= 0)
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;
@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
-int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aee7deddabd0..8ccb7ef512e0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
return nr_logical_cpuids++;
}
-int __generic_processor_info(int apicid, int version, bool enabled)
+int generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;
- if (enabled) {
- pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
- "reached. Processor %d/0x%x ignored.\n",
- max, thiscpu, apicid);
- }
+ pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
+ "reached. Processor %d/0x%x ignored.\n",
+ max, thiscpu, apicid);
disabled_cpus++;
return -EINVAL;
@@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);
-
- if (enabled) {
- num_processors++;
- physid_set(apicid, phys_cpu_present_map);
- set_cpu_present(cpu, true);
- } else {
- disabled_cpus++;
- }
+ physid_set(apicid, phys_cpu_present_map);
+ set_cpu_present(cpu, true);
+ num_processors++;
return cpu;
}
-int generic_processor_info(int apicid, int version)
-{
- return __generic_processor_info(apicid, version, true);
-}
-
int hard_smp_processor_id(void)
{
return read_apic_id();
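The two hunks above move the handling of disabled LAPICs out of the registration helper: acpi_register_lapic() now bumps disabled_cpus itself, so generic_processor_info() only ever sees enabled processors. A small standalone model of that split (counters and return values are simplified stand-ins, not the kernel's):

#include <stdio.h>

static int disabled_cpus;
static int num_processors;

/* Formerly __generic_processor_info(apicid, ver, enabled); now enabled-only. */
static int register_processor(int apicid)
{
        (void)apicid;
        return num_processors++;                /* logical cpu id */
}

/* The caller decides what to do with disabled entries. */
static int register_lapic(int apicid, int enabled)
{
        if (!enabled) {
                ++disabled_cpus;
                return -1;
        }
        return register_processor(apicid);
}

int main(void)
{
        register_lapic(0, 1);
        register_lapic(1, 0);
        printf("cpus = %d, disabled = %d\n", num_processors, disabled_cpus);  /* 1, 1 */
        return 0;
}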
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index c05509d38b1f..9ac2a5cdd9c2 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
kernfs_unbreak_active_protection(kn);
- kernfs_put(kn);
+ kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8639bb2ae058..8f3d9cf26ff9 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -535,7 +535,7 @@ static void run_sync(void)
{
int enable_irqs = irqs_disabled();
- /* We may be called with interrupts disbled (on bootup). */
+ /* We may be called with interrupts disabled (on bootup). */
if (enable_irqs)
local_irq_enable();
on_each_cpu(do_sync_core, NULL, 1);
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372f5dbb..b5785c197e53 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
+#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 307b1f4543de..857cdbd02867 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -194,19 +194,22 @@ static int arch_update_purgatory(struct kimage *image)
/* Setup copying of backup region */
if (image->type == KEXEC_TYPE_CRASH) {
- ret = kexec_purgatory_get_set_symbol(image, "backup_dest",
+ ret = kexec_purgatory_get_set_symbol(image,
+ "purgatory_backup_dest",
&image->arch.backup_load_addr,
sizeof(image->arch.backup_load_addr), 0);
if (ret)
return ret;
- ret = kexec_purgatory_get_set_symbol(image, "backup_src",
+ ret = kexec_purgatory_get_set_symbol(image,
+ "purgatory_backup_src",
&image->arch.backup_src_start,
sizeof(image->arch.backup_src_start), 0);
if (ret)
return ret;
- ret = kexec_purgatory_get_set_symbol(image, "backup_sz",
+ ret = kexec_purgatory_get_set_symbol(image,
+ "purgatory_backup_sz",
&image->arch.backup_src_sz,
sizeof(image->arch.backup_src_sz), 0);
if (ret)
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index f088ea4c66e7..a723ae9440ab 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
spin_lock_irqsave(&desc->lock, flags);
/*
- * most handlers of type NMI_UNKNOWN never return because
- * they just assume the NMI is theirs. Just a sanity check
- * to manage expectations
+ * Indicate if there are multiple registrations on the
+ * internal NMI handler call chains (SERR and IO_CHECK).
*/
- WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4194d6f9bb29..067f9813fd2c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -228,7 +228,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
.ident = "ASUS EeeBook X205TA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"),
},
},
{ /* Handle problems with rebooting on ASUS EeeBook X205TAW */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 4f7a9833d8e5..c73a7f9e881a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
* the refined calibration and directly register it as a clocksource.
*/
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
+ if (boot_cpu_has(X86_FEATURE_ART))
+ art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
return 0;
}
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 478d15dbaee4..08339262b666 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
return sizeof(*regs);
}
+#ifdef CONFIG_X86_32
+#define GCC_REALIGN_WORDS 3
+#else
+#define GCC_REALIGN_WORDS 1
+#endif
+
static bool is_last_task_frame(struct unwind_state *state)
{
- unsigned long bp = (unsigned long)state->bp;
- unsigned long regs = (unsigned long)task_pt_regs(state->task);
+ unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
+ unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
/*
* We have to check for the last task frame at two different locations
* because gcc can occasionally decide to realign the stack pointer and
- * change the offset of the stack frame by a word in the prologue of a
- * function called by head/entry code.
+ * change the offset of the stack frame in the prologue of a function
+ * called by head/entry code. Examples:
+ *
+ * <start_secondary>:
+ * push %edi
+ * lea 0x8(%esp),%edi
+ * and $0xfffffff8,%esp
+ * pushl -0x4(%edi)
+ * push %ebp
+ * mov %esp,%ebp
+ *
+ * <x86_64_start_kernel>:
+ * lea 0x8(%rsp),%r10
+ * and $0xfffffffffffffff0,%rsp
+ * pushq -0x8(%r10)
+ * push %rbp
+ * mov %rsp,%rbp
+ *
+ * Note that after aligning the stack, it pushes a duplicate copy of
+ * the return address before pushing the frame pointer.
*/
- return bp == regs - FRAME_HEADER_SIZE ||
- bp == regs - FRAME_HEADER_SIZE - sizeof(long);
+ return (state->bp == last_bp ||
+ (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
}
/*
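To make the two candidate frame locations in is_last_task_frame() above concrete, here is a standalone sketch of the pointer arithmetic with a fake stack layout; the array and the position of the pretend pt_regs are invented, and GCC_REALIGN_WORDS uses the 32-bit value from the patch.

#include <stdio.h>

#define GCC_REALIGN_WORDS 3     /* 32-bit value from the patch above */

int main(void)
{
        unsigned long stack[32] = { 0 };
        unsigned long *pt_regs = &stack[24];            /* pretend regs live here */
        unsigned long *last_bp = pt_regs - 2;           /* normal frame pointer slot */
        unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;

        printf("last_bp    is %td words below pt_regs\n", pt_regs - last_bp);
        printf("aligned_bp is %td words below pt_regs\n", pt_regs - aligned_bp);
        return 0;
}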
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 73ea24d4f119..047b17a26269 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm)
{
struct kvm_pic *vpic = kvm->arch.vpic;
+ if (!vpic)
+ return;
+
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 6e219e5c07d2..289270a6aecb 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ if (!ioapic)
+ return;
+
cancel_delayed_work_sync(&ioapic->eoi_inject);
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
kvm->arch.vioapic = NULL;
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
index 37942e419c32..60168cdd0546 100644
--- a/arch/x86/kvm/page_track.c
+++ b/arch/x86/kvm/page_track.c
@@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
}
+void kvm_page_track_cleanup(struct kvm *kvm)
+{
+ struct kvm_page_track_notifier_head *head;
+
+ head = &kvm->arch.track_notifier_head;
+ cleanup_srcu_struct(&head->track_srcu);
+}
+
void kvm_page_track_init(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1efe2c62b3f..5fba70646c32 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm)
unsigned long flags;
struct kvm_arch *vm_data = &kvm->arch;
+ if (!avic)
+ return;
+
avic_free_vm_id(vm_data->avic_vm_id);
if (vm_data->avic_logical_id_table_page)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 283aa8601833..2ee00dbbbd51 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void)
return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}
+static inline bool cpu_has_vmx_invvpid(void)
+{
+ return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
static inline bool cpu_has_vmx_ept(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
@@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
* though it is treated as global context. The alternative is
* not failing the single-context invvpid, and it is worse.
*/
- if (enable_vpid)
+ if (enable_vpid) {
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_VPID;
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
VMX_VPID_EXTENT_SUPPORTED_MASK;
- else
+ } else
vmx->nested.nested_vmx_vpid_caps = 0;
if (enable_unrestricted_guest)
@@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
}
+static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+{
+ if (enable_ept)
+ vmx_flush_tlb(vcpu);
+}
+
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -6517,8 +6529,10 @@ static __init int hardware_setup(void)
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
- if (!cpu_has_vmx_vpid())
+ if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+ !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
+
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
if (enable_shadow_vmcs)
@@ -7258,9 +7272,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 zero = 0;
gpa_t vmptr;
- struct vmcs12 *vmcs12;
- struct page *page;
if (!nested_vmx_check_permission(vcpu))
return 1;
@@ -7271,22 +7284,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vmx);
- page = nested_get_page(vcpu, vmptr);
- if (page == NULL) {
- /*
- * For accurate processor emulation, VMCLEAR beyond available
- * physical memory should do nothing at all. However, it is
- * possible that a nested vmx bug, not a guest hypervisor bug,
- * resulted in this case, so let's shut down before doing any
- * more damage:
- */
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- return 1;
- }
- vmcs12 = kmap(page);
- vmcs12->launch_state = 0;
- kunmap(page);
- nested_release_page(page);
+ kvm_vcpu_write_guest(vcpu,
+ vmptr + offsetof(struct vmcs12, launch_state),
+ &zero, sizeof(zero));
nested_free_vmcs02(vmx, vmptr);
@@ -8515,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
else {
- WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+ exit_reason);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
@@ -8561,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
} else {
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ vmx_flush_tlb_ept_only(vcpu);
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
@@ -8586,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
*/
if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(get_vmcs12(&vmx->vcpu),
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmcs_write64(APIC_ACCESS_ADDR, hpa);
+ vmx_flush_tlb_ept_only(vcpu);
+ }
}
static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -9694,10 +9698,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
return false;
page = nested_get_page(vcpu, vmcs12->msr_bitmap);
- if (!page) {
- WARN_ON(1);
+ if (!page)
return false;
- }
msr_bitmap_l1 = (unsigned long *)kmap(page);
memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
@@ -9990,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control;
- bool nested_ept_enabled = false;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10137,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs12->guest_intr_status);
}
- nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0;
-
/*
* Write an illegal value to APIC_ACCESS_ADDR. Later,
* nested_get_vmcs12_pages will either fix it up or
@@ -10271,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);
+ } else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmx_flush_tlb_ept_only(vcpu);
}
/*
@@ -10298,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmx_set_efer(vcpu, vcpu->arch.efer);
/* Shadow page tables on either EPT or shadow page tables. */
- if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled,
+ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
return 1;
- kvm_mmu_reset_context(vcpu);
-
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
@@ -11072,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
vmx_set_virtual_x2apic_mode(vcpu,
vcpu->arch.apic_base & X2APIC_ENABLE);
+ } else if (!nested_cpu_has_ept(vmcs12) &&
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmx_flush_tlb_ept_only(vcpu);
}
/* This is needed for same reason as it was needed in prepare_vmcs02 */
@@ -11121,8 +11125,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
*/
static void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
- if (is_guest_mode(vcpu))
+ if (is_guest_mode(vcpu)) {
+ to_vmx(vcpu)->nested.nested_run_pending = 0;
nested_vmx_vmexit(vcpu, -1, 0, 0);
+ }
free_nested(to_vmx(vcpu));
}
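One detail of the handle_vmclear() change above is worth spelling out: instead of mapping the guest page and editing the whole vmcs12, the new code writes zero at the byte offset of launch_state via kvm_vcpu_write_guest(). A minimal model of that offsetof()-based partial write follows; vmcs12_model and guest_write() are invented for illustration and are not the kernel's types.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct vmcs12_model {
        uint32_t revision_id;
        uint32_t abort;
        uint32_t launch_state;
};

/* Stand-in for kvm_vcpu_write_guest(): write into guest memory by byte offset. */
static void guest_write(void *guest_base, size_t offset, const void *data, size_t len)
{
        memcpy((uint8_t *)guest_base + offset, data, len);
}

int main(void)
{
        struct vmcs12_model vmcs12 = { .launch_state = 1 };
        uint32_t zero = 0;

        guest_write(&vmcs12, offsetof(struct vmcs12_model, launch_state),
                    &zero, sizeof(zero));
        printf("launch_state = %u\n", vmcs12.launch_state);     /* 0 */
        return 0;
}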
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1faf620a6fdc..ccbd45ecd41a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (kvm_x86_ops->vm_destroy)
kvm_x86_ops->vm_destroy(kvm);
kvm_iommu_unmap_guest(kvm);
- kfree(kvm->arch.vpic);
- kfree(kvm->arch.vioapic);
+ kvm_pic_destroy(kvm);
+ kvm_ioapic_destroy(kvm);
kvm_free_vcpus(kvm);
kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
kvm_mmu_uninit_vm(kvm);
+ kvm_page_track_cleanup(kvm);
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
{
struct x86_exception fault;
- trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (work->wakeup_all)
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+ trace_kvm_async_pf_ready(work->arch.token, work->gva);
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index c074799bddae..c8c6ad0d58b8 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -4,12 +4,9 @@
* For licencing details see kernel-base/COPYING
*/
-#include <linux/highmem.h>
+#include <linux/uaccess.h>
#include <linux/export.h>
-#include <asm/word-at-a-time.h>
-#include <linux/sched.h>
-
/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
@@ -34,52 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
- return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-EXPORT_SYMBOL(_copy_from_user);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 1f65ff6540f0..bd057a4ffe6e 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -5,12 +5,7 @@
* Copyright 1997 Andi Kleen <ak@muc.de>
* Copyright 1997 Linus Torvalds
*/
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/blkdev.h>
#include <linux/export.h>
-#include <linux/backing-dev.h>
-#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/mmx.h>
#include <asm/asm.h>
@@ -201,197 +196,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
return size;
}
-static unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
-{
- int d0, d1;
- __asm__ __volatile__(
- " .align 2,0x90\n"
- "0: movl 32(%4), %%eax\n"
- " cmpl $67, %0\n"
- " jbe 2f\n"
- "1: movl 64(%4), %%eax\n"
- " .align 2,0x90\n"
- "2: movl 0(%4), %%eax\n"
- "21: movl 4(%4), %%edx\n"
- " movl %%eax, 0(%3)\n"
- " movl %%edx, 4(%3)\n"
- "3: movl 8(%4), %%eax\n"
- "31: movl 12(%4),%%edx\n"
- " movl %%eax, 8(%3)\n"
- " movl %%edx, 12(%3)\n"
- "4: movl 16(%4), %%eax\n"
- "41: movl 20(%4), %%edx\n"
- " movl %%eax, 16(%3)\n"
- " movl %%edx, 20(%3)\n"
- "10: movl 24(%4), %%eax\n"
- "51: movl 28(%4), %%edx\n"
- " movl %%eax, 24(%3)\n"
- " movl %%edx, 28(%3)\n"
- "11: movl 32(%4), %%eax\n"
- "61: movl 36(%4), %%edx\n"
- " movl %%eax, 32(%3)\n"
- " movl %%edx, 36(%3)\n"
- "12: movl 40(%4), %%eax\n"
- "71: movl 44(%4), %%edx\n"
- " movl %%eax, 40(%3)\n"
- " movl %%edx, 44(%3)\n"
- "13: movl 48(%4), %%eax\n"
- "81: movl 52(%4), %%edx\n"
- " movl %%eax, 48(%3)\n"
- " movl %%edx, 52(%3)\n"
- "14: movl 56(%4), %%eax\n"
- "91: movl 60(%4), %%edx\n"
- " movl %%eax, 56(%3)\n"
- " movl %%edx, 60(%3)\n"
- " addl $-64, %0\n"
- " addl $64, %4\n"
- " addl $64, %3\n"
- " cmpl $63, %0\n"
- " ja 0b\n"
- "5: movl %0, %%eax\n"
- " shrl $2, %0\n"
- " andl $3, %%eax\n"
- " cld\n"
- "6: rep; movsl\n"
- " movl %%eax,%0\n"
- "7: rep; movsb\n"
- "8:\n"
- ".section .fixup,\"ax\"\n"
- "9: lea 0(%%eax,%0,4),%0\n"
- "16: pushl %0\n"
- " pushl %%eax\n"
- " xorl %%eax,%%eax\n"
- " rep; stosb\n"
- " popl %%eax\n"
- " popl %0\n"
- " jmp 8b\n"
- ".previous\n"
- _ASM_EXTABLE(0b,16b)
- _ASM_EXTABLE(1b,16b)
- _ASM_EXTABLE(2b,16b)
- _ASM_EXTABLE(21b,16b)
- _ASM_EXTABLE(3b,16b)
- _ASM_EXTABLE(31b,16b)
- _ASM_EXTABLE(4b,16b)
- _ASM_EXTABLE(41b,16b)
- _ASM_EXTABLE(10b,16b)
- _ASM_EXTABLE(51b,16b)
- _ASM_EXTABLE(11b,16b)
- _ASM_EXTABLE(61b,16b)
- _ASM_EXTABLE(12b,16b)
- _ASM_EXTABLE(71b,16b)
- _ASM_EXTABLE(13b,16b)
- _ASM_EXTABLE(81b,16b)
- _ASM_EXTABLE(14b,16b)
- _ASM_EXTABLE(91b,16b)
- _ASM_EXTABLE(6b,9b)
- _ASM_EXTABLE(7b,16b)
- : "=&c"(size), "=&D" (d0), "=&S" (d1)
- : "1"(to), "2"(from), "0"(size)
- : "eax", "edx", "memory");
- return size;
-}
-
-/*
- * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware.
- * hyoshiok@miraclelinux.com
- */
-
-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
- const void __user *from, unsigned long size)
-{
- int d0, d1;
-
- __asm__ __volatile__(
- " .align 2,0x90\n"
- "0: movl 32(%4), %%eax\n"
- " cmpl $67, %0\n"
- " jbe 2f\n"
- "1: movl 64(%4), %%eax\n"
- " .align 2,0x90\n"
- "2: movl 0(%4), %%eax\n"
- "21: movl 4(%4), %%edx\n"
- " movnti %%eax, 0(%3)\n"
- " movnti %%edx, 4(%3)\n"
- "3: movl 8(%4), %%eax\n"
- "31: movl 12(%4),%%edx\n"
- " movnti %%eax, 8(%3)\n"
- " movnti %%edx, 12(%3)\n"
- "4: movl 16(%4), %%eax\n"
- "41: movl 20(%4), %%edx\n"
- " movnti %%eax, 16(%3)\n"
- " movnti %%edx, 20(%3)\n"
- "10: movl 24(%4), %%eax\n"
- "51: movl 28(%4), %%edx\n"
- " movnti %%eax, 24(%3)\n"
- " movnti %%edx, 28(%3)\n"
- "11: movl 32(%4), %%eax\n"
- "61: movl 36(%4), %%edx\n"
- " movnti %%eax, 32(%3)\n"
- " movnti %%edx, 36(%3)\n"
- "12: movl 40(%4), %%eax\n"
- "71: movl 44(%4), %%edx\n"
- " movnti %%eax, 40(%3)\n"
- " movnti %%edx, 44(%3)\n"
- "13: movl 48(%4), %%eax\n"
- "81: movl 52(%4), %%edx\n"
- " movnti %%eax, 48(%3)\n"
- " movnti %%edx, 52(%3)\n"
- "14: movl 56(%4), %%eax\n"
- "91: movl 60(%4), %%edx\n"
- " movnti %%eax, 56(%3)\n"
- " movnti %%edx, 60(%3)\n"
- " addl $-64, %0\n"
- " addl $64, %4\n"
- " addl $64, %3\n"
- " cmpl $63, %0\n"
- " ja 0b\n"
- " sfence \n"
- "5: movl %0, %%eax\n"
- " shrl $2, %0\n"
- " andl $3, %%eax\n"
- " cld\n"
- "6: rep; movsl\n"
- " movl %%eax,%0\n"
- "7: rep; movsb\n"
- "8:\n"
- ".section .fixup,\"ax\"\n"
- "9: lea 0(%%eax,%0,4),%0\n"
- "16: pushl %0\n"
- " pushl %%eax\n"
- " xorl %%eax,%%eax\n"
- " rep; stosb\n"
- " popl %%eax\n"
- " popl %0\n"
- " jmp 8b\n"
- ".previous\n"
- _ASM_EXTABLE(0b,16b)
- _ASM_EXTABLE(1b,16b)
- _ASM_EXTABLE(2b,16b)
- _ASM_EXTABLE(21b,16b)
- _ASM_EXTABLE(3b,16b)
- _ASM_EXTABLE(31b,16b)
- _ASM_EXTABLE(4b,16b)
- _ASM_EXTABLE(41b,16b)
- _ASM_EXTABLE(10b,16b)
- _ASM_EXTABLE(51b,16b)
- _ASM_EXTABLE(11b,16b)
- _ASM_EXTABLE(61b,16b)
- _ASM_EXTABLE(12b,16b)
- _ASM_EXTABLE(71b,16b)
- _ASM_EXTABLE(13b,16b)
- _ASM_EXTABLE(81b,16b)
- _ASM_EXTABLE(14b,16b)
- _ASM_EXTABLE(91b,16b)
- _ASM_EXTABLE(6b,9b)
- _ASM_EXTABLE(7b,16b)
- : "=&c"(size), "=&D" (d0), "=&S" (d1)
- : "1"(to), "2"(from), "0"(size)
- : "eax", "edx", "memory");
- return size;
-}
-
static unsigned long __copy_user_intel_nocache(void *to,
const void __user *from, unsigned long size)
{
@@ -486,12 +290,8 @@ static unsigned long __copy_user_intel_nocache(void *to,
* Leave these declared but undefined. They should not be any references to
* them
*/
-unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
- unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
unsigned long size);
-unsigned long __copy_user_zeroing_intel_nocache(void *to,
- const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */
@@ -528,47 +328,7 @@ do { \
: "memory"); \
} while (0)
-#define __copy_user_zeroing(to, from, size) \
-do { \
- int __d0, __d1, __d2; \
- __asm__ __volatile__( \
- " cmp $7,%0\n" \
- " jbe 1f\n" \
- " movl %1,%0\n" \
- " negl %0\n" \
- " andl $7,%0\n" \
- " subl %0,%3\n" \
- "4: rep; movsb\n" \
- " movl %3,%0\n" \
- " shrl $2,%0\n" \
- " andl $3,%3\n" \
- " .align 2,0x90\n" \
- "0: rep; movsl\n" \
- " movl %3,%0\n" \
- "1: rep; movsb\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "5: addl %3,%0\n" \
- " jmp 6f\n" \
- "3: lea 0(%3,%0,4),%0\n" \
- "6: pushl %0\n" \
- " pushl %%eax\n" \
- " xorl %%eax,%%eax\n" \
- " rep; stosb\n" \
- " popl %%eax\n" \
- " popl %0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(4b,5b) \
- _ASM_EXTABLE(0b,3b) \
- _ASM_EXTABLE(1b,6b) \
- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
- : "3"(size), "0"(size), "1"(to), "2"(from) \
- : "memory"); \
-} while (0)
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from,
- unsigned long n)
+unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
{
stac();
if (movsl_is_ok(to, from, n))
@@ -578,51 +338,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
clac();
return n;
}
-EXPORT_SYMBOL(__copy_to_user_ll);
-
-unsigned long __copy_from_user_ll(void *to, const void __user *from,
- unsigned long n)
-{
- stac();
- if (movsl_is_ok(to, from, n))
- __copy_user_zeroing(to, from, n);
- else
- n = __copy_user_zeroing_intel(to, from, n);
- clac();
- return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll);
-
-unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
- unsigned long n)
-{
- stac();
- if (movsl_is_ok(to, from, n))
- __copy_user(to, from, n);
- else
- n = __copy_user_intel((void __user *)to,
- (const void *)from, n);
- clac();
- return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nozero);
-
-unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
- unsigned long n)
-{
- stac();
-#ifdef CONFIG_X86_INTEL_USERCOPY
- if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
- n = __copy_user_zeroing_intel_nocache(to, from, n);
- else
- __copy_user_zeroing(to, from, n);
-#else
- __copy_user_zeroing(to, from, n);
-#endif
- clac();
- return n;
-}
-EXPORT_SYMBOL(__copy_from_user_ll_nocache);
+EXPORT_SYMBOL(__copy_user_ll);
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
unsigned long n)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 69873589c0ba..3b7c40a2e3e1 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -54,15 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
}
EXPORT_SYMBOL(clear_user);
-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
-{
- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
- return copy_user_generic((__force void *)to, (__force void *)from, len);
- }
- return len;
-}
-EXPORT_SYMBOL(copy_in_user);
-
/*
* Try to copy last bytes and clear the rest if needed.
* Since protection fault in copy_from/to_user is not a normal situation,
@@ -80,9 +71,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
break;
}
clac();
-
- /* If the destination is a kernel buffer, we always clear the end */
- if (!__addr_ok(to))
- memset(to, 0, len);
return len;
}
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 99c7805a9693..1f3b6ef105cd 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -106,32 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
struct dev_pagemap *pgmap = NULL;
- int nr_start = *nr;
- pte_t *ptep;
+ int nr_start = *nr, ret = 0;
+ pte_t *ptep, *ptem;
- ptep = pte_offset_map(&pmd, addr);
+ /*
+ * Keep the original mapped PTE value (ptem) around since we
+ * might increment ptep off the end of the page when finishing
+ * our loop iteration.
+ */
+ ptem = ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
/* Similar to the PMD case, NUMA hinting must take slow path */
- if (pte_protnone(pte)) {
- pte_unmap(ptep);
- return 0;
- }
+ if (pte_protnone(pte))
+ break;
+
+ if (!pte_allows_gup(pte_val(pte), write))
+ break;
if (pte_devmap(pte)) {
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
if (unlikely(!pgmap)) {
undo_dev_pagemap(nr, nr_start, pages);
- pte_unmap(ptep);
- return 0;
+ break;
}
- } else if (!pte_allows_gup(pte_val(pte), write) ||
- pte_special(pte)) {
- pte_unmap(ptep);
- return 0;
- }
+ } else if (pte_special(pte))
+ break;
+
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
@@ -141,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
- pte_unmap(ptep - 1);
+ if (addr == end)
+ ret = 1;
+ pte_unmap(ptem);
- return 1;
+ return ret;
}
static inline void get_head_page_multiple(struct page *page, int nr)
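The reworked gup_pte_range() above keeps the first mapped PTE pointer (ptem) so the unmap at the end always matches the original mapping, breaks out of the loop on any slow-path condition, and only reports success when the whole range was walked. A standalone model of that loop shape (the data and the bail-out condition are invented):

#include <stdio.h>

static int walk_range(const int *ptes, int count)
{
        const int *ptep, *ptem;
        int seen = 0, ret = 0;

        ptem = ptep = ptes;                     /* keep the original pointer */
        do {
                if (*ptep < 0)                  /* bail out to the slow path */
                        break;
                seen++;
        } while (ptep++, seen != count);

        if (seen == count)
                ret = 1;                        /* walked everything: fast path ok */
        /* "pte_unmap(ptem)" would go here, using ptem rather than ptep */
        (void)ptem;
        return ret;
}

int main(void)
{
        int ok[]  = { 1, 2, 3 };
        int bad[] = { 1, -1, 3 };

        printf("%d %d\n", walk_range(ok, 3), walk_range(bad, 3));  /* 1 0 */
        return 0;
}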
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8d63d7a104c3..4c90cfdc128b 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 5126dfd52b18..cd44ae727df7 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
* we might run off the end of the bounds table if we are on
* a 64-bit kernel and try to get 8 bytes.
*/
-int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
long __user *bd_entry_ptr)
{
u32 bd_entry_32;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index e1fb269c87af..292ab0364a89 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return 1;
for_each_pci_msi_entry(msidesc, dev) {
- __pci_read_msi_msg(msidesc, &msg);
- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
- if (msg.data != XEN_PIRQ_MSI_DATA ||
- xen_irq_from_pirq(pirq) < 0) {
- pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0) {
- irq = -ENODEV;
- goto error;
- }
- xen_msi_compose_msg(dev, pirq, &msg);
- __pci_write_msi_msg(msidesc, &msg);
- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
- } else {
- dev_dbg(&dev->dev,
- "xen: msi already bound to pirq=%d\n", pirq);
+ pirq = xen_allocate_pirq_msi(dev, msidesc);
+ if (pirq < 0) {
+ irq = -ENODEV;
+ goto error;
}
+ xen_msi_compose_msg(dev, pirq, &msg);
+ __pci_write_msi_msg(msidesc, &msg);
+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index a7dbec4dce27..3dbde04febdc 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
new file mode 100644
index 000000000000..a6c3705a28ad
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c
@@ -0,0 +1,82 @@
+/*
+ * Intel Merrifield power button support
+ *
+ * (C) Copyright 2017 Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+
+static struct resource mrfld_power_btn_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mrfld_power_btn_dev = {
+ .name = "msic_power_btn",
+ .id = PLATFORM_DEVID_NONE,
+ .num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
+ .resource = mrfld_power_btn_resources,
+};
+
+static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ if (code == SCU_DOWN) {
+ platform_device_unregister(&mrfld_power_btn_dev);
+ return 0;
+ }
+
+ return platform_device_register(&mrfld_power_btn_dev);
+}
+
+static struct notifier_block mrfld_power_btn_scu_notifier = {
+ .notifier_call = mrfld_power_btn_scu_status_change,
+};
+
+static int __init register_mrfld_power_btn(void)
+{
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return -ENODEV;
+
+ /*
+ * We need to be sure that the SCU IPC is ready before
+ * PMIC power button device can be registered:
+ */
+ intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
+
+ return 0;
+}
+arch_initcall(register_mrfld_power_btn);
+
+static void __init *mrfld_power_btn_platform_data(void *info)
+{
+ struct resource *res = mrfld_power_btn_resources;
+ struct sfi_device_table_entry *pentry = info;
+
+ res->start = res->end = pentry->irq;
+ return NULL;
+}
+
+static const struct devs_id mrfld_power_btn_dev_id __initconst = {
+ .name = "bcove_power_btn",
+ .type = SFI_DEV_TYPE_IPC,
+ .delay = 1,
+ .msic = 1,
+ .get_platform_data = &mrfld_power_btn_platform_data,
+};
+
+sfi_device(mrfld_power_btn_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
index 86edd1e941eb..9e304e2ea4f5 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c
@@ -19,7 +19,7 @@
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
static struct platform_device wdt_dev = {
.name = "intel_mid_wdt",
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index e793fe509971..e42978d4deaf 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -17,16 +17,6 @@
#include "intel_mid_weak_decls.h"
-static void penwell_arch_setup(void);
-/* penwell arch ops */
-static struct intel_mid_ops penwell_ops = {
- .arch_setup = penwell_arch_setup,
-};
-
-static void mfld_power_off(void)
-{
-}
-
static unsigned long __init mfld_calibrate_tsc(void)
{
unsigned long fast_calibrate;
@@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
static void __init penwell_arch_setup(void)
{
x86_platform.calibrate_tsc = mfld_calibrate_tsc;
- pm_power_off = mfld_power_off;
}
+static struct intel_mid_ops penwell_ops = {
+ .arch_setup = penwell_arch_setup,
+};
+
void *get_penwell_ops(void)
{
return &penwell_ops;
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index b6d5c8946e66..470edad96bb9 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -10,22 +10,19 @@
* Version 2. See the file COPYING for more details.
*/
+#include <linux/bug.h>
+#include <asm/purgatory.h>
+
#include "sha256.h"
-#include "purgatory.h"
#include "../boot/string.h"
-struct sha_region {
- unsigned long start;
- unsigned long len;
-};
-
-static unsigned long backup_dest;
-static unsigned long backup_src;
-static unsigned long backup_sz;
+unsigned long purgatory_backup_dest __section(.kexec-purgatory);
+unsigned long purgatory_backup_src __section(.kexec-purgatory);
+unsigned long purgatory_backup_sz __section(.kexec-purgatory);
-static u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 };
+u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory);
-struct sha_region sha_regions[16] = {};
+struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory);
/*
 * On x86, second kernel requires first 640K of memory to boot. Copy
@@ -34,26 +31,28 @@ struct sha_region sha_regions[16] = {};
*/
static int copy_backup_region(void)
{
- if (backup_dest)
- memcpy((void *)backup_dest, (void *)backup_src, backup_sz);
-
+ if (purgatory_backup_dest) {
+ memcpy((void *)purgatory_backup_dest,
+ (void *)purgatory_backup_src, purgatory_backup_sz);
+ }
return 0;
}
static int verify_sha256_digest(void)
{
- struct sha_region *ptr, *end;
+ struct kexec_sha_region *ptr, *end;
u8 digest[SHA256_DIGEST_SIZE];
struct sha256_state sctx;
sha256_init(&sctx);
- end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])];
- for (ptr = sha_regions; ptr < end; ptr++)
+ end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
+
+ for (ptr = purgatory_sha_regions; ptr < end; ptr++)
sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
sha256_final(&sctx, digest);
- if (memcmp(digest, sha256_digest, sizeof(digest)))
+ if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
return 1;
return 0;
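The verify_sha256_digest() hunk above replaces the open-coded sizeof arithmetic with an ARRAY_SIZE()-derived end pointer. A tiny standalone version of that iteration idiom (struct region stands in for struct kexec_sha_region):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct region {
        unsigned long start;
        unsigned long len;
};

int main(void)
{
        struct region regions[4] = { { 0, 16 }, { 16, 32 } };
        const struct region *ptr, *end = regions + ARRAY_SIZE(regions);
        unsigned long total = 0;

        for (ptr = regions; ptr < end; ptr++)
                total += ptr->len;
        printf("total = %lu\n", total);         /* 48 */
        return 0;
}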
diff --git a/arch/x86/purgatory/purgatory.h b/arch/x86/purgatory/purgatory.h
deleted file mode 100644
index e2e365a6c192..000000000000
--- a/arch/x86/purgatory/purgatory.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef PURGATORY_H
-#define PURGATORY_H
-
-#ifndef __ASSEMBLY__
-extern void purgatory(void);
-#endif /* __ASSEMBLY__ */
-
-#endif /* PURGATORY_H */
diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S
index f90e9dfa90bb..dfae9b9e60b5 100644
--- a/arch/x86/purgatory/setup-x86_64.S
+++ b/arch/x86/purgatory/setup-x86_64.S
@@ -9,7 +9,7 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-#include "purgatory.h"
+#include <asm/purgatory.h>
.text
.globl purgatory_start
diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h
index bd15a4127735..2867d9825a57 100644
--- a/arch/x86/purgatory/sha256.h
+++ b/arch/x86/purgatory/sha256.h
@@ -10,7 +10,6 @@
#ifndef SHA256_H
#define SHA256_H
-
#include <linux/types.h>
#include <crypto/sha.h>
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f4126cf997a4..043d37d45919 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -29,6 +29,7 @@ config XTENSA
select NO_BOOTMEM
select PERF_USE_VMALLOC
select VIRT_TO_BUS
+ select ARCH_HAS_RAW_COPY_USER
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index f41408c53fe1..cc23e9ecc6bb 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += dma-contiguous.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += extable.h
generic-y += fcntl.h
generic-y += hardirq.h
generic-y += ioctl.h
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 8aa0e0d9cbb2..30dd5b2e4ad5 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H
+#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/kmem_layout.h>
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 26512692e28f..2e7bac0d4b2c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -18,6 +18,7 @@
#include <linux/prefetch.h>
#include <asm/types.h>
+#include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should
@@ -233,60 +234,22 @@ __asm__ __volatile__( \
* Copy to/from user space
*/
-/*
- * We use a generic, arbitrary-sized copy subroutine. The Xtensa
- * architecture would cause heavy code bloat if we tried to inline
- * these functions and provide __constant_copy_* equivalents like the
- * i386 versions. __xtensa_copy_user is quite efficient. See the
- * .fixup section of __xtensa_copy_user for a discussion on the
- * X_zeroing equivalents for Xtensa.
- */
-
extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
-#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
-
-
-static inline unsigned long
-__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
-{
- return __copy_user(to, from, n);
-}
static inline unsigned long
-__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __copy_user(to, from, n);
+ prefetchw(to);
+ return __xtensa_copy_user(to, (__force const void *)from, n);
}
-
static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
prefetch(from);
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_user(to, from, n);
- return n;
+ return __xtensa_copy_user((__force void *)to, from, n);
}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
-{
- prefetchw(to);
- if (access_ok(VERIFY_READ, from, n))
- return __copy_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-
-#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
-#define __copy_to_user(to, from, n) \
- __generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user(to, from, n) \
- __generic_copy_from_user_nocheck((to), (from), (n))
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* We need to return the number of bytes not cleared. Our memset()
@@ -342,10 +305,4 @@ static inline long strnlen_user(const char *str, long len)
return __strnlen_user(str, len);
}
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
#endif /* _XTENSA_UACCESS_H */
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 7ea4dd68893e..d9cd766bde3e 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -102,9 +102,9 @@ __xtensa_copy_user:
bltui a4, 7, .Lbytecopy # do short copies byte by byte
# copy 1 byte
- EX(l8ui, a6, a3, 0, l_fixup)
+ EX(l8ui, a6, a3, 0, fixup)
addi a3, a3, 1
- EX(s8i, a6, a5, 0, s_fixup)
+ EX(s8i, a6, a5, 0, fixup)
addi a5, a5, 1
addi a4, a4, -1
bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
@@ -112,11 +112,11 @@ __xtensa_copy_user:
.Ldst2mod4: # dst 16-bit aligned
# copy 2 bytes
bltui a4, 6, .Lbytecopy # do short copies byte by byte
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(l8ui, a7, a3, 1, l_fixup)
+ EX(l8ui, a6, a3, 0, fixup)
+ EX(l8ui, a7, a3, 1, fixup)
addi a3, a3, 2
- EX(s8i, a6, a5, 0, s_fixup)
- EX(s8i, a7, a5, 1, s_fixup)
+ EX(s8i, a6, a5, 0, fixup)
+ EX(s8i, a7, a5, 1, fixup)
addi a5, a5, 2
addi a4, a4, -2
j .Ldstaligned # dst is now aligned, return to main algorithm
@@ -135,9 +135,9 @@ __xtensa_copy_user:
add a7, a3, a4 # a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte:
- EX(l8ui, a6, a3, 0, l_fixup)
+ EX(l8ui, a6, a3, 0, fixup)
addi a3, a3, 1
- EX(s8i, a6, a5, 0, s_fixup)
+ EX(s8i, a6, a5, 0, fixup)
addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS
blt a3, a7, .Lnextbyte
@@ -161,15 +161,15 @@ __xtensa_copy_user:
add a8, a8, a3 # a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
- EX(l32i, a6, a3, 0, l_fixup)
- EX(l32i, a7, a3, 4, l_fixup)
- EX(s32i, a6, a5, 0, s_fixup)
- EX(l32i, a6, a3, 8, l_fixup)
- EX(s32i, a7, a5, 4, s_fixup)
- EX(l32i, a7, a3, 12, l_fixup)
- EX(s32i, a6, a5, 8, s_fixup)
+ EX(l32i, a6, a3, 0, fixup)
+ EX(l32i, a7, a3, 4, fixup)
+ EX(s32i, a6, a5, 0, fixup)
+ EX(l32i, a6, a3, 8, fixup)
+ EX(s32i, a7, a5, 4, fixup)
+ EX(l32i, a7, a3, 12, fixup)
+ EX(s32i, a6, a5, 8, fixup)
addi a3, a3, 16
- EX(s32i, a7, a5, 12, s_fixup)
+ EX(s32i, a7, a5, 12, fixup)
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a3, a8, .Loop1
@@ -177,31 +177,31 @@ __xtensa_copy_user:
.Loop1done:
bbci.l a4, 3, .L2
# copy 8 bytes
- EX(l32i, a6, a3, 0, l_fixup)
- EX(l32i, a7, a3, 4, l_fixup)
+ EX(l32i, a6, a3, 0, fixup)
+ EX(l32i, a7, a3, 4, fixup)
addi a3, a3, 8
- EX(s32i, a6, a5, 0, s_fixup)
- EX(s32i, a7, a5, 4, s_fixup)
+ EX(s32i, a6, a5, 0, fixup)
+ EX(s32i, a7, a5, 4, fixup)
addi a5, a5, 8
.L2:
bbci.l a4, 2, .L3
# copy 4 bytes
- EX(l32i, a6, a3, 0, l_fixup)
+ EX(l32i, a6, a3, 0, fixup)
addi a3, a3, 4
- EX(s32i, a6, a5, 0, s_fixup)
+ EX(s32i, a6, a5, 0, fixup)
addi a5, a5, 4
.L3:
bbci.l a4, 1, .L4
# copy 2 bytes
- EX(l16ui, a6, a3, 0, l_fixup)
+ EX(l16ui, a6, a3, 0, fixup)
addi a3, a3, 2
- EX(s16i, a6, a5, 0, s_fixup)
+ EX(s16i, a6, a5, 0, fixup)
addi a5, a5, 2
.L4:
bbci.l a4, 0, .L5
# copy 1 byte
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(s8i, a6, a5, 0, s_fixup)
+ EX(l8ui, a6, a3, 0, fixup)
+ EX(s8i, a6, a5, 0, fixup)
.L5:
movi a2, 0 # return success for len bytes copied
retw
@@ -217,7 +217,7 @@ __xtensa_copy_user:
# copy 16 bytes per iteration for word-aligned dst and unaligned src
and a10, a3, a8 # save unalignment offset for below
sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
- EX(l32i, a6, a3, 0, l_fixup) # load first word
+ EX(l32i, a6, a3, 0, fixup) # load first word
#if XCHAL_HAVE_LOOPS
loopnez a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +226,19 @@ __xtensa_copy_user:
add a12, a12, a3 # a12 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2:
- EX(l32i, a7, a3, 4, l_fixup)
- EX(l32i, a8, a3, 8, l_fixup)
+ EX(l32i, a7, a3, 4, fixup)
+ EX(l32i, a8, a3, 8, fixup)
ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, s_fixup)
- EX(l32i, a9, a3, 12, l_fixup)
+ EX(s32i, a6, a5, 0, fixup)
+ EX(l32i, a9, a3, 12, fixup)
ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, s_fixup)
- EX(l32i, a6, a3, 16, l_fixup)
+ EX(s32i, a7, a5, 4, fixup)
+ EX(l32i, a6, a3, 16, fixup)
ALIGN( a8, a8, a9)
- EX(s32i, a8, a5, 8, s_fixup)
+ EX(s32i, a8, a5, 8, fixup)
addi a3, a3, 16
ALIGN( a9, a9, a6)
- EX(s32i, a9, a5, 12, s_fixup)
+ EX(s32i, a9, a5, 12, fixup)
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a3, a12, .Loop2
@@ -246,39 +246,39 @@ __xtensa_copy_user:
.Loop2done:
bbci.l a4, 3, .L12
# copy 8 bytes
- EX(l32i, a7, a3, 4, l_fixup)
- EX(l32i, a8, a3, 8, l_fixup)
+ EX(l32i, a7, a3, 4, fixup)
+ EX(l32i, a8, a3, 8, fixup)
ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, s_fixup)
+ EX(s32i, a6, a5, 0, fixup)
addi a3, a3, 8
ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, s_fixup)
+ EX(s32i, a7, a5, 4, fixup)
addi a5, a5, 8
mov a6, a8
.L12:
bbci.l a4, 2, .L13
# copy 4 bytes
- EX(l32i, a7, a3, 4, l_fixup)
+ EX(l32i, a7, a3, 4, fixup)
addi a3, a3, 4
ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, s_fixup)
+ EX(s32i, a6, a5, 0, fixup)
addi a5, a5, 4
mov a6, a7
.L13:
add a3, a3, a10 # readjust a3 with correct misalignment
bbci.l a4, 1, .L14
# copy 2 bytes
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(l8ui, a7, a3, 1, l_fixup)
+ EX(l8ui, a6, a3, 0, fixup)
+ EX(l8ui, a7, a3, 1, fixup)
addi a3, a3, 2
- EX(s8i, a6, a5, 0, s_fixup)
- EX(s8i, a7, a5, 1, s_fixup)
+ EX(s8i, a6, a5, 0, fixup)
+ EX(s8i, a7, a5, 1, fixup)
addi a5, a5, 2
.L14:
bbci.l a4, 0, .L15
# copy 1 byte
- EX(l8ui, a6, a3, 0, l_fixup)
- EX(s8i, a6, a5, 0, s_fixup)
+ EX(l8ui, a6, a3, 0, fixup)
+ EX(s8i, a6, a5, 0, fixup)
.L15:
movi a2, 0 # return success for len bytes copied
retw
@@ -291,30 +291,10 @@ __xtensa_copy_user:
* bytes_copied = a5 - a2
* retval = bytes_not_copied = original len - bytes_copied
* retval = a11 - (a5 - a2)
- *
- * Clearing the remaining pieces of kernel memory plugs security
- * holes. This functionality is the equivalent of the *_zeroing
- * functions that some architectures provide.
*/
-.Lmemset:
- .word memset
-s_fixup:
+fixup:
sub a2, a5, a2 /* a2 <-- bytes copied */
sub a2, a11, a2 /* a2 <-- bytes not copied */
retw
-
-l_fixup:
- sub a2, a5, a2 /* a2 <-- bytes copied */
- sub a2, a11, a2 /* a2 <-- bytes not copied == return value */
-
- /* void *memset(void *s, int c, size_t n); */
- mov a6, a5 /* s */
- movi a7, 0 /* c */
- mov a8, a2 /* n */
- l32r a4, .Lmemset
- callx4 a4
- /* Ignore memset return value in a6. */
- /* a2 still contains bytes not copied. */
- retw