Diffstat (limited to 'arch')
-rw-r--r-- arch/Kconfig | 12
-rw-r--r-- arch/alpha/include/uapi/asm/Kbuild | 41
-rw-r--r-- arch/alpha/kernel/osf_sys.c | 6
-rw-r--r-- arch/alpha/lib/Makefile | 11
-rw-r--r-- arch/arc/Makefile | 4
-rw-r--r-- arch/arc/include/asm/processor.h | 2
-rw-r--r-- arch/arc/include/uapi/asm/Kbuild | 3
-rw-r--r-- arch/arc/mm/mmap.c | 2
-rw-r--r-- arch/arm/Kconfig | 20
-rw-r--r-- arch/arm/Makefile | 8
-rw-r--r-- arch/arm/boot/compressed/efi-header.S | 5
-rw-r--r-- arch/arm/boot/compressed/head.S | 17
-rw-r--r-- arch/arm/boot/dts/am335x-sl50.dts | 8
-rw-r--r-- arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/bcm283x.dtsi | 27
-rw-r--r-- arch/arm/boot/dts/dra7-evm.dts | 2
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx53-qsrb.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6sx-sdb.dts | 17
-rw-r--r-- arch/arm/boot/dts/imx6ul-14x14-evk.dts | 6
l--------- arch/arm/boot/dts/include/arm | 1
l--------- arch/arm/boot/dts/include/arm64 | 1
l--------- arch/arm/boot/dts/include/dt-bindings | 1
-rw-r--r-- arch/arm/boot/dts/keystone-k2l-netcp.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/keystone-k2l.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts | 6
-rw-r--r-- arch/arm/boot/dts/mt7623.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-gta04.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/omap4-panda-a4.dts | 2
-rw-r--r-- arch/arm/boot/dts/omap4-panda-es.dts | 2
-rw-r--r-- arch/arm/boot/dts/rk1108.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sunxi-h3-h5.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/versatile-pb.dts | 2
-rw-r--r-- arch/arm/common/mcpm_entry.c | 6
-rw-r--r-- arch/arm/configs/gemini_defconfig | 68
-rw-r--r-- arch/arm/include/asm/device.h | 3
-rw-r--r-- arch/arm/include/asm/dmi.h | 19
-rw-r--r-- arch/arm/include/asm/kvm_coproc.h | 3
-rw-r--r-- arch/arm/include/asm/pgtable-nommu.h | 1
-rw-r--r-- arch/arm/include/uapi/asm/Kbuild | 17
-rw-r--r-- arch/arm/include/uapi/asm/kvm.h | 6
-rw-r--r-- arch/arm/kernel/module.c | 11
-rw-r--r-- arch/arm/kernel/setup.c | 2
-rw-r--r-- arch/arm/kernel/smp.c | 3
-rw-r--r-- arch/arm/kvm/Makefile | 7
-rw-r--r-- arch/arm/kvm/arm.c | 1480
-rw-r--r-- arch/arm/kvm/coproc.c | 106
-rw-r--r-- arch/arm/kvm/handle_exit.c | 4
-rw-r--r-- arch/arm/kvm/hyp/Makefile | 2
-rw-r--r-- arch/arm/kvm/hyp/switch.c | 4
-rw-r--r-- arch/arm/kvm/init.S | 5
-rw-r--r-- arch/arm/kvm/mmio.c | 217
-rw-r--r-- arch/arm/kvm/mmu.c | 1975
-rw-r--r-- arch/arm/kvm/perf.c | 68
-rw-r--r-- arch/arm/kvm/psci.c | 332
-rw-r--r-- arch/arm/kvm/trace.h | 255
-rw-r--r-- arch/arm/mach-at91/Kconfig | 1
-rw-r--r-- arch/arm/mach-at91/pm.c | 2
-rw-r--r-- arch/arm/mach-bcm/bcm_kona_smc.c | 2
-rw-r--r-- arch/arm/mach-cns3xxx/core.c | 2
-rw-r--r-- arch/arm/mach-davinci/pm.c | 7
-rw-r--r-- arch/arm/mach-omap2/clkt2xxx_dpllcore.c | 3
-rw-r--r-- arch/arm/mach-omap2/clock.c | 35
-rw-r--r-- arch/arm/mach-omap2/clock.h | 2
-rw-r--r-- arch/arm/mach-omap2/cm.h | 5
-rw-r--r-- arch/arm/mach-omap2/cm2xxx.c | 9
-rw-r--r-- arch/arm/mach-omap2/cm3xxx.c | 10
-rw-r--r-- arch/arm/mach-omap2/cm_common.c | 2
-rw-r--r-- arch/arm/mach-omap2/common.h | 3
-rw-r--r-- arch/arm/mach-omap2/omap-mpuss-lowpower.c | 10
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 11
-rw-r--r-- arch/arm/mach-omap2/prm_common.c | 2
-rw-r--r-- arch/arm/mach-omap2/vc.c | 2
-rw-r--r-- arch/arm/mach-spear/time.c | 2
-rw-r--r-- arch/arm/mm/dma-mapping.c | 29
-rw-r--r-- arch/arm/mm/mmap.c | 4
-rw-r--r-- arch/arm/mm/mmu.c | 8
-rw-r--r-- arch/arm64/Kconfig | 6
-rw-r--r-- arch/arm64/Kconfig.platforms | 5
-rw-r--r-- arch/arm64/Makefile | 6
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi | 2
l--------- arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | 85
-rw-r--r-- arch/arm64/boot/dts/hisilicon/hi6220.dtsi | 31
l--------- arch/arm64/boot/dts/include/arm | 1
l--------- arch/arm64/boot/dts/include/arm64 | 1
l--------- arch/arm64/boot/dts/include/dt-bindings | 1
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-3720-db.dts | 8
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-37xx.dtsi | 73
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 3
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts | 2
-rw-r--r-- arch/arm64/configs/defconfig | 116
-rw-r--r-- arch/arm64/include/asm/acpi.h | 6
-rw-r--r-- arch/arm64/include/asm/asm-uaccess.h | 9
-rw-r--r-- arch/arm64/include/asm/atomic_ll_sc.h | 1
-rw-r--r-- arch/arm64/include/asm/atomic_lse.h | 4
-rw-r--r-- arch/arm64/include/asm/barrier.h | 20
-rw-r--r-- arch/arm64/include/asm/cmpxchg.h | 2
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 12
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 6
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 8
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 4
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 13
-rw-r--r-- arch/arm64/include/uapi/asm/Kbuild | 18
-rw-r--r-- arch/arm64/include/uapi/asm/kvm.h | 6
-rw-r--r-- arch/arm64/kernel/armv8_deprecated.c | 3
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 23
-rw-r--r-- arch/arm64/kernel/efi.c | 15
-rw-r--r-- arch/arm64/kernel/entry.S | 5
-rw-r--r-- arch/arm64/kernel/hw_breakpoint.c | 3
-rw-r--r-- arch/arm64/kernel/module.c | 7
-rw-r--r-- arch/arm64/kernel/pci.c | 4
-rw-r--r-- arch/arm64/kernel/perf_event.c | 23
-rw-r--r-- arch/arm64/kernel/smp.c | 3
-rw-r--r-- arch/arm64/kernel/traps.c | 4
-rw-r--r-- arch/arm64/kernel/vdso.c | 5
-rw-r--r-- arch/arm64/kernel/vdso/gettimeofday.S | 1
-rw-r--r-- arch/arm64/kvm/Makefile | 5
-rw-r--r-- arch/arm64/kvm/hyp-init.S | 11
-rw-r--r-- arch/arm64/kvm/hyp/Makefile | 2
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 8
-rw-r--r-- arch/arm64/kvm/vgic-sys-reg-v3.c | 10
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 12
-rw-r--r-- arch/blackfin/include/asm/processor.h | 5
-rw-r--r-- arch/blackfin/include/uapi/asm/Kbuild | 17
-rw-r--r-- arch/c6x/include/asm/processor.h | 5
-rw-r--r-- arch/c6x/include/uapi/asm/Kbuild | 8
-rw-r--r-- arch/cris/arch-v10/kernel/process.c | 8
-rw-r--r-- arch/cris/arch-v32/drivers/Kconfig | 1
-rw-r--r-- arch/cris/arch-v32/kernel/process.c | 8
l--------- arch/cris/boot/dts/include/dt-bindings | 1
-rw-r--r-- arch/cris/include/arch-v10/arch/Kbuild | 1
-rw-r--r-- arch/cris/include/arch-v32/arch/Kbuild | 1
-rw-r--r-- arch/cris/include/asm/processor.h | 2
-rw-r--r-- arch/cris/include/uapi/arch-v10/arch/Kbuild | 5
-rw-r--r-- arch/cris/include/uapi/arch-v32/arch/Kbuild | 3
-rw-r--r-- arch/cris/include/uapi/asm/Kbuild | 42
-rw-r--r-- arch/frv/include/asm/processor.h | 5
-rw-r--r-- arch/frv/include/asm/timex.h | 6
-rw-r--r-- arch/frv/include/uapi/asm/Kbuild | 33
-rw-r--r-- arch/frv/kernel/asm-offsets.c | 19
-rw-r--r-- arch/frv/kernel/process.c | 9
-rw-r--r-- arch/frv/mm/elf-fdpic.c | 2
-rw-r--r-- arch/h8300/include/asm/processor.h | 4
-rw-r--r-- arch/h8300/include/uapi/asm/Kbuild | 28
-rw-r--r-- arch/h8300/include/uapi/asm/bitsperlong.h (renamed from arch/h8300/include/asm/bitsperlong.h) | 6
-rw-r--r-- arch/h8300/kernel/process.c | 5
-rw-r--r-- arch/hexagon/include/asm/Kbuild | 3
-rw-r--r-- arch/hexagon/include/asm/processor.h | 3
-rw-r--r-- arch/hexagon/include/uapi/asm/Kbuild | 13
-rw-r--r-- arch/hexagon/kernel/process.c | 8
-rw-r--r-- arch/hexagon/mm/uaccess.c | 5
-rw-r--r-- arch/ia64/include/asm/processor.h | 17
-rw-r--r-- arch/ia64/include/uapi/asm/Kbuild | 45
-rw-r--r-- arch/ia64/kernel/Makefile | 26
-rw-r--r-- arch/ia64/kernel/Makefile.gate | 2
-rw-r--r-- arch/m32r/include/asm/processor.h | 2
-rw-r--r-- arch/m32r/include/uapi/asm/Kbuild | 31
-rw-r--r-- arch/m32r/kernel/process.c | 8
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 8
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 8
-rw-r--r-- arch/m68k/configs/atari_defconfig | 8
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 8
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 8
-rw-r--r-- arch/m68k/configs/mac_defconfig | 8
-rw-r--r-- arch/m68k/configs/multi_defconfig | 8
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 8
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 8
-rw-r--r-- arch/m68k/configs/q40_defconfig | 8
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 8
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 8
-rw-r--r-- arch/m68k/include/asm/processor.h | 2
-rw-r--r-- arch/m68k/include/asm/signal.h | 5
-rw-r--r-- arch/m68k/include/uapi/asm/Kbuild | 24
-rw-r--r-- arch/m68k/kernel/process.c | 14
-rw-r--r-- arch/m68k/kernel/signal.c | 16
l--------- arch/metag/boot/dts/include/dt-bindings | 1
-rw-r--r-- arch/metag/include/asm/uaccess.h | 58
-rw-r--r-- arch/metag/include/uapi/asm/Kbuild | 8
-rw-r--r-- arch/metag/kernel/smp.c | 3
-rw-r--r-- arch/metag/lib/usercopy.c | 236
-rw-r--r-- arch/metag/mm/mmu-meta1.c | 1
-rw-r--r-- arch/microblaze/configs/mmu_defconfig | 16
-rw-r--r-- arch/microblaze/configs/nommu_defconfig | 17
-rw-r--r-- arch/microblaze/include/asm/Kbuild | 45
-rw-r--r-- arch/microblaze/include/asm/bitops.h | 1
-rw-r--r-- arch/microblaze/include/asm/bug.h | 1
-rw-r--r-- arch/microblaze/include/asm/bugs.h | 1
-rw-r--r-- arch/microblaze/include/asm/div64.h | 1
-rw-r--r-- arch/microblaze/include/asm/emergency-restart.h | 1
-rw-r--r-- arch/microblaze/include/asm/fb.h | 1
-rw-r--r-- arch/microblaze/include/asm/hardirq.h | 1
-rw-r--r-- arch/microblaze/include/asm/irq_regs.h | 1
-rw-r--r-- arch/microblaze/include/asm/kdebug.h | 1
-rw-r--r-- arch/microblaze/include/asm/kmap_types.h | 6
-rw-r--r-- arch/microblaze/include/asm/linkage.h | 1
-rw-r--r-- arch/microblaze/include/asm/local.h | 1
-rw-r--r-- arch/microblaze/include/asm/local64.h | 1
-rw-r--r-- arch/microblaze/include/asm/parport.h | 1
-rw-r--r-- arch/microblaze/include/asm/percpu.h | 1
-rw-r--r-- arch/microblaze/include/asm/processor.h | 6
-rw-r--r-- arch/microblaze/include/asm/serial.h | 1
-rw-r--r-- arch/microblaze/include/asm/shmparam.h | 1
-rw-r--r-- arch/microblaze/include/asm/topology.h | 1
-rw-r--r-- arch/microblaze/include/asm/ucontext.h | 1
-rw-r--r-- arch/microblaze/include/asm/unistd.h | 2
-rw-r--r-- arch/microblaze/include/asm/vga.h | 1
-rw-r--r-- arch/microblaze/include/asm/xor.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/Kbuild | 32
-rw-r--r-- arch/microblaze/include/uapi/asm/bitsperlong.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/errno.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/fcntl.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/ioctl.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/ioctls.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/ipcbuf.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/kvm_para.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/mman.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/msgbuf.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/param.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/poll.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/resource.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/sembuf.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/shmbuf.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/siginfo.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/signal.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/socket.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/sockios.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/stat.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/statfs.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/swab.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/termbits.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/termios.h | 1
-rw-r--r-- arch/microblaze/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/microblaze/kernel/dma.c | 3
-rw-r--r-- arch/microblaze/kernel/entry.S | 28
-rw-r--r-- arch/microblaze/kernel/process.c | 17
-rw-r--r-- arch/microblaze/kernel/syscall_table.S | 1
-rw-r--r-- arch/microblaze/kernel/timer.c | 2
-rw-r--r-- arch/microblaze/mm/highmem.c | 24
-rw-r--r-- arch/mips/Kbuild | 2
-rw-r--r-- arch/mips/Kconfig | 24
-rw-r--r-- arch/mips/Kconfig.debug | 2
-rw-r--r-- arch/mips/boot/Makefile | 10
l--------- arch/mips/boot/dts/include/dt-bindings | 1
-rw-r--r-- arch/mips/cavium-octeon/Kconfig | 9
-rw-r--r-- arch/mips/cavium-octeon/Platform | 4
-rw-r--r-- arch/mips/cavium-octeon/executive/cvmx-l2c.c | 139
-rw-r--r-- arch/mips/cavium-octeon/executive/octeon-model.c | 21
-rw-r--r-- arch/mips/cavium-octeon/octeon-platform.c | 113
-rw-r--r-- arch/mips/cavium-octeon/setup.c | 12
-rw-r--r-- arch/mips/configs/generic_defconfig | 3
-rw-r--r-- arch/mips/include/asm/cache.h | 5
-rw-r--r-- arch/mips/include/asm/cpu-info.h | 3
-rw-r--r-- arch/mips/include/asm/cpufeature.h | 26
-rw-r--r-- arch/mips/include/asm/highmem.h | 5
-rw-r--r-- arch/mips/include/asm/kprobes.h | 3
-rw-r--r-- arch/mips/include/asm/mach-rm/cpu-feature-overrides.h | 2
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2c-defs.h | 3193
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2c.h | 59
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2d-defs.h | 526
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-l2t-defs.h | 286
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-pciercx-defs.h | 3225
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-sli-defs.h | 3541
-rw-r--r-- arch/mips/include/asm/octeon/cvmx.h | 3
-rw-r--r-- arch/mips/include/asm/pgalloc.h | 26
-rw-r--r-- arch/mips/include/asm/pgtable-32.h | 7
-rw-r--r-- arch/mips/include/asm/pgtable-64.h | 88
-rw-r--r-- arch/mips/include/asm/uasm.h | 88
-rw-r--r-- arch/mips/include/uapi/asm/Kbuild | 37
-rw-r--r-- arch/mips/kernel/branch.c | 4
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 7
-rw-r--r-- arch/mips/kernel/entry.S | 3
-rw-r--r-- arch/mips/kernel/ftrace.c | 24
-rw-r--r-- arch/mips/kernel/head.S | 2
-rw-r--r-- arch/mips/kernel/mips-r2-to-r6-emul.c | 16
-rw-r--r-- arch/mips/kernel/perf_event_mipsxx.c | 6
-rw-r--r-- arch/mips/kernel/pm-cps.c | 9
-rw-r--r-- arch/mips/kernel/process.c | 7
-rw-r--r-- arch/mips/kernel/r4k_switch.S | 6
-rw-r--r-- arch/mips/kernel/smp-cps.c | 7
-rw-r--r-- arch/mips/kernel/smp-mt.c | 49
-rw-r--r-- arch/mips/kernel/smp.c | 20
-rw-r--r-- arch/mips/kernel/traps.c | 2
-rw-r--r-- arch/mips/kvm/tlb.c | 6
-rw-r--r-- arch/mips/lantiq/irq.c | 52
-rw-r--r-- arch/mips/math-emu/cp1emu.c | 10
-rw-r--r-- arch/mips/math-emu/dp_maddf.c | 5
-rw-r--r-- arch/mips/math-emu/sp_maddf.c | 5
-rw-r--r-- arch/mips/mm/dma-default.c | 23
-rw-r--r-- arch/mips/mm/fault.c | 16
-rw-r--r-- arch/mips/mm/init.c | 3
-rw-r--r-- arch/mips/mm/mmap.c | 2
-rw-r--r-- arch/mips/mm/pgtable-32.c | 6
-rw-r--r-- arch/mips/mm/pgtable-64.c | 33
-rw-r--r-- arch/mips/mm/tlbex.c | 22
-rw-r--r-- arch/mips/mm/uasm-mips.c | 1
-rw-r--r-- arch/mips/mm/uasm.c | 159
-rw-r--r-- arch/mips/mti-malta/malta-int.c | 83
-rw-r--r-- arch/mips/net/bpf_jit.c | 41
-rw-r--r-- arch/mips/net/bpf_jit_asm.S | 23
-rw-r--r-- arch/mips/pci/pcie-octeon.c | 4
-rw-r--r-- arch/mips/sibyte/bcm1480/setup.c | 1
-rw-r--r-- arch/mips/sibyte/sb1250/setup.c | 1
-rw-r--r-- arch/mn10300/include/asm/processor.h | 5
-rw-r--r-- arch/mn10300/include/uapi/asm/Kbuild | 32
-rw-r--r-- arch/mn10300/kernel/process.c | 8
-rw-r--r-- arch/nios2/Kconfig | 2
-rw-r--r-- arch/nios2/Kconfig.debug | 1
-rw-r--r-- arch/nios2/Makefile | 5
-rw-r--r-- arch/nios2/boot/.gitignore | 2
-rw-r--r-- arch/nios2/boot/dts/10m50_devboard.dts | 3
-rw-r--r-- arch/nios2/include/asm/Kbuild | 1
-rw-r--r-- arch/nios2/include/asm/cacheflush.h | 6
-rw-r--r-- arch/nios2/include/asm/cmpxchg.h | 14
-rw-r--r-- arch/nios2/include/asm/cpuinfo.h | 2
-rw-r--r-- arch/nios2/include/asm/processor.h | 3
-rw-r--r-- arch/nios2/include/asm/prom.h | 22
-rw-r--r-- arch/nios2/include/asm/setup.h | 2
-rw-r--r-- arch/nios2/include/asm/uaccess.h | 7
-rw-r--r-- arch/nios2/include/uapi/asm/Kbuild | 4
-rw-r--r-- arch/nios2/kernel/.gitignore | 1
-rw-r--r-- arch/nios2/kernel/Makefile | 1
-rw-r--r-- arch/nios2/kernel/cpuinfo.c | 18
-rw-r--r-- arch/nios2/kernel/early_printk.c | 118
-rw-r--r-- arch/nios2/kernel/irq.c | 2
-rw-r--r-- arch/nios2/kernel/prom.c | 49
-rw-r--r-- arch/nios2/kernel/setup.c | 6
-rw-r--r-- arch/nios2/mm/uaccess.c | 33
-rw-r--r-- arch/nios2/platform/Kconfig.platform | 26
-rw-r--r-- arch/openrisc/include/asm/Kbuild | 3
-rw-r--r-- arch/openrisc/include/asm/processor.h | 5
-rw-r--r-- arch/openrisc/include/uapi/asm/Kbuild | 8
-rw-r--r-- arch/openrisc/kernel/process.c | 7
-rw-r--r-- arch/parisc/include/asm/processor.h | 5
-rw-r--r-- arch/parisc/include/uapi/asm/Kbuild | 28
-rw-r--r-- arch/parisc/kernel/process.c | 5
-rw-r--r-- arch/parisc/kernel/sys_parisc.c | 15
-rw-r--r-- arch/powerpc/Kconfig | 8
-rw-r--r-- arch/powerpc/Makefile.postlink | 2
l--------- arch/powerpc/boot/dts/include/dt-bindings | 1
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-4k.h | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/hash-64k.h | 6
-rw-r--r-- arch/powerpc/include/asm/bug.h | 2
-rw-r--r-- arch/powerpc/include/asm/cpm1.h | 2
-rw-r--r-- arch/powerpc/include/asm/cpu_has_feature.h | 6
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 5
-rw-r--r-- arch/powerpc/include/asm/dt_cpu_ftrs.h | 26
-rw-r--r-- arch/powerpc/include/asm/kprobes.h | 1
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_asm.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 28
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 74
-rw-r--r-- arch/powerpc/include/asm/module.h | 4
-rw-r--r-- arch/powerpc/include/asm/page.h | 12
-rw-r--r-- arch/powerpc/include/asm/processor.h | 26
-rw-r--r-- arch/powerpc/include/asm/reg.h | 1
-rw-r--r-- arch/powerpc/include/asm/topology.h | 14
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 8
-rw-r--r-- arch/powerpc/include/asm/xive.h | 21
-rw-r--r-- arch/powerpc/include/uapi/asm/Kbuild | 45
-rw-r--r-- arch/powerpc/include/uapi/asm/cputable.h | 9
-rw-r--r-- arch/powerpc/kernel/Makefile | 1
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 10
-rw-r--r-- arch/powerpc/kernel/cputable.c | 40
-rw-r--r-- arch/powerpc/kernel/dt_cpu_ftrs.c | 1069
-rw-r--r-- arch/powerpc/kernel/exceptions-64e.S | 12
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 15
-rw-r--r-- arch/powerpc/kernel/idle_book3s.S | 2
-rw-r--r-- arch/powerpc/kernel/kprobes.c | 20
-rw-r--r-- arch/powerpc/kernel/process.c | 22
-rw-r--r-- arch/powerpc/kernel/prom.c | 31
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 4
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 45
-rw-r--r-- arch/powerpc/kernel/smp.c | 2
-rw-r--r-- arch/powerpc/kernel/trace/ftrace_64_mprofile.S | 59
-rw-r--r-- arch/powerpc/kvm/Kconfig | 7
-rw-r--r-- arch/powerpc/kvm/Makefile | 8
-rw-r--r-- arch/powerpc/kvm/book3s.c | 75
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 13
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 102
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 112
-rw-r--r-- arch/powerpc/kvm/book3s_hv_interrupts.S | 12
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_xics.c | 10
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_xive.c | 47
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 137
-rw-r--r-- arch/powerpc/kvm/book3s_pr_papr.c | 70
-rw-r--r-- arch/powerpc/kvm/book3s_rtas.c | 21
-rw-r--r-- arch/powerpc/kvm/book3s_xics.c | 35
-rw-r--r-- arch/powerpc/kvm/book3s_xics.h | 7
-rw-r--r-- arch/powerpc/kvm/book3s_xive.c | 1894
-rw-r--r-- arch/powerpc/kvm/book3s_xive.h | 256
-rw-r--r-- arch/powerpc/kvm/book3s_xive_template.c | 503
-rw-r--r-- arch/powerpc/kvm/irq.h | 1
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 21
-rw-r--r-- arch/powerpc/mm/dump_linuxpagetables.c | 7
-rw-r--r-- arch/powerpc/mm/hugetlbpage-radix.c | 2
-rw-r--r-- arch/powerpc/mm/mmap.c | 4
-rw-r--r-- arch/powerpc/mm/mmu_context_book3s64.c | 2
-rw-r--r-- arch/powerpc/mm/slice.c | 2
-rw-r--r-- arch/powerpc/perf/perf_regs.c | 3
-rw-r--r-- arch/powerpc/perf/power9-pmu.c | 4
-rw-r--r-- arch/powerpc/platforms/Kconfig | 11
-rw-r--r-- arch/powerpc/platforms/cell/spu_base.c | 4
-rw-r--r-- arch/powerpc/platforms/cell/spufs/coredump.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-powernv.c | 3
-rw-r--r-- arch/powerpc/platforms/powernv/npu-dma.c | 102
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 1
-rw-r--r-- arch/powerpc/platforms/powernv/subcore.c | 8
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-memory.c | 2
-rw-r--r-- arch/powerpc/sysdev/cpm1.c | 25
-rw-r--r-- arch/powerpc/sysdev/simple_gpio.c | 3
-rw-r--r-- arch/powerpc/sysdev/xive/common.c | 144
-rw-r--r-- arch/powerpc/sysdev/xive/native.c | 86
-rw-r--r-- arch/s390/Kconfig | 3
-rw-r--r-- arch/s390/configs/default_defconfig | 39
-rw-r--r-- arch/s390/configs/gcov_defconfig | 28
-rw-r--r-- arch/s390/configs/performance_defconfig | 27
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 6
-rw-r--r-- arch/s390/defconfig | 8
-rw-r--r-- arch/s390/include/asm/debug.h | 3
-rw-r--r-- arch/s390/include/asm/dis.h | 2
-rw-r--r-- arch/s390/include/asm/eadm.h | 6
-rw-r--r-- arch/s390/include/asm/kprobes.h | 20
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 1
-rw-r--r-- arch/s390/include/asm/processor.h | 5
-rw-r--r-- arch/s390/include/asm/sysinfo.h | 6
-rw-r--r-- arch/s390/include/uapi/asm/Kbuild | 46
-rw-r--r-- arch/s390/kernel/debug.c | 8
-rw-r--r-- arch/s390/kernel/entry.S | 40
-rw-r--r-- arch/s390/kernel/ftrace.c | 4
-rw-r--r-- arch/s390/kernel/ipl.c | 7
-rw-r--r-- arch/s390/kernel/process.c | 25
-rw-r--r-- arch/s390/kernel/sysinfo.c | 2
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 8
-rw-r--r-- arch/s390/kvm/gaccess.c | 15
-rw-r--r-- arch/s390/kvm/interrupt.c | 4
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 2
-rw-r--r-- arch/s390/lib/probes.c | 1
-rw-r--r-- arch/s390/lib/uaccess.c | 4
-rw-r--r-- arch/s390/mm/mmap.c | 4
-rw-r--r-- arch/score/include/asm/Kbuild | 3
-rw-r--r-- arch/score/include/asm/processor.h | 1
-rw-r--r-- arch/score/include/uapi/asm/Kbuild | 32
-rw-r--r-- arch/score/kernel/process.c | 5
-rw-r--r-- arch/sh/Makefile | 7
-rw-r--r-- arch/sh/include/uapi/asm/Kbuild | 23
-rw-r--r-- arch/sh/mm/mmap.c | 4
-rw-r--r-- arch/sparc/Kconfig | 15
-rw-r--r-- arch/sparc/include/asm/hugetlb.h | 6
-rw-r--r-- arch/sparc/include/asm/mmu_64.h | 2
-rw-r--r-- arch/sparc/include/asm/mmu_context_64.h | 32
-rw-r--r-- arch/sparc/include/asm/pgtable_32.h | 4
-rw-r--r-- arch/sparc/include/asm/pil.h | 1
-rw-r--r-- arch/sparc/include/asm/processor_32.h | 3
-rw-r--r-- arch/sparc/include/asm/processor_64.h | 2
-rw-r--r-- arch/sparc/include/asm/setup.h | 2
-rw-r--r-- arch/sparc/include/asm/vio.h | 1
-rw-r--r-- arch/sparc/include/uapi/asm/Kbuild | 48
-rw-r--r-- arch/sparc/kernel/ds.c | 2
-rw-r--r-- arch/sparc/kernel/ftrace.c | 13
-rw-r--r-- arch/sparc/kernel/irq_64.c | 17
-rw-r--r-- arch/sparc/kernel/kernel.h | 1
-rw-r--r-- arch/sparc/kernel/process_32.c | 8
-rw-r--r-- arch/sparc/kernel/process_64.c | 19
-rw-r--r-- arch/sparc/kernel/smp_64.c | 31
-rw-r--r-- arch/sparc/kernel/sys_sparc_64.c | 4
-rw-r--r-- arch/sparc/kernel/tsb.S | 11
-rw-r--r-- arch/sparc/kernel/ttable_64.S | 2
-rw-r--r-- arch/sparc/kernel/vio.c | 68
-rw-r--r-- arch/sparc/lib/Makefile | 1
-rw-r--r-- arch/sparc/lib/multi3.S | 35
-rw-r--r-- arch/sparc/mm/hugetlbpage.c | 2
-rw-r--r-- arch/sparc/mm/init_32.c | 2
-rw-r--r-- arch/sparc/mm/init_64.c | 89
-rw-r--r-- arch/sparc/mm/tsb.c | 7
-rw-r--r-- arch/sparc/mm/ultra.S | 5
-rw-r--r-- arch/tile/include/arch/Kbuild | 1
-rw-r--r-- arch/tile/include/asm/Kbuild | 3
-rw-r--r-- arch/tile/include/asm/processor.h | 7
-rw-r--r-- arch/tile/include/uapi/arch/Kbuild | 17
-rw-r--r-- arch/tile/include/uapi/asm/Kbuild | 17
-rw-r--r-- arch/tile/lib/atomic_asm_32.S | 3
-rw-r--r-- arch/tile/mm/hugetlbpage.c | 2
-rw-r--r-- arch/um/Kconfig.common | 5
-rw-r--r-- arch/um/drivers/ubd_kern.c | 2
-rw-r--r-- arch/um/include/asm/processor-generic.h | 2
-rw-r--r-- arch/um/kernel/initrd.c | 4
-rw-r--r-- arch/um/kernel/sysrq.c | 6
-rw-r--r-- arch/um/kernel/um_arch.c | 12
-rw-r--r-- arch/um/os-Linux/skas/process.c | 4
-rw-r--r-- arch/unicore32/Makefile | 4
-rw-r--r-- arch/unicore32/include/uapi/asm/Kbuild | 6
-rw-r--r-- arch/x86/Kconfig | 11
-rw-r--r-- arch/x86/Makefile | 5
-rw-r--r-- arch/x86/boot/compressed/Makefile | 2
-rw-r--r-- arch/x86/boot/compressed/cmdline.c | 2
-rw-r--r-- arch/x86/boot/compressed/eboot.c | 73
-rw-r--r-- arch/x86/boot/compressed/error.h | 4
-rw-r--r-- arch/x86/boot/compressed/head_64.S | 86
-rw-r--r-- arch/x86/boot/compressed/kaslr.c | 194
-rw-r--r-- arch/x86/boot/compressed/misc.c | 6
-rw-r--r-- arch/x86/boot/compressed/misc.h | 2
-rw-r--r-- arch/x86/boot/compressed/pagetable.c | 20
-rw-r--r-- arch/x86/boot/copy.S | 20
-rw-r--r-- arch/x86/boot/string.c | 8
-rw-r--r-- arch/x86/boot/string.h | 1
-rw-r--r-- arch/x86/crypto/Makefile | 2
-rw-r--r-- arch/x86/crypto/sha1-mb/Makefile | 2
-rw-r--r-- arch/x86/crypto/sha256-mb/Makefile | 2
-rw-r--r-- arch/x86/entry/entry_32.S | 30
-rw-r--r-- arch/x86/entry/entry_64.S | 14
-rw-r--r-- arch/x86/events/core.c | 27
-rw-r--r-- arch/x86/events/intel/core.c | 67
-rw-r--r-- arch/x86/events/intel/lbr.c | 4
-rw-r--r-- arch/x86/events/intel/rapl.c | 2
-rw-r--r-- arch/x86/events/intel/uncore.c | 2
-rw-r--r-- arch/x86/events/perf_event.h | 3
-rw-r--r-- arch/x86/include/asm/asm.h | 1
-rw-r--r-- arch/x86/include/asm/atomic.h | 13
-rw-r--r-- arch/x86/include/asm/efi.h | 2
-rw-r--r-- arch/x86/include/asm/extable.h | 1
-rw-r--r-- arch/x86/include/asm/hardirq.h | 2
-rw-r--r-- arch/x86/include/asm/init.h | 3
-rw-r--r-- arch/x86/include/asm/kvm_emulate.h | 1
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 4
-rw-r--r-- arch/x86/include/asm/mce.h | 1
-rw-r--r-- arch/x86/include/asm/mmu.h | 6
-rw-r--r-- arch/x86/include/asm/mmu_context.h | 63
-rw-r--r-- arch/x86/include/asm/mshyperv.h | 3
-rw-r--r-- arch/x86/include/asm/msr-index.h | 2
-rw-r--r-- arch/x86/include/asm/paravirt.h | 10
-rw-r--r-- arch/x86/include/asm/paravirt_types.h | 5
-rw-r--r-- arch/x86/include/asm/pgtable-3level.h | 47
-rw-r--r-- arch/x86/include/asm/pgtable.h | 55
-rw-r--r-- arch/x86/include/asm/pgtable_64.h | 22
-rw-r--r-- arch/x86/include/asm/pmem.h | 2
-rw-r--r-- arch/x86/include/asm/processor-flags.h | 36
-rw-r--r-- arch/x86/include/asm/processor.h | 10
-rw-r--r-- arch/x86/include/asm/special_insns.h | 10
-rw-r--r-- arch/x86/include/asm/timer.h | 8
-rw-r--r-- arch/x86/include/asm/tlbbatch.h | 14
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 114
-rw-r--r-- arch/x86/include/asm/uaccess.h | 11
-rw-r--r-- arch/x86/include/asm/uv/uv.h | 11
-rw-r--r-- arch/x86/include/uapi/asm/Kbuild | 59
-rw-r--r-- arch/x86/include/uapi/asm/hyperv.h | 15
-rw-r--r-- arch/x86/include/uapi/asm/processor-flags.h | 2
-rw-r--r-- arch/x86/kernel/Makefile | 2
-rw-r--r-- arch/x86/kernel/acpi/Makefile | 2
-rw-r--r-- arch/x86/kernel/alternative.c | 9
-rw-r--r-- arch/x86/kernel/apic/htirq.c | 2
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 22
-rw-r--r-- arch/x86/kernel/apic/msi.c | 2
-rw-r--r-- arch/x86/kernel/apic/vector.c | 4
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 5
-rw-r--r-- arch/x86/kernel/cpu/cyrix.c | 1
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 4
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 13
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd.c | 22
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 13
-rw-r--r-- arch/x86/kernel/cpu/microcode/intel.c | 26
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 18
-rw-r--r-- arch/x86/kernel/espfix_64.c | 2
-rw-r--r-- arch/x86/kernel/fpu/init.c | 1
-rw-r--r-- arch/x86/kernel/ftrace.c | 20
-rw-r--r-- arch/x86/kernel/head64.c | 145
-rw-r--r-- arch/x86/kernel/head_64.S | 131
-rw-r--r-- arch/x86/kernel/i8259.c | 1
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 9
-rw-r--r-- arch/x86/kernel/kprobes/opt.c | 9
-rw-r--r-- arch/x86/kernel/kvm.c | 2
-rw-r--r-- arch/x86/kernel/ldt.c | 56
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 8
-rw-r--r-- arch/x86/kernel/nmi_selftest.c | 2
-rw-r--r-- arch/x86/kernel/paravirt.c | 2
-rw-r--r-- arch/x86/kernel/process.c | 11
-rw-r--r-- arch/x86/kernel/process_32.c | 4
-rw-r--r-- arch/x86/kernel/process_64.c | 4
-rw-r--r-- arch/x86/kernel/reboot.c | 2
-rw-r--r-- arch/x86/kernel/setup.c | 21
-rw-r--r-- arch/x86/kernel/setup_percpu.c | 10
-rw-r--r-- arch/x86/kernel/smpboot.c | 3
-rw-r--r-- arch/x86/kernel/step.c | 2
-rw-r--r-- arch/x86/kernel/sys_x86_64.c | 4
-rw-r--r-- arch/x86/kernel/tboot.c | 2
-rw-r--r-- arch/x86/kernel/traps.c | 2
-rw-r--r-- arch/x86/kernel/tsc.c | 206
-rw-r--r-- arch/x86/kernel/unwind_frame.c | 49
-rw-r--r-- arch/x86/kvm/cpuid.c | 20
-rw-r--r-- arch/x86/kvm/emulate.c | 3
-rw-r--r-- arch/x86/kvm/lapic.c | 5
-rw-r--r-- arch/x86/kvm/mmu.c | 22
-rw-r--r-- arch/x86/kvm/mmu.h | 2
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 39
-rw-r--r-- arch/x86/kvm/pmu_intel.c | 2
-rw-r--r-- arch/x86/kvm/svm.c | 31
-rw-r--r-- arch/x86/kvm/vmx.c | 280
-rw-r--r-- arch/x86/kvm/x86.c | 117
-rw-r--r-- arch/x86/lib/copy_user_64.S | 7
-rw-r--r-- arch/x86/lib/csum-copy_64.S | 12
-rw-r--r-- arch/x86/lib/kaslr.c | 3
-rw-r--r-- arch/x86/lib/msr-reg.S | 8
-rw-r--r-- arch/x86/lib/x86-opcode-map.txt | 2
-rw-r--r-- arch/x86/math-emu/fpu_system.h | 2
-rw-r--r-- arch/x86/mm/Makefile | 2
-rw-r--r-- arch/x86/mm/dump_pagetables.c | 2
-rw-r--r-- arch/x86/mm/extable.c | 3
-rw-r--r-- arch/x86/mm/fault.c | 10
-rw-r--r-- arch/x86/mm/gup.c | 496
-rw-r--r-- arch/x86/mm/hugetlbpage.c | 2
-rw-r--r-- arch/x86/mm/ident_map.c | 14
-rw-r--r-- arch/x86/mm/init.c | 10
-rw-r--r-- arch/x86/mm/init_64.c | 128
-rw-r--r-- arch/x86/mm/ioremap.c | 2
-rw-r--r-- arch/x86/mm/kasan_init_64.c | 12
-rw-r--r-- arch/x86/mm/kaslr.c | 81
-rw-r--r-- arch/x86/mm/mmap.c | 3
-rw-r--r-- arch/x86/mm/numa_32.c | 1
-rw-r--r-- arch/x86/mm/pageattr.c | 2
-rw-r--r-- arch/x86/mm/testmmiotrace.c | 2
-rw-r--r-- arch/x86/mm/tlb.c | 458
-rw-r--r-- arch/x86/net/Makefile | 2
-rw-r--r-- arch/x86/platform/efi/Makefile | 1
-rw-r--r-- arch/x86/platform/efi/efi.c | 9
-rw-r--r-- arch/x86/platform/efi/efi_32.c | 9
-rw-r--r-- arch/x86/platform/efi/efi_64.c | 88
-rw-r--r-- arch/x86/platform/efi/quirks.c | 140
-rw-r--r-- arch/x86/platform/olpc/olpc-xo1-pm.c | 2
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 24
-rw-r--r-- arch/x86/power/Makefile | 2
-rw-r--r-- arch/x86/power/cpu.c | 2
-rw-r--r-- arch/x86/power/hibernate_64.c | 5
-rw-r--r-- arch/x86/realmode/init.c | 2
-rw-r--r-- arch/x86/um/ptrace_64.c | 2
-rw-r--r-- arch/x86/um/shared/sysdep/kernel-offsets.h | 9
-rw-r--r-- arch/x86/xen/Makefile | 3
-rw-r--r-- arch/x86/xen/efi.c | 45
-rw-r--r-- arch/x86/xen/enlighten_pv.c | 57
-rw-r--r-- arch/x86/xen/mmu.c | 2
-rw-r--r-- arch/x86/xen/mmu_pv.c | 192
-rw-r--r-- arch/x86/xen/time.c | 2
-rw-r--r-- arch/x86/xen/xen-pvh.S | 2
-rw-r--r-- arch/xtensa/include/asm/irq.h | 3
-rw-r--r-- arch/xtensa/include/asm/processor.h | 2
-rw-r--r-- arch/xtensa/include/uapi/asm/Kbuild | 23
-rw-r--r-- arch/xtensa/kernel/irq.c | 5
-rw-r--r-- arch/xtensa/kernel/setup.c | 3
-rw-r--r-- arch/xtensa/kernel/syscall.c | 2
-rw-r--r-- arch/xtensa/kernel/vmlinux.lds.S | 6
-rw-r--r-- arch/xtensa/platforms/iss/simdisk.c | 3
-rw-r--r-- arch/xtensa/platforms/xtfpga/include/platform/hardware.h | 6
-rw-r--r-- arch/xtensa/platforms/xtfpga/setup.c | 10
655 files changed, 10147 insertions(+), 19840 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index dcbd462b68b1..f76b214cf7ad 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -324,6 +324,9 @@ config HAVE_CMPXCHG_LOCAL
config HAVE_CMPXCHG_DOUBLE
bool
+config ARCH_WEAK_RELEASE_ACQUIRE
+ bool
+
config ARCH_WANT_IPC_PARSE_VERSION
bool
@@ -864,4 +867,13 @@ config STRICT_MODULE_RWX
config ARCH_WANT_RELAX_ORDER
bool
+config REFCOUNT_FULL
+ bool "Perform full reference count validation at the expense of speed"
+ help
+ Enabling this switches the refcounting infrastructure from a fast
+ unchecked atomic_t implementation to a fully state checked
+ implementation, which can be (slightly) slower but provides protections
+ against various use-after-free conditions that can be used in
+ security flaw exploits.
+
source "kernel/gcov/Kconfig"
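For context on what REFCOUNT_FULL hardens: the refcount_t API from <linux/refcount.h> replaces bare atomic_t reference counts, and with this option enabled its operations are fully state-checked. A minimal sketch of typical usage follows; struct foo and its helpers are hypothetical, while refcount_set(), refcount_inc(), and refcount_dec_and_test() are the real primitives.

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical refcounted object. With REFCOUNT_FULL=y the operations
 * below WARN on overflow, underflow, and increment-from-zero instead
 * of silently wrapping the way a raw atomic_t would.
 */
struct foo {
	refcount_t refs;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refs, 1);	/* initial reference */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->refs);			/* checked increment */
	return f;
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refs))	/* checked decrement */
		kfree(f);
}
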
diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild
index d96f2ef5b639..b15bf6bc0e94 100644
--- a/arch/alpha/include/uapi/asm/Kbuild
+++ b/arch/alpha/include/uapi/asm/Kbuild
@@ -1,43 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += a.out.h
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += compiler.h
-header-y += console.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += fpu.h
-header-y += gentrap.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += pal.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += reg.h
-header-y += regdef.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += sysinfo.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 9ec56dc97374..ce93124a850b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1201,8 +1201,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
return -EFAULT;
- err = 0;
- err |= put_user(status, ustatus);
+ err = put_user(status, ustatus);
+ if (ret < 0)
+ return err ? err : ret;
+
err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 59660743237c..7083434dd241 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -46,11 +46,6 @@ AFLAGS___remqu.o = -DREM
AFLAGS___divlu.o = -DDIV -DINTSIZE
AFLAGS___remlu.o = -DREM -DINTSIZE
-$(obj)/__divqu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__remqu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__divlu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
-$(obj)/__remlu.o: $(obj)/$(ev6-y)divide.S
- $(cmd_as_o_S)
+$(addprefix $(obj)/,__divqu.o __remqu.o __divlu.o __remlu.o): \
+ $(src)/$(ev6-y)divide.S FORCE
+ $(call if_changed_rule,as_o_S)
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 19cce226d1a8..44ef35d33956 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -123,9 +123,9 @@ libs-y += arch/arc/lib/ $(LIBGCC)
boot := arch/arc/boot
#default target for make without any arguments.
-KBUILD_IMAGE := bootpImage
+KBUILD_IMAGE := $(boot)/bootpImage
-all: $(KBUILD_IMAGE)
+all: bootpImage
bootpImage: vmlinux
boot_targets += uImage uImage.bin uImage.gz
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 6e1242da0159..4104a0839214 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -86,8 +86,6 @@ struct task_struct;
#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4)
#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0)
-#define thread_saved_pc(tsk) TSK_K_BLINK(tsk)
-
extern void start_thread(struct pt_regs * regs, unsigned long pc,
unsigned long usp);
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index f50d02df78d5..b15bf6bc0e94 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,5 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += elf.h
-header-y += page.h
-header-y += cachectl.h
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 3e25e8d6486b..2e13683dfb24 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
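This one-line change, repeated across the per-arch mmap.c files in this series, makes the free-address check treat the guard gap below a downward-growing stack as occupied. For reference, vm_start_gap() is roughly the following <linux/mm.h> helper from the stack guard gap rework; quoted from memory, so treat it as a sketch rather than the authoritative source.

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;	/* reserve the guard gap */
		if (vm_start > vma->vm_start)	/* clamp on underflow */
			vm_start = 0;
	}
	return vm_start;
}
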
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c1a35f15838..6491be556ddc 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1416,6 +1416,7 @@ choice
config VMSPLIT_3G
bool "3G/1G user/kernel split"
config VMSPLIT_3G_OPT
+ depends on !ARM_LPAE
bool "3G/1G user/kernel split (for full 1G low memory)"
config VMSPLIT_2G
bool "2G/2G user/kernel split"
@@ -1637,7 +1638,7 @@ config ARCH_SELECT_MEMORY_MODEL
config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
-config HAVE_GENERIC_RCU_GUP
+config HAVE_GENERIC_GUP
def_bool y
depends on ARM_LPAE
@@ -2061,6 +2062,23 @@ config EFI
is only useful for kernels that may run on systems that have
UEFI firmware.
+config DMI
+ bool "Enable support for SMBIOS (DMI) tables"
+ depends on EFI
+ default y
+ help
+ This enables SMBIOS/DMI feature for systems.
+
+ This option is only useful on systems that have UEFI firmware.
+ However, even with this option, the resultant kernel should
+ continue to boot on existing non-UEFI platforms.
+
+ NOTE: This does *NOT* enable or encourage the use of DMI quirks,
+ i.e., the practice of identifying the platform via DMI to
+ decide whether certain workarounds for buggy hardware and/or
+ firmware need to be enabled. This would require the DMI subsystem
+ to be enabled much earlier than we do on ARM, which is non-trivial.
+
endmenu
menu "CPU Power Management"
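For context on the "DMI quirks" the NOTE above declines to enable: a quirk is typically a dmi_check_system() table that matches firmware identity strings and flips on a workaround. A hedged sketch follows; the vendor/product strings and callback are invented, while struct dmi_system_id, DMI_MATCH(), and dmi_check_system() are the real <linux/dmi.h> interfaces.

#include <linux/dmi.h>

static bool need_workaround;

static int enable_workaround(const struct dmi_system_id *id)
{
	need_workaround = true;
	return 1;	/* nonzero stops the table walk */
}

static const struct dmi_system_id quirk_table[] = {
	{
		.callback = enable_workaround,
		.ident = "Example Board rev A",	/* hypothetical platform */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},
	{ }	/* terminator */
};

/* somewhere in driver init: dmi_check_system(quirk_table); */
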
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index ab30cc634d02..65f4e2a4eb94 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -297,10 +297,11 @@ drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
libs-y := arch/arm/lib/ $(libs-y)
# Default target when executing plain make
+boot := arch/arm/boot
ifeq ($(CONFIG_XIP_KERNEL),y)
-KBUILD_IMAGE := xipImage
+KBUILD_IMAGE := $(boot)/xipImage
else
-KBUILD_IMAGE := zImage
+KBUILD_IMAGE := $(boot)/zImage
endif
# Build the DT binary blobs if we have OF configured
@@ -308,9 +309,8 @@ ifeq ($(CONFIG_USE_OF),y)
KBUILD_DTBS := dtbs
endif
-all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
+all: $(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS)
-boot := arch/arm/boot
archheaders:
$(Q)$(MAKE) $(build)=arch/arm/tools uapi
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 9d5dc4fda3c1..a17ca8d78656 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,14 +17,13 @@
@ there.
.inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
#else
- mov r0, r0
+ AR_CLASS( mov r0, r0 )
+ M_CLASS( nop.w )
#endif
.endm
.macro __EFI_HEADER
#ifdef CONFIG_EFI_STUB
- b __efi_start
-
.set start_offset, __efi_start - start
.org start + 0x3c
@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7c711ba61417..8a756870c238 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,19 +130,22 @@ start:
.rept 7
__nop
.endr
- ARM( mov r0, r0 )
- ARM( b 1f )
- THUMB( badr r12, 1f )
- THUMB( bx r12 )
+#ifndef CONFIG_THUMB2_KERNEL
+ mov r0, r0
+#else
+ AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
+ M_CLASS( nop.w ) @ M: already in Thumb2 mode
+ .thumb
+#endif
+ W(b) 1f
.word _magic_sig @ Magic numbers to help the loader
.word _magic_start @ absolute load/run zImage address
.word _magic_end @ zImage end address
.word 0x04030201 @ endianness flag
- THUMB( .thumb )
-1: __EFI_HEADER
-
+ __EFI_HEADER
+1:
ARM_BE8( setend be ) @ go BE8 if compiled for BE8
AR_CLASS( mrs r9, cpsr )
#ifdef CONFIG_ARM_VIRT_EXT
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index c5d2589c55fc..fc864a855991 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -220,7 +220,7 @@
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
>;
};
@@ -280,10 +280,6 @@
AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */
AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */
AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */
- /* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */
/* PDI Bus - Battery system */
AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */
AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */
@@ -384,7 +380,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
bus-width = <4>;
- cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+ cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vmmcsd_fixed>;
};
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
index 12c981e51134..9a0599f711ff 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9512.dtsi
@@ -1,6 +1,6 @@
/ {
aliases {
- ethernet = &ethernet;
+ ethernet0 = &ethernet;
};
};
diff --git a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
index 3f0a56ebcf1f..dc7ae776db5f 100644
--- a/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
+++ b/arch/arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
@@ -1,6 +1,6 @@
/ {
aliases {
- ethernet = &ethernet;
+ ethernet0 = &ethernet;
};
};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 35cea3fcaf5c..9444a9a9ba10 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -3,6 +3,11 @@
#include <dt-bindings/clock/bcm2835-aux.h>
#include <dt-bindings/gpio/gpio.h>
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
/* This include file covers the common peripherals and configuration between
* bcm2835 and bcm2836 implementations, leaving the CPU configuration to
* bcm2835.dtsi and bcm2836.dtsi.
@@ -198,8 +203,8 @@
brcm,pins = <0 1>;
brcm,function = <BCM2835_FSEL_ALT0>;
};
- i2c0_gpio32: i2c0_gpio32 {
- brcm,pins = <32 34>;
+ i2c0_gpio28: i2c0_gpio28 {
+ brcm,pins = <28 29>;
brcm,function = <BCM2835_FSEL_ALT0>;
};
i2c0_gpio44: i2c0_gpio44 {
@@ -295,20 +300,28 @@
/* Separate from the uart0_gpio14 group
* because it conflicts with spi1_gpio16, and
* people often run uart0 on the two pins
- * without flow contrl.
+ * without flow control.
*/
uart0_ctsrts_gpio16: uart0_ctsrts_gpio16 {
brcm,pins = <16 17>;
brcm,function = <BCM2835_FSEL_ALT3>;
};
- uart0_gpio30: uart0_gpio30 {
+ uart0_ctsrts_gpio30: uart0_ctsrts_gpio30 {
brcm,pins = <30 31>;
brcm,function = <BCM2835_FSEL_ALT3>;
};
- uart0_ctsrts_gpio32: uart0_ctsrts_gpio32 {
+ uart0_gpio32: uart0_gpio32 {
brcm,pins = <32 33>;
brcm,function = <BCM2835_FSEL_ALT3>;
};
+ uart0_gpio36: uart0_gpio36 {
+ brcm,pins = <36 37>;
+ brcm,function = <BCM2835_FSEL_ALT2>;
+ };
+ uart0_ctsrts_gpio38: uart0_ctsrts_gpio38 {
+ brcm,pins = <38 39>;
+ brcm,function = <BCM2835_FSEL_ALT2>;
+ };
uart1_gpio14: uart1_gpio14 {
brcm,pins = <14 15>;
@@ -326,10 +339,6 @@
brcm,pins = <30 31>;
brcm,function = <BCM2835_FSEL_ALT5>;
};
- uart1_gpio36: uart1_gpio36 {
- brcm,pins = <36 37 38 39>;
- brcm,function = <BCM2835_FSEL_ALT2>;
- };
uart1_gpio40: uart1_gpio40 {
brcm,pins = <40 41>;
brcm,function = <BCM2835_FSEL_ALT5>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 4bc4b575c99b..31a9e061ddd0 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -204,6 +204,8 @@
tps659038: tps659038@58 {
compatible = "ti,tps659038";
reg = <0x58>;
+ ti,palmas-override-powerhold;
+ ti,system-power-controller;
tps659038_pmic {
compatible = "ti,tps659038-pmic";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 57892f264cea..e7144662af45 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -2017,4 +2017,8 @@
coefficients = <0 2000>;
};
+&cpu_crit {
+ temperature = <120000>; /* milli Celsius */
+};
+
/include/ "dra7xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index de2215832372..4e103a905dc9 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -23,7 +23,7 @@
imx53-qsrb {
pinctrl_pmic: pmicgrp {
fsl,pins = <
- MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */
+ MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */
>;
};
};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 5bb8fd57e7f5..d71da30c9cff 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -12,23 +12,6 @@
model = "Freescale i.MX6 SoloX SDB RevB Board";
};
-&cpu0 {
- operating-points = <
- /* kHz uV */
- 996000 1250000
- 792000 1175000
- 396000 1175000
- 198000 1175000
- >;
- fsl,soc-operating-points = <
- /* ARM kHz SOC uV */
- 996000 1250000
- 792000 1175000
- 396000 1175000
- 198000 1175000
- >;
-};
-
&i2c1 {
clock-frequency = <100000>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index f18e1f1d0ce2..d2be8aa3370b 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -120,10 +120,16 @@
ethphy0: ethernet-phy@2 {
reg = <2>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET_REF>;
+ clock-names = "rmii-ref";
};
ethphy1: ethernet-phy@1 {
reg = <1>;
+ micrel,led-mode = <1>;
+ clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+ clock-names = "rmii-ref";
};
};
};
diff --git a/arch/arm/boot/dts/include/arm b/arch/arm/boot/dts/include/arm
deleted file mode 120000
index a96aa0ea9d8c..000000000000
--- a/arch/arm/boot/dts/include/arm
+++ /dev/null
@@ -1 +0,0 @@
-..
\ No newline at end of file
diff --git a/arch/arm/boot/dts/include/arm64 b/arch/arm/boot/dts/include/arm64
deleted file mode 120000
index 074a835fca3e..000000000000
--- a/arch/arm/boot/dts/include/arm64
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm64/boot/dts
\ No newline at end of file
diff --git a/arch/arm/boot/dts/include/dt-bindings b/arch/arm/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/arm/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
index b6f26824e83a..66f615a74118 100644
--- a/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l-netcp.dtsi
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
/* NetCP address range */
ranges = <0 0x26000000 0x1000000>;
- clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
- clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+ clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+ clock-names = "pa_clk", "ethss_clk", "cpts";
dma-coherent;
ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/keystone-k2l.dtsi b/arch/arm/boot/dts/keystone-k2l.dtsi
index b58e7ebc0919..148650406cf7 100644
--- a/arch/arm/boot/dts/keystone-k2l.dtsi
+++ b/arch/arm/boot/dts/keystone-k2l.dtsi
@@ -232,6 +232,14 @@
};
};
+ osr: sram@70000000 {
+ compatible = "mmio-sram";
+ reg = <0x70000000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clocks = <&clkosr>;
+ };
+
dspgpio0: keystone_dsp_gpio@02620240 {
compatible = "ti,keystone-dsp-gpio";
gpio-controller;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 08cce17a25a0..43e9364083de 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -249,9 +249,9 @@
OMAP3_CORE1_IOPAD(0x2110, PIN_INPUT | MUX_MODE0) /* cam_xclka.cam_xclka */
OMAP3_CORE1_IOPAD(0x2112, PIN_INPUT | MUX_MODE0) /* cam_pclk.cam_pclk */
- OMAP3_CORE1_IOPAD(0x2114, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */
- OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */
- OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */
+ OMAP3_CORE1_IOPAD(0x2116, PIN_INPUT | MUX_MODE0) /* cam_d0.cam_d0 */
+ OMAP3_CORE1_IOPAD(0x2118, PIN_INPUT | MUX_MODE0) /* cam_d1.cam_d1 */
+ OMAP3_CORE1_IOPAD(0x211a, PIN_INPUT | MUX_MODE0) /* cam_d2.cam_d2 */
OMAP3_CORE1_IOPAD(0x211c, PIN_INPUT | MUX_MODE0) /* cam_d3.cam_d3 */
OMAP3_CORE1_IOPAD(0x211e, PIN_INPUT | MUX_MODE0) /* cam_d4.cam_d4 */
OMAP3_CORE1_IOPAD(0x2120, PIN_INPUT | MUX_MODE0) /* cam_d5.cam_d5 */
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
index 402579ab70d2..3a9e9b6aea68 100644
--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -72,6 +72,8 @@
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ clock-frequency = <13000000>;
+ arm,cpu-registers-not-fw-configured;
};
watchdog: watchdog@10007000 {
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index b3a8b1f24499..9ec737069369 100644
--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -55,7 +55,8 @@
simple-audio-card,bitclock-master = <&telephony_link_master>;
simple-audio-card,frame-master = <&telephony_link_master>;
simple-audio-card,format = "i2s";
-
+ simple-audio-card,bitclock-inversion;
+ simple-audio-card,frame-inversion;
simple-audio-card,cpu {
sound-dai = <&mcbsp4>;
};
diff --git a/arch/arm/boot/dts/omap4-panda-a4.dts b/arch/arm/boot/dts/omap4-panda-a4.dts
index 78d363177762..f1a6476af371 100644
--- a/arch/arm/boot/dts/omap4-panda-a4.dts
+++ b/arch/arm/boot/dts/omap4-panda-a4.dts
@@ -13,7 +13,7 @@
/* Pandaboard Rev A4+ have external pullups on SCL & SDA */
&dss_hdmi_pins {
pinctrl-single,pins = <
- OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */
+ OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */
OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */
OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */
>;
diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
index 119f8e657edc..940fe4f7c5f6 100644
--- a/arch/arm/boot/dts/omap4-panda-es.dts
+++ b/arch/arm/boot/dts/omap4-panda-es.dts
@@ -34,7 +34,7 @@
/* PandaboardES has external pullups on SCL & SDA */
&dss_hdmi_pins {
pinctrl-single,pins = <
- OMAP4_IOPAD(0x09a, PIN_INPUT_PULLUP | MUX_MODE0) /* hdmi_cec.hdmi_cec */
+ OMAP4_IOPAD(0x09a, PIN_INPUT | MUX_MODE0) /* hdmi_cec.hdmi_cec */
OMAP4_IOPAD(0x09c, PIN_INPUT | MUX_MODE0) /* hdmi_scl.hdmi_scl */
OMAP4_IOPAD(0x09e, PIN_INPUT | MUX_MODE0) /* hdmi_sda.hdmi_sda */
>;
diff --git a/arch/arm/boot/dts/rk1108.dtsi b/arch/arm/boot/dts/rk1108.dtsi
index 6c8fc19d0ecd..1297924db6ad 100644
--- a/arch/arm/boot/dts/rk1108.dtsi
+++ b/arch/arm/boot/dts/rk1108.dtsi
@@ -41,7 +41,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/clock/rk1108-cru.h>
+#include <dt-bindings/clock/rv1108-cru.h>
#include <dt-bindings/pinctrl/rockchip.h>
/ {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 1aeeacb3a884..d4f600dbb7eb 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -558,10 +558,11 @@
};
r_ccu: clock@1f01400 {
- compatible = "allwinner,sun50i-a64-r-ccu";
+ compatible = "allwinner,sun8i-h3-r-ccu";
reg = <0x01f01400 0x100>;
- clocks = <&osc24M>, <&osc32k>, <&iosc>;
- clock-names = "hosc", "losc", "iosc";
+ clocks = <&osc24M>, <&osc32k>, <&iosc>,
+ <&ccu 9>;
+ clock-names = "hosc", "losc", "iosc", "pll-periph";
#clock-cells = <1>;
#reset-cells = <1>;
};
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 33a8eb28374e..06e2331f666d 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
/ {
model = "ARM Versatile PB";
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index cf062472e07b..2b913f17d50f 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
return ret;
}
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
void mcpm_cpu_power_down(void)
{
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
* on the CPU.
*/
phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
- phys_reset(__pa_symbol(mcpm_entry_point));
+ phys_reset(__pa_symbol(mcpm_entry_point), false);
/* should never get here */
BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
__mcpm_cpu_down(cpu, cluster);
phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
- phys_reset(__pa_symbol(mcpm_entry_point));
+ phys_reset(__pa_symbol(mcpm_entry_point), false);
BUG();
}
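The typedef change above derives phys_reset_t from cpu_reset's own declaration with typeof, so when cpu_reset grew its second argument the typedef followed automatically and every caller was forced to pass it (hence the added "false"). A generic, self-contained illustration of the pattern, with invented names and an assumed two-argument prototype:

#include <stdbool.h>

void board_reset(unsigned long addr, bool to_hyp)
{
	(void)addr;
	(void)to_hyp;	/* stub; real code would jump to addr */
}

/* The alias tracks board_reset's signature instead of hard-coding it. */
typedef typeof(&board_reset) reset_fn_t;

void jump_to_entry(unsigned long entry)
{
	reset_fn_t fn = board_reset;

	fn(entry, false);	/* both arguments are now type-checked */
}
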
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
new file mode 100644
index 000000000000..d2d75fa664a6
--- /dev/null
+++ b/arch/arm/configs/gemini_defconfig
@@ -0,0 +1,68 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_USER_NS=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_MULTI_V4=y
+# CONFIG_ARCH_MULTI_V7 is not set
+CONFIG_ARCH_GEMINI=y
+CONFIG_PCI=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_CMDLINE="console=ttyS0,115200n8"
+CONFIG_KEXEC=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PM=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_GEMINI_WATCHDOG=y
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_FOTG210_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GEMINI=y
+CONFIG_DMADEVICES=y
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ROMFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 36ec9c8f6e16..3234fe9bba6e 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -19,7 +19,8 @@ struct dev_archdata {
#ifdef CONFIG_XEN
const struct dma_map_ops *dev_dma_ops;
#endif
- bool dma_coherent;
+ unsigned int dma_coherent:1;
+ unsigned int dma_ops_setup:1;
};
struct omap_device;
diff --git a/arch/arm/include/asm/dmi.h b/arch/arm/include/asm/dmi.h
new file mode 100644
index 000000000000..df2d2ff06f5b
--- /dev/null
+++ b/arch/arm/include/asm/dmi.h
@@ -0,0 +1,19 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_DMI_H
+#define __ASM_DMI_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB)
+#define dmi_early_unmap(x, l) memunmap(x)
+#define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB)
+#define dmi_unmap(x) memunmap(x)
+#define dmi_alloc(l) kzalloc(l, GFP_KERNEL)
+
+#endif
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index 4917c2f7e459..e74ab0fbab79 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 302240c19a5a..a0d726a47c8a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
#define pgprot_noncached(prot) (prot)
#define pgprot_writecombine(prot) (prot)
#define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot) (prot)
/*
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 46a76cd6acb6..607f702c2d62 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,23 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += auxvec.h
-header-y += byteorder.h
-header-y += fcntl.h
-header-y += hwcap.h
-header-y += ioctls.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += perf_regs.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += unistd.h
genhdr-y += unistd-common.h
genhdr-y += unistd-oabi.h
genhdr-y += unistd-eabi.h
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index a88726359e5f..5e3c673fa3f4 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -196,13 +196,17 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
+#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
+#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
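The new ITS attribute constants are driven from userspace through KVM's device-attribute ioctls. A hedged sketch of how a VMM might request an ITS table save during migration, based on my reading of the vgic-its device interface (error handling omitted):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Ask the in-kernel ITS to flush its translation tables into guest RAM
 * so they migrate along with the rest of memory. its_dev_fd is assumed
 * to be the fd of a previously created KVM_DEV_TYPE_ARM_VGIC_ITS device.
 */
static int its_save_tables(int its_dev_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
	};

	return ioctl(its_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
}
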
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 80254b47dc34..3ff571c2c71c 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -40,8 +40,15 @@
#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
- void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ gfp_t gfp_mask = GFP_KERNEL;
+ void *p;
+
+ /* Silence the initial allocation */
+ if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
+ p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+ gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
return p;
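The hunk shown ends at the early return; when CONFIG_ARM_MODULE_PLTS is enabled and the dedicated module region is exhausted, the function goes on to retry from the full vmalloc space, which is why the first attempt can safely be silenced. A sketch of that fallback tail (assumed shape, mirroring how the PLT fallback is expected to work):

        /* Dedicated module area is full: retry anywhere in vmalloc space
         * and let PLT veneers bridge the long branches. This second
         * attempt is allowed to warn on failure. */
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
                                    NUMA_NO_NODE, __builtin_return_address(0));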
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 32e1a9513dc7..4e80bf7420d4 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -315,7 +315,7 @@ static void __init cacheid_init(void)
if (arch >= CPU_ARCH_ARMv6) {
unsigned int cachetype = read_cpuid_cachetype();
- if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
+ if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
cacheid = 0;
} else if ((cachetype & (7 << 29)) == 4 << 29) {
/* ARMv7 register format */
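Instead of requiring the whole cache type register to read as zero, the tightened test keeps only the two minimum-line-size fields. A reading of what 0xf000f selects, assuming the ARMv7 CTR layout (the patch itself does not spell this out):

/* Assumed CTR fields behind the 0xf000f mask:
 *   bits [3:0]   IminLine - log2(words) of the smallest I-cache line
 *   bits [19:16] DminLine - log2(words) of the smallest D-cache line
 * A v7M core is treated as cacheless only if both are zero, even when
 * other architecturally defined CTR bits read back non-zero. */
#define CTR_IMINLINE(ctr)       ((ctr) & 0xf)
#define CTR_DMINLINE(ctr)       (((ctr) >> 16) & 0xf)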
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 572a8df1b766..c9a0a5299827 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -555,8 +555,7 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
*/
static void ipi_cpu_stop(unsigned int cpu)
{
- if (system_state == SYSTEM_BOOTING ||
- system_state == SYSTEM_RUNNING) {
+ if (system_state <= SYSTEM_RUNNING) {
raw_spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
dump_stack();
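The simplified test leans on the declaration order of enum system_states, with every "live" state ordered before the shutdown states. A sketch of the ordering assumed here, modelled on include/linux/kernel.h of this era, where SYSTEM_SCHEDULING was newly inserted between booting and running:

enum system_states {
        SYSTEM_BOOTING,
        SYSTEM_SCHEDULING,
        SYSTEM_RUNNING,
        SYSTEM_HALT,
        SYSTEM_POWER_OFF,
        SYSTEM_RESTART,
};

With this ordering, system_state <= SYSTEM_RUNNING covers all three early states in one comparison instead of enumerating them.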
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index 7b3670c2ae7b..d9beee652d36 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -18,9 +18,12 @@ KVM := ../../../virt/kvm
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
+
obj-y += kvm-arm.o init.o interrupts.o
-obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
-obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o vgic-v3-coproc.o
+obj-y += handle_exit.o guest.o emulate.o reset.o
+obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
+obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
+obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
obj-y += $(KVM)/arm/aarch32.o
obj-y += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
deleted file mode 100644
index 8a31906bdc9b..000000000000
--- a/arch/arm/kvm/arm.c
+++ /dev/null
@@ -1,1480 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/cpu_pm.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/mman.h>
-#include <linux/sched.h>
-#include <linux/kvm.h>
-#include <trace/events/kvm.h>
-#include <kvm/arm_pmu.h>
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-#include <linux/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/mman.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/virt.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_psci.h>
-#include <asm/sections.h>
-
-#ifdef REQUIRES_VIRT
-__asm__(".arch_extension virt");
-#endif
-
-static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
-
-/* Per-CPU variable containing the currently running vcpu. */
-static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
-
-/* The VMID used in the VTTBR */
-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u32 kvm_next_vmid;
-static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
-
-static bool vgic_present;
-
-static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
-
-static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
-{
- BUG_ON(preemptible());
- __this_cpu_write(kvm_arm_running_vcpu, vcpu);
-}
-
-/**
- * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
- * Must be called from non-preemptible context
- */
-struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
-{
- BUG_ON(preemptible());
- return __this_cpu_read(kvm_arm_running_vcpu);
-}
-
-/**
- * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
- */
-struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
-{
- return &kvm_arm_running_vcpu;
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
-}
-
-int kvm_arch_hardware_setup(void)
-{
- return 0;
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
- *(int *)rtn = 0;
-}
-
-
-/**
- * kvm_arch_init_vm - initializes a VM data structure
- * @kvm: pointer to the KVM struct
- */
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
- int ret, cpu;
-
- if (type)
- return -EINVAL;
-
- kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
- if (!kvm->arch.last_vcpu_ran)
- return -ENOMEM;
-
- for_each_possible_cpu(cpu)
- *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
- ret = kvm_alloc_stage2_pgd(kvm);
- if (ret)
- goto out_fail_alloc;
-
- ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
- if (ret)
- goto out_free_stage2_pgd;
-
- kvm_vgic_early_init(kvm);
-
- /* Mark the initial VMID generation invalid */
- kvm->arch.vmid_gen = 0;
-
- /* The maximum number of VCPUs is limited by the host's GIC model */
- kvm->arch.max_vcpus = vgic_present ?
- kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
-
- return ret;
-out_free_stage2_pgd:
- kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
- free_percpu(kvm->arch.last_vcpu_ran);
- kvm->arch.last_vcpu_ran = NULL;
- return ret;
-}
-
-bool kvm_arch_has_vcpu_debugfs(void)
-{
- return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
- return VM_FAULT_SIGBUS;
-}
-
-
-/**
- * kvm_arch_destroy_vm - destroy the VM data structure
- * @kvm: pointer to the KVM struct
- */
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
- int i;
-
- free_percpu(kvm->arch.last_vcpu_ran);
- kvm->arch.last_vcpu_ran = NULL;
-
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_arch_vcpu_free(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
-
- kvm_vgic_destroy(kvm);
-}
-
-int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
-{
- int r;
- switch (ext) {
- case KVM_CAP_IRQCHIP:
- r = vgic_present;
- break;
- case KVM_CAP_IOEVENTFD:
- case KVM_CAP_DEVICE_CTRL:
- case KVM_CAP_USER_MEMORY:
- case KVM_CAP_SYNC_MMU:
- case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
- case KVM_CAP_ONE_REG:
- case KVM_CAP_ARM_PSCI:
- case KVM_CAP_ARM_PSCI_0_2:
- case KVM_CAP_READONLY_MEM:
- case KVM_CAP_MP_STATE:
- case KVM_CAP_IMMEDIATE_EXIT:
- r = 1;
- break;
- case KVM_CAP_ARM_SET_DEVICE_ADDR:
- r = 1;
- break;
- case KVM_CAP_NR_VCPUS:
- r = num_online_cpus();
- break;
- case KVM_CAP_MAX_VCPUS:
- r = KVM_MAX_VCPUS;
- break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
- case KVM_CAP_MSI_DEVID:
- if (!kvm)
- r = -EINVAL;
- else
- r = kvm->arch.vgic.msis_require_devid;
- break;
- case KVM_CAP_ARM_USER_IRQ:
- /*
- * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
- * (bump this number if adding more devices)
- */
- r = 1;
- break;
- default:
- r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
- break;
- }
- return r;
-}
-
-long kvm_arch_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- return -EINVAL;
-}
-
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
-{
- int err;
- struct kvm_vcpu *vcpu;
-
- if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
- err = -EBUSY;
- goto out;
- }
-
- if (id >= kvm->arch.max_vcpus) {
- err = -EINVAL;
- goto out;
- }
-
- vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
- if (!vcpu) {
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_vcpu_init(vcpu, kvm, id);
- if (err)
- goto free_vcpu;
-
- err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
- if (err)
- goto vcpu_uninit;
-
- return vcpu;
-vcpu_uninit:
- kvm_vcpu_uninit(vcpu);
-free_vcpu:
- kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
- return ERR_PTR(err);
-}
-
-void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
- kvm_vgic_vcpu_early_init(vcpu);
-}
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
-{
- kvm_mmu_free_memory_caches(vcpu);
- kvm_timer_vcpu_terminate(vcpu);
- kvm_vgic_vcpu_destroy(vcpu);
- kvm_pmu_vcpu_destroy(vcpu);
- kvm_vcpu_uninit(vcpu);
- kmem_cache_free(kvm_vcpu_cache, vcpu);
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- kvm_arch_vcpu_free(vcpu);
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
- return kvm_timer_should_fire(vcpu_vtimer(vcpu)) ||
- kvm_timer_should_fire(vcpu_ptimer(vcpu));
-}
-
-void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
- kvm_timer_schedule(vcpu);
-}
-
-void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
- kvm_timer_unschedule(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
- /* Force users to call KVM_ARM_VCPU_INIT */
- vcpu->arch.target = -1;
- bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
-
- /* Set up the timer */
- kvm_timer_vcpu_init(vcpu);
-
- kvm_arm_reset_debug_ptr(vcpu);
-
- return 0;
-}
-
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- int *last_ran;
-
- last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-
- /*
- * We might get preempted before the vCPU actually runs, but
- * over-invalidation doesn't affect correctness.
- */
- if (*last_ran != vcpu->vcpu_id) {
- kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
- *last_ran = vcpu->vcpu_id;
- }
-
- vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
-
- kvm_arm_set_running_vcpu(vcpu);
-
- kvm_vgic_load(vcpu);
-}
-
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
- kvm_vgic_put(vcpu);
-
- vcpu->cpu = -1;
-
- kvm_arm_set_running_vcpu(NULL);
- kvm_timer_vcpu_put(vcpu);
-}
-
-int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- if (vcpu->arch.power_off)
- mp_state->mp_state = KVM_MP_STATE_STOPPED;
- else
- mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
-
- return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
- struct kvm_mp_state *mp_state)
-{
- switch (mp_state->mp_state) {
- case KVM_MP_STATE_RUNNABLE:
- vcpu->arch.power_off = false;
- break;
- case KVM_MP_STATE_STOPPED:
- vcpu->arch.power_off = true;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
- * @v: The VCPU pointer
- *
- * If the guest CPU is not waiting for interrupts or an interrupt line is
- * asserted, the CPU is by definition runnable.
- */
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
-{
- return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
- && !v->arch.power_off && !v->arch.pause);
-}
-
-/* Just ensure a guest exit from a particular CPU */
-static void exit_vm_noop(void *info)
-{
-}
-
-void force_vm_exit(const cpumask_t *mask)
-{
- preempt_disable();
- smp_call_function_many(mask, exit_vm_noop, NULL, true);
- preempt_enable();
-}
-
-/**
- * need_new_vmid_gen - check that the VMID is still valid
- * @kvm: The VM's VMID to check
- *
- * return true if there is a new generation of VMIDs being used
- *
- * The hardware supports only 256 values with the value zero reserved for the
- * host, so we check if an assigned value belongs to a previous generation,
- * which requires us to assign a new value. If we're the first to use a
- * VMID for the new generation, we must flush necessary caches and TLBs on all
- * CPUs.
- */
-static bool need_new_vmid_gen(struct kvm *kvm)
-{
- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
-}
-
-/**
- * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
- * @kvm The guest that we are about to run
- *
- * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
- * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
- * caches and TLBs.
- */
-static void update_vttbr(struct kvm *kvm)
-{
- phys_addr_t pgd_phys;
- u64 vmid;
-
- if (!need_new_vmid_gen(kvm))
- return;
-
- spin_lock(&kvm_vmid_lock);
-
- /*
- * We need to re-check the vmid_gen here to ensure that if another vcpu
- * already allocated a valid vmid for this vm, then this vcpu should
- * use the same vmid.
- */
- if (!need_new_vmid_gen(kvm)) {
- spin_unlock(&kvm_vmid_lock);
- return;
- }
-
- /* First user of a new VMID generation? */
- if (unlikely(kvm_next_vmid == 0)) {
- atomic64_inc(&kvm_vmid_gen);
- kvm_next_vmid = 1;
-
- /*
- * On SMP we know no other CPUs can use this CPU's or each
- * other's VMID after force_vm_exit returns since the
- * kvm_vmid_lock blocks them from reentry to the guest.
- */
- force_vm_exit(cpu_all_mask);
- /*
- * Now broadcast TLB + ICACHE invalidation over the inner
- * shareable domain to make sure all data structures are
- * clean.
- */
- kvm_call_hyp(__kvm_flush_vm_context);
- }
-
- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
- kvm->arch.vmid = kvm_next_vmid;
- kvm_next_vmid++;
- kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
-
- /* update vttbr to be used with the new vmid */
- pgd_phys = virt_to_phys(kvm->arch.pgd);
- BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
- vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = pgd_phys | vmid;
-
- spin_unlock(&kvm_vmid_lock);
-}
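A compact model of the generation scheme implemented by need_new_vmid_gen() and update_vttbr() above may help; this is an editor's sketch with the locking and TLB maintenance reduced to comments, not kernel code:

/* Toy model: 255 usable 8-bit VMIDs per generation; bumping the
 * generation counter implicitly invalidates every VM's cached VMID and
 * sends each one back through the slow path. */
struct toy_vm { unsigned long long gen; unsigned int vmid; };

static unsigned long long toy_vmid_gen = 1;
static unsigned int toy_next_vmid = 1;  /* VMID 0 belongs to the host */

static void toy_assign_vmid(struct toy_vm *vm)
{
        if (vm->gen == toy_vmid_gen)
                return;                 /* fast path: still valid */
        if (toy_next_vmid == 0) {       /* 8-bit space exhausted */
                toy_vmid_gen++;         /* ...force_vm_exit + flush here... */
                toy_next_vmid = 1;
        }
        vm->gen = toy_vmid_gen;
        vm->vmid = toy_next_vmid++;
        toy_next_vmid &= 0xff;
}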
-
-static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- int ret = 0;
-
- if (likely(vcpu->arch.has_run_once))
- return 0;
-
- vcpu->arch.has_run_once = true;
-
- /*
- * Map the VGIC hardware resources before running a vcpu the first
- * time on this VM.
- */
- if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
- ret = kvm_vgic_map_resources(kvm);
- if (ret)
- return ret;
- }
-
- ret = kvm_timer_enable(vcpu);
-
- return ret;
-}
-
-bool kvm_arch_intc_initialized(struct kvm *kvm)
-{
- return vgic_initialized(kvm);
-}
-
-void kvm_arm_halt_guest(struct kvm *kvm)
-{
- int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- vcpu->arch.pause = true;
- kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
-}
-
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.pause = true;
- kvm_vcpu_kick(vcpu);
-}
-
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
-{
- struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
- vcpu->arch.pause = false;
- swake_up(wq);
-}
-
-void kvm_arm_resume_guest(struct kvm *kvm)
-{
- int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_arm_resume_vcpu(vcpu);
-}
-
-static void vcpu_sleep(struct kvm_vcpu *vcpu)
-{
- struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
- swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
- (!vcpu->arch.pause)));
-}
-
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.target >= 0;
-}
-
-/**
- * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
- * @vcpu: The VCPU pointer
- * @run: The kvm_run structure pointer used for userspace state exchange
- *
- * This function is called through the KVM_RUN ioctl from user space. It
- * executes VM code in a loop until the time slice for the process is used
- * up, or until some emulation is needed from user space, in which case the
- * function returns 0 with the kvm_run structure filled in with the data
- * required for the requested emulation.
- */
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- int ret;
- sigset_t sigsaved;
-
- if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
-
- ret = kvm_vcpu_first_run_init(vcpu);
- if (ret)
- return ret;
-
- if (run->exit_reason == KVM_EXIT_MMIO) {
- ret = kvm_handle_mmio_return(vcpu, vcpu->run);
- if (ret)
- return ret;
- }
-
- if (run->immediate_exit)
- return -EINTR;
-
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
- ret = 1;
- run->exit_reason = KVM_EXIT_UNKNOWN;
- while (ret > 0) {
- /*
- * Check conditions before entering the guest
- */
- cond_resched();
-
- update_vttbr(vcpu->kvm);
-
- if (vcpu->arch.power_off || vcpu->arch.pause)
- vcpu_sleep(vcpu);
-
- /*
- * Preparing the interrupts to be injected also
- * involves poking the GIC, which must be done in a
- * non-preemptible context.
- */
- preempt_disable();
-
- kvm_pmu_flush_hwstate(vcpu);
-
- kvm_timer_flush_hwstate(vcpu);
- kvm_vgic_flush_hwstate(vcpu);
-
- local_irq_disable();
-
- /*
- * If we have a signal pending, or need to notify a userspace
- * irqchip about timer or PMU level changes, then we exit (and
- * update the timer level state in kvm_timer_update_run
- * below).
- */
- if (signal_pending(current) ||
- kvm_timer_should_notify_user(vcpu) ||
- kvm_pmu_should_notify_user(vcpu)) {
- ret = -EINTR;
- run->exit_reason = KVM_EXIT_INTR;
- }
-
- if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
- vcpu->arch.power_off || vcpu->arch.pause) {
- local_irq_enable();
- kvm_pmu_sync_hwstate(vcpu);
- kvm_timer_sync_hwstate(vcpu);
- kvm_vgic_sync_hwstate(vcpu);
- preempt_enable();
- continue;
- }
-
- kvm_arm_setup_debug(vcpu);
-
- /**************************************************************
- * Enter the guest
- */
- trace_kvm_entry(*vcpu_pc(vcpu));
- guest_enter_irqoff();
- vcpu->mode = IN_GUEST_MODE;
-
- ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
-
- vcpu->mode = OUTSIDE_GUEST_MODE;
- vcpu->stat.exits++;
- /*
- * Back from guest
- *************************************************************/
-
- kvm_arm_clear_debug(vcpu);
-
- /*
- * We may have taken a host interrupt in HYP mode (ie
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
- *
- * We're now back in SVC mode, with interrupts
- * disabled. Enabling the interrupts now will have
- * the effect of taking the interrupt again, in SVC
- * mode this time.
- */
- local_irq_enable();
-
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest we
- * account that tick as being spent in the guest. We enable
- * preemption after calling guest_exit() so that if we get
- * preempted we make sure ticks after that is not counted as
- * guest time.
- */
- guest_exit();
- trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
-
- /*
- * We must sync the PMU and timer state before the vgic state so
- * that the vgic can properly sample the updated state of the
- * interrupt line.
- */
- kvm_pmu_sync_hwstate(vcpu);
- kvm_timer_sync_hwstate(vcpu);
-
- kvm_vgic_sync_hwstate(vcpu);
-
- preempt_enable();
-
- ret = handle_exit(vcpu, run, ret);
- }
-
- /* Tell userspace about in-kernel device output levels */
- if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
- kvm_timer_update_run(vcpu);
- kvm_pmu_update_run(vcpu);
- }
-
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- return ret;
-}
-
-static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
-{
- int bit_index;
- bool set;
- unsigned long *ptr;
-
- if (number == KVM_ARM_IRQ_CPU_IRQ)
- bit_index = __ffs(HCR_VI);
- else /* KVM_ARM_IRQ_CPU_FIQ */
- bit_index = __ffs(HCR_VF);
-
- ptr = (unsigned long *)&vcpu->arch.irq_lines;
- if (level)
- set = test_and_set_bit(bit_index, ptr);
- else
- set = test_and_clear_bit(bit_index, ptr);
-
- /*
- * If we didn't change anything, no need to wake up or kick other CPUs
- */
- if (set == level)
- return 0;
-
- /*
- * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
- * trigger a world-switch round on the running physical CPU to set the
- * virtual IRQ/FIQ fields in the HCR appropriately.
- */
- kvm_vcpu_kick(vcpu);
-
- return 0;
-}
-
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
- bool line_status)
-{
- u32 irq = irq_level->irq;
- unsigned int irq_type, vcpu_idx, irq_num;
- int nrcpus = atomic_read(&kvm->online_vcpus);
- struct kvm_vcpu *vcpu = NULL;
- bool level = irq_level->level;
-
- irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
- vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
- irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
-
- trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
-
- switch (irq_type) {
- case KVM_ARM_IRQ_TYPE_CPU:
- if (irqchip_in_kernel(kvm))
- return -ENXIO;
-
- if (vcpu_idx >= nrcpus)
- return -EINVAL;
-
- vcpu = kvm_get_vcpu(kvm, vcpu_idx);
- if (!vcpu)
- return -EINVAL;
-
- if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
- return -EINVAL;
-
- return vcpu_interrupt_line(vcpu, irq_num, level);
- case KVM_ARM_IRQ_TYPE_PPI:
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
- if (vcpu_idx >= nrcpus)
- return -EINVAL;
-
- vcpu = kvm_get_vcpu(kvm, vcpu_idx);
- if (!vcpu)
- return -EINVAL;
-
- if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
- return -EINVAL;
-
- return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
- case KVM_ARM_IRQ_TYPE_SPI:
- if (!irqchip_in_kernel(kvm))
- return -ENXIO;
-
- if (irq_num < VGIC_NR_PRIVATE_IRQS)
- return -EINVAL;
-
- return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
- }
-
- return -EINVAL;
-}
-
-static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
- const struct kvm_vcpu_init *init)
-{
- unsigned int i;
- int phys_target = kvm_target_cpu();
-
- if (init->target != phys_target)
- return -EINVAL;
-
- /*
- * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
- * use the same target.
- */
- if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
- return -EINVAL;
-
- /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
- for (i = 0; i < sizeof(init->features) * 8; i++) {
- bool set = (init->features[i / 32] & (1 << (i % 32)));
-
- if (set && i >= KVM_VCPU_MAX_FEATURES)
- return -ENOENT;
-
- /*
- * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
- * use the same feature set.
- */
- if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
- test_bit(i, vcpu->arch.features) != set)
- return -EINVAL;
-
- if (set)
- set_bit(i, vcpu->arch.features);
- }
-
- vcpu->arch.target = phys_target;
-
- /* Now we know what it is, we can reset it. */
- return kvm_reset_vcpu(vcpu);
-}
-
-
-static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
- struct kvm_vcpu_init *init)
-{
- int ret;
-
- ret = kvm_vcpu_set_target(vcpu, init);
- if (ret)
- return ret;
-
- /*
- * Ensure a rebooted VM will fault in RAM pages and detect if the
- * guest MMU is turned off and flush the caches as needed.
- */
- if (vcpu->arch.has_run_once)
- stage2_unmap_vm(vcpu->kvm);
-
- vcpu_reset_hcr(vcpu);
-
- /*
- * Handle the "start in power-off" case.
- */
- if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
- vcpu->arch.power_off = true;
- else
- vcpu->arch.power_off = false;
-
- return 0;
-}
-
-static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret = -ENXIO;
-
- switch (attr->group) {
- default:
- ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
- break;
- }
-
- return ret;
-}
-
-static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret = -ENXIO;
-
- switch (attr->group) {
- default:
- ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
- break;
- }
-
- return ret;
-}
-
-static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- int ret = -ENXIO;
-
- switch (attr->group) {
- default:
- ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
- break;
- }
-
- return ret;
-}
-
-long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm_vcpu *vcpu = filp->private_data;
- void __user *argp = (void __user *)arg;
- struct kvm_device_attr attr;
-
- switch (ioctl) {
- case KVM_ARM_VCPU_INIT: {
- struct kvm_vcpu_init init;
-
- if (copy_from_user(&init, argp, sizeof(init)))
- return -EFAULT;
-
- return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
- }
- case KVM_SET_ONE_REG:
- case KVM_GET_ONE_REG: {
- struct kvm_one_reg reg;
-
- if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
-
- if (copy_from_user(&reg, argp, sizeof(reg)))
- return -EFAULT;
- if (ioctl == KVM_SET_ONE_REG)
- return kvm_arm_set_reg(vcpu, &reg);
- else
- return kvm_arm_get_reg(vcpu, &reg);
- }
- case KVM_GET_REG_LIST: {
- struct kvm_reg_list __user *user_list = argp;
- struct kvm_reg_list reg_list;
- unsigned n;
-
- if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
-
- if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
- return -EFAULT;
- n = reg_list.n;
- reg_list.n = kvm_arm_num_regs(vcpu);
- if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
- return -EFAULT;
- if (n < reg_list.n)
- return -E2BIG;
- return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
- }
- case KVM_SET_DEVICE_ATTR: {
- if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_set_attr(vcpu, &attr);
- }
- case KVM_GET_DEVICE_ATTR: {
- if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_get_attr(vcpu, &attr);
- }
- case KVM_HAS_DEVICE_ATTR: {
- if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_has_attr(vcpu, &attr);
- }
- default:
- return -EINVAL;
- }
-}
-
-/**
- * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
- * @kvm: kvm instance
- * @log: slot id and address to which we copy the log
- *
- * Steps 1-4 below provide a general overview of dirty page logging. See
- * kvm_get_dirty_log_protect() function description for additional details.
- *
- * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
- * always flush the TLB (step 4), even if the previous step failed and the
- * dirty bitmap may be corrupt. Regardless of the previous outcome, the KVM
- * logging API does not preclude a subsequent dirty log read by user space.
- * Flushing the TLB ensures writes will be marked dirty for the next log read.
- *
- * 1. Take a snapshot of the bit and clear it if needed.
- * 2. Write protect the corresponding page.
- * 3. Copy the snapshot to the userspace.
- * 4. Flush TLB's if needed.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-{
- bool is_dirty = false;
- int r;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
-
- if (is_dirty)
- kvm_flush_remote_tlbs(kvm);
-
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
- struct kvm_arm_device_addr *dev_addr)
-{
- unsigned long dev_id, type;
-
- dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
- KVM_ARM_DEVICE_ID_SHIFT;
- type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
- KVM_ARM_DEVICE_TYPE_SHIFT;
-
- switch (dev_id) {
- case KVM_ARM_DEVICE_VGIC_V2:
- if (!vgic_present)
- return -ENXIO;
- return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
- default:
- return -ENODEV;
- }
-}
-
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
-{
- struct kvm *kvm = filp->private_data;
- void __user *argp = (void __user *)arg;
-
- switch (ioctl) {
- case KVM_CREATE_IRQCHIP: {
- int ret;
- if (!vgic_present)
- return -ENXIO;
- mutex_lock(&kvm->lock);
- ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
- mutex_unlock(&kvm->lock);
- return ret;
- }
- case KVM_ARM_SET_DEVICE_ADDR: {
- struct kvm_arm_device_addr dev_addr;
-
- if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
- return -EFAULT;
- return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
- }
- case KVM_ARM_PREFERRED_TARGET: {
- int err;
- struct kvm_vcpu_init init;
-
- err = kvm_vcpu_preferred_target(&init);
- if (err)
- return err;
-
- if (copy_to_user(argp, &init, sizeof(init)))
- return -EFAULT;
-
- return 0;
- }
- default:
- return -EINVAL;
- }
-}
-
-static void cpu_init_hyp_mode(void *dummy)
-{
- phys_addr_t pgd_ptr;
- unsigned long hyp_stack_ptr;
- unsigned long stack_page;
- unsigned long vector_ptr;
-
- /* Switch from the HYP stub to our own HYP init vector */
- __hyp_set_vectors(kvm_get_idmap_vector());
-
- pgd_ptr = kvm_mmu_get_httbr();
- stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
- hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
-
- __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
- __cpu_init_stage2();
-
- if (is_kernel_in_hyp_mode())
- kvm_timer_init_vhe();
-
- kvm_arm_init_debug();
-}
-
-static void cpu_hyp_reset(void)
-{
- if (!is_kernel_in_hyp_mode())
- __hyp_reset_vectors();
-}
-
-static void cpu_hyp_reinit(void)
-{
- cpu_hyp_reset();
-
- if (is_kernel_in_hyp_mode()) {
- /*
- * __cpu_init_stage2() is safe to call even if the PM
- * event was cancelled before the CPU was reset.
- */
- __cpu_init_stage2();
- } else {
- cpu_init_hyp_mode(NULL);
- }
-
- if (vgic_present)
- kvm_vgic_init_cpu_hardware();
-}
-
-static void _kvm_arch_hardware_enable(void *discard)
-{
- if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
- cpu_hyp_reinit();
- __this_cpu_write(kvm_arm_hardware_enabled, 1);
- }
-}
-
-int kvm_arch_hardware_enable(void)
-{
- _kvm_arch_hardware_enable(NULL);
- return 0;
-}
-
-static void _kvm_arch_hardware_disable(void *discard)
-{
- if (__this_cpu_read(kvm_arm_hardware_enabled)) {
- cpu_hyp_reset();
- __this_cpu_write(kvm_arm_hardware_enabled, 0);
- }
-}
-
-void kvm_arch_hardware_disable(void)
-{
- _kvm_arch_hardware_disable(NULL);
-}
-
-#ifdef CONFIG_CPU_PM
-static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
- unsigned long cmd,
- void *v)
-{
- /*
- * kvm_arm_hardware_enabled is left with its old value over
- * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
- * re-enable hyp.
- */
- switch (cmd) {
- case CPU_PM_ENTER:
- if (__this_cpu_read(kvm_arm_hardware_enabled))
- /*
- * don't update kvm_arm_hardware_enabled here
- * so that the hardware will be re-enabled
- * when we resume. See below.
- */
- cpu_hyp_reset();
-
- return NOTIFY_OK;
- case CPU_PM_EXIT:
- if (__this_cpu_read(kvm_arm_hardware_enabled))
- /* The hardware was enabled before suspend. */
- cpu_hyp_reinit();
-
- return NOTIFY_OK;
-
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block hyp_init_cpu_pm_nb = {
- .notifier_call = hyp_init_cpu_pm_notifier,
-};
-
-static void __init hyp_cpu_pm_init(void)
-{
- cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
-}
-static void __init hyp_cpu_pm_exit(void)
-{
- cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
-}
-#else
-static inline void hyp_cpu_pm_init(void)
-{
-}
-static inline void hyp_cpu_pm_exit(void)
-{
-}
-#endif
-
-static void teardown_common_resources(void)
-{
- free_percpu(kvm_host_cpu_state);
-}
-
-static int init_common_resources(void)
-{
- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
- if (!kvm_host_cpu_state) {
- kvm_err("Cannot allocate host CPU state\n");
- return -ENOMEM;
- }
-
- /* set size of VMID supported by CPU */
- kvm_vmid_bits = kvm_get_vmid_bits();
- kvm_info("%d-bit VMID\n", kvm_vmid_bits);
-
- return 0;
-}
-
-static int init_subsystems(void)
-{
- int err = 0;
-
- /*
- * Enable hardware so that subsystem initialisation can access EL2.
- */
- on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
-
- /*
- * Register CPU low-power notifier
- */
- hyp_cpu_pm_init();
-
- /*
- * Init HYP view of VGIC
- */
- err = kvm_vgic_hyp_init();
- switch (err) {
- case 0:
- vgic_present = true;
- break;
- case -ENODEV:
- case -ENXIO:
- vgic_present = false;
- err = 0;
- break;
- default:
- goto out;
- }
-
- /*
- * Init HYP architected timer support
- */
- err = kvm_timer_hyp_init();
- if (err)
- goto out;
-
- kvm_perf_init();
- kvm_coproc_table_init();
-
-out:
- on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
-
- return err;
-}
-
-static void teardown_hyp_mode(void)
-{
- int cpu;
-
- if (is_kernel_in_hyp_mode())
- return;
-
- free_hyp_pgds();
- for_each_possible_cpu(cpu)
- free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
- hyp_cpu_pm_exit();
-}
-
-static int init_vhe_mode(void)
-{
- kvm_info("VHE mode initialized successfully\n");
- return 0;
-}
-
-/**
- * Initialize Hyp-mode on all online CPUs
- */
-static int init_hyp_mode(void)
-{
- int cpu;
- int err = 0;
-
- /*
- * Allocate Hyp PGD and set up Hyp identity mapping
- */
- err = kvm_mmu_init();
- if (err)
- goto out_err;
-
- /*
- * Allocate stack pages for Hypervisor-mode
- */
- for_each_possible_cpu(cpu) {
- unsigned long stack_page;
-
- stack_page = __get_free_page(GFP_KERNEL);
- if (!stack_page) {
- err = -ENOMEM;
- goto out_err;
- }
-
- per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
- }
-
- /*
- * Map the Hyp-code called directly from the host
- */
- err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
- kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
- if (err) {
- kvm_err("Cannot map world-switch code\n");
- goto out_err;
- }
-
- err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
- kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
- if (err) {
- kvm_err("Cannot map rodata section\n");
- goto out_err;
- }
-
- err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
- kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
- if (err) {
- kvm_err("Cannot map bss section\n");
- goto out_err;
- }
-
- /*
- * Map the Hyp stack pages
- */
- for_each_possible_cpu(cpu) {
- char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
- err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
- PAGE_HYP);
-
- if (err) {
- kvm_err("Cannot map hyp stack\n");
- goto out_err;
- }
- }
-
- for_each_possible_cpu(cpu) {
- kvm_cpu_context_t *cpu_ctxt;
-
- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
- err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
-
- if (err) {
- kvm_err("Cannot map host CPU state: %d\n", err);
- goto out_err;
- }
- }
-
- kvm_info("Hyp mode initialized successfully\n");
-
- return 0;
-
-out_err:
- teardown_hyp_mode();
- kvm_err("error initializing Hyp mode: %d\n", err);
- return err;
-}
-
-static void check_kvm_target_cpu(void *ret)
-{
- *(int *)ret = kvm_target_cpu();
-}
-
-struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
-{
- struct kvm_vcpu *vcpu;
- int i;
-
- mpidr &= MPIDR_HWID_BITMASK;
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
- return vcpu;
- }
- return NULL;
-}
-
-/**
- * Initialize Hyp-mode and memory mappings on all CPUs.
- */
-int kvm_arch_init(void *opaque)
-{
- int err;
- int ret, cpu;
-
- if (!is_hyp_mode_available()) {
- kvm_err("HYP mode not available\n");
- return -ENODEV;
- }
-
- for_each_online_cpu(cpu) {
- smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
- if (ret < 0) {
- kvm_err("Error, CPU %d not supported!\n", cpu);
- return -ENODEV;
- }
- }
-
- err = init_common_resources();
- if (err)
- return err;
-
- if (is_kernel_in_hyp_mode())
- err = init_vhe_mode();
- else
- err = init_hyp_mode();
- if (err)
- goto out_err;
-
- err = init_subsystems();
- if (err)
- goto out_hyp;
-
- return 0;
-
-out_hyp:
- teardown_hyp_mode();
-out_err:
- teardown_common_resources();
- return err;
-}
-
-/* NOP: Compiling as a module not supported */
-void kvm_arch_exit(void)
-{
- kvm_perf_teardown();
-}
-
-static int arm_init(void)
-{
- int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
- return rc;
-}
-
-module_init(arm_init);
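kvm_vm_ioctl_irq_line() above decodes the irq word using the KVM_ARM_IRQ_* shifts and masks from the uapi header touched earlier in this patch. A hedged sketch of the user-space side, composing that word to raise an SPI (the vcpu index field is ignored for SPIs, so it is left zero):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: assert or deassert SPI <intid> on a VM fd via KVM_IRQ_LINE. */
static int inject_spi(int vm_fd, unsigned int intid, int level)
{
        struct kvm_irq_level irq = {
                .irq = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
                       (intid << KVM_ARM_IRQ_NUM_SHIFT),
                .level = level,
        };

        return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}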
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 2c14b69511e9..6d1d2e26dfe5 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -32,6 +32,7 @@
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"
+#define CREATE_TRACE_POINTS
#include "trace.h"
#include "coproc.h"
@@ -111,12 +112,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
/*
@@ -284,7 +279,7 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu,
* must always support PMCCNTR (the cycle counter): we just RAZ/WI for
* all PM registers, which doesn't crash the guest kernel at least.
*/
-static bool pm_fake(struct kvm_vcpu *vcpu,
+static bool trap_raz_wi(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
@@ -294,19 +289,19 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
return read_zero(vcpu, p);
}
-#define access_pmcr pm_fake
-#define access_pmcntenset pm_fake
-#define access_pmcntenclr pm_fake
-#define access_pmovsr pm_fake
-#define access_pmselr pm_fake
-#define access_pmceid0 pm_fake
-#define access_pmceid1 pm_fake
-#define access_pmccntr pm_fake
-#define access_pmxevtyper pm_fake
-#define access_pmxevcntr pm_fake
-#define access_pmuserenr pm_fake
-#define access_pmintenset pm_fake
-#define access_pmintenclr pm_fake
+#define access_pmcr trap_raz_wi
+#define access_pmcntenset trap_raz_wi
+#define access_pmcntenclr trap_raz_wi
+#define access_pmovsr trap_raz_wi
+#define access_pmselr trap_raz_wi
+#define access_pmceid0 trap_raz_wi
+#define access_pmceid1 trap_raz_wi
+#define access_pmccntr trap_raz_wi
+#define access_pmxevtyper trap_raz_wi
+#define access_pmxevcntr trap_raz_wi
+#define access_pmuserenr trap_raz_wi
+#define access_pmintenset trap_raz_wi
+#define access_pmintenclr trap_raz_wi
/* Architected CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
@@ -532,12 +527,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
return 1;
}
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
{
struct coproc_params params;
@@ -551,9 +541,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
params.CRm = 0;
+ return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ struct coproc_params params = decode_64bit_hsr(vcpu);
+
return emulate_cp15(vcpu, &params);
}
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ struct coproc_params params = decode_64bit_hsr(vcpu);
+
+ /* raz_wi cp14 */
+ trap_raz_wi(vcpu, &params, NULL);
+
+ /* handled */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ return 1;
+}
+
static void reset_coproc_regs(struct kvm_vcpu *vcpu,
const struct coproc_reg *table, size_t num)
{
@@ -564,12 +583,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
table[i].reset(vcpu, &table[i]);
}
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
{
struct coproc_params params;
@@ -583,9 +597,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
params.Rt2 = 0;
+ return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ struct coproc_params params = decode_32bit_hsr(vcpu);
return emulate_cp15(vcpu, &params);
}
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run: The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ struct coproc_params params = decode_32bit_hsr(vcpu);
+
+ /* raz_wi cp14 */
+ trap_raz_wi(vcpu, &params, NULL);
+
+ /* handled */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ return 1;
+}
+
/******************************************************************************
* Userspace API
*****************************************************************************/
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 5fd7968cdae9..f86a9aaef462 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -95,9 +95,9 @@ static exit_handle_fn arm_exit_handlers[] = {
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
- [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
+ [HSR_EC_CP14_MR] = kvm_handle_cp14_32,
[HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [HSR_EC_CP14_64] = kvm_handle_cp14_access,
+ [HSR_EC_CP14_64] = kvm_handle_cp14_64,
[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
[HSR_EC_HVC] = handle_hvc,
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 3023bb530edf..8679405b0b2b 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
+ccflags-y += -fno-stack-protector
+
KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 92678b7bd046..624a510d31df 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
write_sysreg(HSTR_T(15), HSTR);
write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
val = read_sysreg(HDCR);
- write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+ val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
+ val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
+ write_sysreg(val, HDCR);
}
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 570ed4a9c261..5386528665b5 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -104,7 +104,6 @@ __do_hyp_init:
@ - Write permission implies XN: disabled
@ - Instruction cache: enabled
@ - Data/Unified cache: enabled
- @ - Memory alignment checks: enabled
@ - MMU: enabled (this code must be run from an identity mapping)
mrc p15, 4, r0, c1, c0, 0 @ HSCR
ldr r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
and r1, r1, r2
- ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
+ ARM( ldr r2, =(HSCTLR_M) )
+ THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
mcr p15, 4, r0, c1, c0, 0 @ HSCR
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
deleted file mode 100644
index b6e715fd3c90..000000000000
--- a/arch/arm/kvm/mmio.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kvm_host.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_emulate.h>
-#include <trace/events/kvm.h>
-
-#include "trace.h"
-
-void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
-{
- void *datap = NULL;
- union {
- u8 byte;
- u16 hword;
- u32 word;
- u64 dword;
- } tmp;
-
- switch (len) {
- case 1:
- tmp.byte = data;
- datap = &tmp.byte;
- break;
- case 2:
- tmp.hword = data;
- datap = &tmp.hword;
- break;
- case 4:
- tmp.word = data;
- datap = &tmp.word;
- break;
- case 8:
- tmp.dword = data;
- datap = &tmp.dword;
- break;
- }
-
- memcpy(buf, datap, len);
-}
-
-unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
-{
- unsigned long data = 0;
- union {
- u16 hword;
- u32 word;
- u64 dword;
- } tmp;
-
- switch (len) {
- case 1:
- data = *(u8 *)buf;
- break;
- case 2:
- memcpy(&tmp.hword, buf, len);
- data = tmp.hword;
- break;
- case 4:
- memcpy(&tmp.word, buf, len);
- data = tmp.word;
- break;
- case 8:
- memcpy(&tmp.dword, buf, len);
- data = tmp.dword;
- break;
- }
-
- return data;
-}
-
-/**
- * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
- * or in-kernel IO emulation
- *
- * @vcpu: The VCPU pointer
- * @run: The VCPU run struct containing the mmio data
- */
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- unsigned long data;
- unsigned int len;
- int mask;
-
- if (!run->mmio.is_write) {
- len = run->mmio.len;
- if (len > sizeof(unsigned long))
- return -EINVAL;
-
- data = kvm_mmio_read_buf(run->mmio.data, len);
-
- if (vcpu->arch.mmio_decode.sign_extend &&
- len < sizeof(unsigned long)) {
- mask = 1U << ((len * 8) - 1);
- data = (data ^ mask) - mask;
- }
-
- trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
- data);
- data = vcpu_data_host_to_guest(vcpu, data, len);
- vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
- }
-
- return 0;
-}
-
-static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
-{
- unsigned long rt;
- int access_size;
- bool sign_extend;
-
- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
- /* page table accesses IO mem: tell guest to fix its TTBR */
- kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- return 1;
- }
-
- access_size = kvm_vcpu_dabt_get_as(vcpu);
- if (unlikely(access_size < 0))
- return access_size;
-
- *is_write = kvm_vcpu_dabt_iswrite(vcpu);
- sign_extend = kvm_vcpu_dabt_issext(vcpu);
- rt = kvm_vcpu_dabt_get_rd(vcpu);
-
- *len = access_size;
- vcpu->arch.mmio_decode.sign_extend = sign_extend;
- vcpu->arch.mmio_decode.rt = rt;
-
- /*
- * The MMIO instruction is emulated and should not be re-executed
- * in the guest.
- */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- return 0;
-}
-
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- phys_addr_t fault_ipa)
-{
- unsigned long data;
- unsigned long rt;
- int ret;
- bool is_write;
- int len;
- u8 data_buf[8];
-
- /*
- * Prepare MMIO operation. First decode the syndrome data we get
- * from the CPU. Then check whether some in-kernel emulation is
- * responsible for it; otherwise let user space do its magic.
- */
- if (kvm_vcpu_dabt_isvalid(vcpu)) {
- ret = decode_hsr(vcpu, &is_write, &len);
- if (ret)
- return ret;
- } else {
- kvm_err("load/store instruction decoding not implemented\n");
- return -ENOSYS;
- }
-
- rt = vcpu->arch.mmio_decode.rt;
-
- if (is_write) {
- data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
- len);
-
- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
- kvm_mmio_write_buf(data_buf, len, data);
-
- ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
- data_buf);
- } else {
- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
- fault_ipa, 0);
-
- ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
- data_buf);
- }
-
- /* Now prepare kvm_run for the potential return to userland. */
- run->mmio.is_write = is_write;
- run->mmio.phys_addr = fault_ipa;
- run->mmio.len = len;
-
- if (!ret) {
- /* We handled the access successfully in the kernel. */
- if (!is_write)
- memcpy(run->mmio.data, data_buf, len);
- vcpu->stat.mmio_exit_kernel++;
- kvm_handle_mmio_return(vcpu, run);
- return 1;
- }
-
- if (is_write)
- memcpy(run->mmio.data, data_buf, len);
- vcpu->stat.mmio_exit_user++;
- run->exit_reason = KVM_EXIT_MMIO;
- return 0;
-}
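One line in kvm_handle_mmio_return() above deserves unpacking: the branchless sign extension data = (data ^ mask) - mask. A stand-alone demonstration (editor's sketch, not kernel code):

#include <stdio.h>

/* The XOR/subtract pair is a standard branchless sign-extension idiom:
 * for a 1-byte read of 0x80, mask = 0x80, so (0x80 ^ 0x80) - 0x80 wraps
 * to the value sign-extended to full register width, while a value like
 * 0x7f comes back unchanged: (0x7f ^ 0x80) - 0x80 == 0x7f. */
int main(void)
{
        unsigned long data = 0x80;      /* byte with the sign bit set */
        unsigned int len = 1;
        unsigned long mask = 1UL << ((len * 8) - 1);

        data = (data ^ mask) - mask;
        printf("0x%lx\n", data);        /* 0xffffffffffffff80 on LP64 */
        return 0;
}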
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
deleted file mode 100644
index 313ee646480f..000000000000
--- a/arch/arm/kvm/mmu.c
+++ /dev/null
@@ -1,1975 +0,0 @@
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/mman.h>
-#include <linux/kvm_host.h>
-#include <linux/io.h>
-#include <linux/hugetlb.h>
-#include <trace/events/kvm.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/kvm_mmio.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_emulate.h>
-#include <asm/virt.h>
-
-#include "trace.h"
-
-static pgd_t *boot_hyp_pgd;
-static pgd_t *hyp_pgd;
-static pgd_t *merged_hyp_pgd;
-static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
-
-static unsigned long hyp_idmap_start;
-static unsigned long hyp_idmap_end;
-static phys_addr_t hyp_idmap_vector;
-
-#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
-#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
-
-#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
-#define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
-
-static bool memslot_is_logging(struct kvm_memory_slot *memslot)
-{
- return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
-}
-
-/**
- * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
- * @kvm: pointer to kvm structure.
- *
- * Interface to HYP function to flush all VM TLB entries
- */
-void kvm_flush_remote_tlbs(struct kvm *kvm)
-{
- kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
-}
-
-static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
-{
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
-}
-
-/*
- * D-Cache management functions. They take the page table entries by
- * value, as they are flushing the cache using the kernel mapping (or
- * kmap on 32bit).
- */
-static void kvm_flush_dcache_pte(pte_t pte)
-{
- __kvm_flush_dcache_pte(pte);
-}
-
-static void kvm_flush_dcache_pmd(pmd_t pmd)
-{
- __kvm_flush_dcache_pmd(pmd);
-}
-
-static void kvm_flush_dcache_pud(pud_t pud)
-{
- __kvm_flush_dcache_pud(pud);
-}
-
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
- return !pfn_valid(pfn);
-}
-
-/**
- * stage2_dissolve_pmd() - clear and flush huge PMD entry
- * @kvm: pointer to kvm structure.
- * @addr: IPA
- * @pmd: pmd pointer for IPA
- *
- * Clears a PMD entry and flushes the 1st and 2nd stage TLBs for @addr. Marks
- * all pages in the range dirty.
- */
-static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
-{
- if (!pmd_thp_or_huge(*pmd))
- return;
-
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- put_page(virt_to_page(pmd));
-}
-
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- int min, int max)
-{
- void *page;
-
- BUG_ON(max > KVM_NR_MEM_OBJS);
- if (cache->nobjs >= min)
- return 0;
- while (cache->nobjs < max) {
- page = (void *)__get_free_page(PGALLOC_GFP);
- if (!page)
- return -ENOMEM;
- cache->objects[cache->nobjs++] = page;
- }
- return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
- while (mc->nobjs)
- free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
- void *p;
-
- BUG_ON(!mc || !mc->nobjs);
- p = mc->objects[--mc->nobjs];
- return p;
-}
-
-static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
-{
- pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
- stage2_pgd_clear(pgd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pud_free(pud_table);
- put_page(virt_to_page(pgd));
-}
-
-static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
-{
- pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
- VM_BUG_ON(stage2_pud_huge(*pud));
- stage2_pud_clear(pud);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pmd_free(pmd_table);
- put_page(virt_to_page(pud));
-}
-
-static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
-{
- pte_t *pte_table = pte_offset_kernel(pmd, 0);
- VM_BUG_ON(pmd_thp_or_huge(*pmd));
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- pte_free_kernel(NULL, pte_table);
- put_page(virt_to_page(pmd));
-}
-
-/*
- * Unmapping vs dcache management:
- *
- * If a guest maps certain memory pages as uncached, all writes will
- * bypass the data cache and go directly to RAM. However, the CPUs
- * can still speculate reads (not writes) and fill cache lines with
- * data.
- *
- * Those cache lines will be *clean* cache lines though, so a
- * clean+invalidate operation is equivalent to an invalidate
- * operation, because no cache lines are marked dirty.
- *
- * Those clean cache lines could be filled prior to an uncached write
- * by the guest, and the cache coherent IO subsystem would therefore
- * end up writing old data to disk.
- *
- * This is why right after unmapping a page/section and invalidating
- * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
- * the IO subsystem will never hit in the cache.
- */
-static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
- phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t start_addr = addr;
- pte_t *pte, *start_pte;
-
- start_pte = pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte)) {
- pte_t old_pte = *pte;
-
- kvm_set_pte(pte, __pte(0));
- kvm_tlb_flush_vmid_ipa(kvm, addr);
-
- /* No need to invalidate the cache for device mappings */
- if (!kvm_is_device_pfn(pte_pfn(old_pte)))
- kvm_flush_dcache_pte(old_pte);
-
- put_page(virt_to_page(pte));
- }
- } while (pte++, addr += PAGE_SIZE, addr != end);
-
- if (stage2_pte_table_empty(start_pte))
- clear_stage2_pmd_entry(kvm, pmd, start_addr);
-}
-
-static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
- phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next, start_addr = addr;
- pmd_t *pmd, *start_pmd;
-
- start_pmd = pmd = stage2_pmd_offset(pud, addr);
- do {
- next = stage2_pmd_addr_end(addr, end);
- if (!pmd_none(*pmd)) {
- if (pmd_thp_or_huge(*pmd)) {
- pmd_t old_pmd = *pmd;
-
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
-
- kvm_flush_dcache_pmd(old_pmd);
-
- put_page(virt_to_page(pmd));
- } else {
- unmap_stage2_ptes(kvm, pmd, addr, next);
- }
- }
- } while (pmd++, addr = next, addr != end);
-
- if (stage2_pmd_table_empty(start_pmd))
- clear_stage2_pud_entry(kvm, pud, start_addr);
-}
-
-static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
- phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next, start_addr = addr;
- pud_t *pud, *start_pud;
-
- start_pud = pud = stage2_pud_offset(pgd, addr);
- do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud)) {
- pud_t old_pud = *pud;
-
- stage2_pud_clear(pud);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- kvm_flush_dcache_pud(old_pud);
- put_page(virt_to_page(pud));
- } else {
- unmap_stage2_pmds(kvm, pud, addr, next);
- }
- }
- } while (pud++, addr = next, addr != end);
-
- if (stage2_pud_table_empty(start_pud))
- clear_stage2_pgd_entry(kvm, pgd, start_addr);
-}
-
-/**
- * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
- * @kvm: The VM pointer
- * @start: The intermediate physical base address of the range to unmap
- * @size: The size of the area to unmap
- *
- * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
- * be called while holding mmu_lock (unless for freeing the stage2 pgd before
- * destroying the VM), otherwise another faulting VCPU may come in and mess
- * with things behind our backs.
- */
-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
-{
- pgd_t *pgd;
- phys_addr_t addr = start, end = start + size;
- phys_addr_t next;
-
- assert_spin_locked(&kvm->mmu_lock);
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- do {
- next = stage2_pgd_addr_end(addr, end);
- if (!stage2_pgd_none(*pgd))
- unmap_stage2_puds(kvm, pgd, addr, next);
- /*
- * If the range is too large, release the kvm->mmu_lock
- * to prevent starvation and lockup detector warnings.
- */
- if (next != end)
- cond_resched_lock(&kvm->mmu_lock);
- } while (pgd++, addr = next, addr != end);
-}
-
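All of the stage-2 walkers above follow the kernel's usual range-walk idiom: at each level, an *_addr_end() helper clamps the walk to whichever comes first, the end of the region covered by the current table entry or the end of the requested range. A minimal sketch of such a helper, illustrative only and not one of the real stage2_* accessors:

	static inline phys_addr_t example_pmd_addr_end(phys_addr_t addr,
						       phys_addr_t end)
	{
		/* First address past the PMD-sized region containing addr. */
		phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;

		/*
		 * Compare via "- 1" so that a boundary which wraps to 0 at
		 * the very top of the address space still loses to end.
		 */
		return (boundary - 1 < end - 1) ? boundary : end;
	}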
-static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
- phys_addr_t addr, phys_addr_t end)
-{
- pte_t *pte;
-
- pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
- kvm_flush_dcache_pte(*pte);
- } while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
- phys_addr_t addr, phys_addr_t end)
-{
- pmd_t *pmd;
- phys_addr_t next;
-
- pmd = stage2_pmd_offset(pud, addr);
- do {
- next = stage2_pmd_addr_end(addr, end);
- if (!pmd_none(*pmd)) {
- if (pmd_thp_or_huge(*pmd))
- kvm_flush_dcache_pmd(*pmd);
- else
- stage2_flush_ptes(kvm, pmd, addr, next);
- }
- } while (pmd++, addr = next, addr != end);
-}
-
-static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
- phys_addr_t addr, phys_addr_t end)
-{
- pud_t *pud;
- phys_addr_t next;
-
- pud = stage2_pud_offset(pgd, addr);
- do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud))
- kvm_flush_dcache_pud(*pud);
- else
- stage2_flush_pmds(kvm, pud, addr, next);
- }
- } while (pud++, addr = next, addr != end);
-}
-
-static void stage2_flush_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
- phys_addr_t next;
- pgd_t *pgd;
-
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- do {
- next = stage2_pgd_addr_end(addr, end);
- stage2_flush_puds(kvm, pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
- * @kvm: The struct kvm pointer
- *
- * Go through the stage 2 page tables and invalidate any cache lines
- * backing memory already mapped to the VM.
- */
-static void stage2_flush_vm(struct kvm *kvm)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int idx;
-
- idx = srcu_read_lock(&kvm->srcu);
- spin_lock(&kvm->mmu_lock);
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots)
- stage2_flush_memslot(kvm, memslot);
-
- spin_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
-}
-
-static void clear_hyp_pgd_entry(pgd_t *pgd)
-{
- pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
- pgd_clear(pgd);
- pud_free(NULL, pud_table);
- put_page(virt_to_page(pgd));
-}
-
-static void clear_hyp_pud_entry(pud_t *pud)
-{
- pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
- VM_BUG_ON(pud_huge(*pud));
- pud_clear(pud);
- pmd_free(NULL, pmd_table);
- put_page(virt_to_page(pud));
-}
-
-static void clear_hyp_pmd_entry(pmd_t *pmd)
-{
- pte_t *pte_table = pte_offset_kernel(pmd, 0);
- VM_BUG_ON(pmd_thp_or_huge(*pmd));
- pmd_clear(pmd);
- pte_free_kernel(NULL, pte_table);
- put_page(virt_to_page(pmd));
-}
-
-static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
-{
- pte_t *pte, *start_pte;
-
- start_pte = pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte)) {
- kvm_set_pte(pte, __pte(0));
- put_page(virt_to_page(pte));
- }
- } while (pte++, addr += PAGE_SIZE, addr != end);
-
- if (hyp_pte_table_empty(start_pte))
- clear_hyp_pmd_entry(pmd);
-}
-
-static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next;
- pmd_t *pmd, *start_pmd;
-
- start_pmd = pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
- /* Hyp doesn't use huge pmds */
- if (!pmd_none(*pmd))
- unmap_hyp_ptes(pmd, addr, next);
- } while (pmd++, addr = next, addr != end);
-
- if (hyp_pmd_table_empty(start_pmd))
- clear_hyp_pud_entry(pud);
-}
-
-static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
-{
- phys_addr_t next;
- pud_t *pud, *start_pud;
-
- start_pud = pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- /* Hyp doesn't use huge puds */
- if (!pud_none(*pud))
- unmap_hyp_pmds(pud, addr, next);
- } while (pud++, addr = next, addr != end);
-
- if (hyp_pud_table_empty(start_pud))
- clear_hyp_pgd_entry(pgd);
-}
-
-static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
-{
- pgd_t *pgd;
- phys_addr_t addr = start, end = start + size;
- phys_addr_t next;
-
- /*
- * We don't unmap anything from HYP, except at the hyp tear down.
- * Hence, we don't have to invalidate the TLBs here.
- */
- pgd = pgdp + pgd_index(addr);
- do {
- next = pgd_addr_end(addr, end);
- if (!pgd_none(*pgd))
- unmap_hyp_puds(pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * free_hyp_pgds - free Hyp-mode page tables
- *
- * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
- * therefore contains either mappings in the kernel memory area (above
- * PAGE_OFFSET), or device mappings in the vmalloc range (from
- * VMALLOC_START to VMALLOC_END).
- *
- * boot_hyp_pgd should only map two pages for the init code.
- */
-void free_hyp_pgds(void)
-{
- unsigned long addr;
-
- mutex_lock(&kvm_hyp_pgd_mutex);
-
- if (boot_hyp_pgd) {
- unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
- boot_hyp_pgd = NULL;
- }
-
- if (hyp_pgd) {
- unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
- for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
- for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
- unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-
- free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
- hyp_pgd = NULL;
- }
- if (merged_hyp_pgd) {
- clear_page(merged_hyp_pgd);
- free_page((unsigned long)merged_hyp_pgd);
- merged_hyp_pgd = NULL;
- }
-
- mutex_unlock(&kvm_hyp_pgd_mutex);
-}
-
-static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
- unsigned long end, unsigned long pfn,
- pgprot_t prot)
-{
- pte_t *pte;
- unsigned long addr;
-
- addr = start;
- do {
- pte = pte_offset_kernel(pmd, addr);
- kvm_set_pte(pte, pfn_pte(pfn, prot));
- get_page(virt_to_page(pte));
- kvm_flush_dcache_to_poc(pte, sizeof(*pte));
- pfn++;
- } while (addr += PAGE_SIZE, addr != end);
-}
-
-static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
- unsigned long end, unsigned long pfn,
- pgprot_t prot)
-{
- pmd_t *pmd;
- pte_t *pte;
- unsigned long addr, next;
-
- addr = start;
- do {
- pmd = pmd_offset(pud, addr);
-
- BUG_ON(pmd_sect(*pmd));
-
- if (pmd_none(*pmd)) {
- pte = pte_alloc_one_kernel(NULL, addr);
- if (!pte) {
- kvm_err("Cannot allocate Hyp pte\n");
- return -ENOMEM;
- }
- pmd_populate_kernel(NULL, pmd, pte);
- get_page(virt_to_page(pmd));
- kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
- }
-
- next = pmd_addr_end(addr, end);
-
- create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
- pfn += (next - addr) >> PAGE_SHIFT;
- } while (addr = next, addr != end);
-
- return 0;
-}
-
-static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
- unsigned long end, unsigned long pfn,
- pgprot_t prot)
-{
- pud_t *pud;
- pmd_t *pmd;
- unsigned long addr, next;
- int ret;
-
- addr = start;
- do {
- pud = pud_offset(pgd, addr);
-
- if (pud_none_or_clear_bad(pud)) {
- pmd = pmd_alloc_one(NULL, addr);
- if (!pmd) {
- kvm_err("Cannot allocate Hyp pmd\n");
- return -ENOMEM;
- }
- pud_populate(NULL, pud, pmd);
- get_page(virt_to_page(pud));
- kvm_flush_dcache_to_poc(pud, sizeof(*pud));
- }
-
- next = pud_addr_end(addr, end);
- ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
- if (ret)
- return ret;
- pfn += (next - addr) >> PAGE_SHIFT;
- } while (addr = next, addr != end);
-
- return 0;
-}
-
-static int __create_hyp_mappings(pgd_t *pgdp,
- unsigned long start, unsigned long end,
- unsigned long pfn, pgprot_t prot)
-{
- pgd_t *pgd;
- pud_t *pud;
- unsigned long addr, next;
- int err = 0;
-
- mutex_lock(&kvm_hyp_pgd_mutex);
- addr = start & PAGE_MASK;
- end = PAGE_ALIGN(end);
- do {
- pgd = pgdp + pgd_index(addr);
-
- if (pgd_none(*pgd)) {
- pud = pud_alloc_one(NULL, addr);
- if (!pud) {
- kvm_err("Cannot allocate Hyp pud\n");
- err = -ENOMEM;
- goto out;
- }
- pgd_populate(NULL, pgd, pud);
- get_page(virt_to_page(pgd));
- kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
- }
-
- next = pgd_addr_end(addr, end);
- err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
- if (err)
- goto out;
- pfn += (next - addr) >> PAGE_SHIFT;
- } while (addr = next, addr != end);
-out:
- mutex_unlock(&kvm_hyp_pgd_mutex);
- return err;
-}
-
-static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
-{
- if (!is_vmalloc_addr(kaddr)) {
- BUG_ON(!virt_addr_valid(kaddr));
- return __pa(kaddr);
- } else {
- return page_to_phys(vmalloc_to_page(kaddr)) +
- offset_in_page(kaddr);
- }
-}
-
-/**
- * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
- * @from: The virtual kernel start address of the range
- * @to: The virtual kernel end address of the range (exclusive)
- * @prot: The protection to be applied to this range
- *
- * The same virtual address as the kernel virtual address is also used
- * in Hyp mode (modulo HYP_PAGE_OFFSET), mapping to the same underlying
- * physical pages.
- */
-int create_hyp_mappings(void *from, void *to, pgprot_t prot)
-{
- phys_addr_t phys_addr;
- unsigned long virt_addr;
- unsigned long start = kern_hyp_va((unsigned long)from);
- unsigned long end = kern_hyp_va((unsigned long)to);
-
- if (is_kernel_in_hyp_mode())
- return 0;
-
- start = start & PAGE_MASK;
- end = PAGE_ALIGN(end);
-
- for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
- int err;
-
- phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
- err = __create_hyp_mappings(hyp_pgd, virt_addr,
- virt_addr + PAGE_SIZE,
- __phys_to_pfn(phys_addr),
- prot);
- if (err)
- return err;
- }
-
- return 0;
-}
-
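As a usage sketch (the structure and function below are hypothetical), a caller typically mirrors a kernel object into Hyp before hypervisor code dereferences it:

	/* Hypothetical object shared between the kernel and Hyp mode. */
	static struct { unsigned long state; } example_obj;

	static int example_share_with_hyp(void)
	{
		/* Maps the page(s) backing example_obj at kern_hyp_va(&example_obj). */
		return create_hyp_mappings(&example_obj, &example_obj + 1,
					   PAGE_HYP);
	}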
-/**
- * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
- * @from: The kernel start VA of the range
- * @to: The kernel end VA of the range (exclusive)
- * @phys_addr: The physical start address which gets mapped
- *
- * The resulting HYP VA is the same as the kernel VA, modulo
- * HYP_PAGE_OFFSET.
- */
-int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
-{
- unsigned long start = kern_hyp_va((unsigned long)from);
- unsigned long end = kern_hyp_va((unsigned long)to);
-
- if (is_kernel_in_hyp_mode())
- return 0;
-
- /* Check for a valid kernel IO mapping */
- if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
- return -EINVAL;
-
- return __create_hyp_mappings(hyp_pgd, start, end,
- __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
-}
-
-/**
- * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
- * @kvm: The KVM struct pointer for the VM.
- *
- * Allocates only the stage-2 HW PGD level table(s), which can support
- * either full 40-bit input addresses or be limited to 32-bit ones. Clears the
- * allocated pages.
- *
- * Note we don't need locking here as this is only called when the VM is
- * created, which can only be done once.
- */
-int kvm_alloc_stage2_pgd(struct kvm *kvm)
-{
- pgd_t *pgd;
-
- if (kvm->arch.pgd != NULL) {
- kvm_err("kvm_arch already initialized?\n");
- return -EINVAL;
- }
-
- /* Allocate the HW PGD, making sure that each page gets its own refcount */
- pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
- if (!pgd)
- return -ENOMEM;
-
- kvm->arch.pgd = pgd;
- return 0;
-}
-
-static void stage2_unmap_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
-{
- hva_t hva = memslot->userspace_addr;
- phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t size = PAGE_SIZE * memslot->npages;
- hva_t reg_end = hva + size;
-
- /*
- * A memory region could potentially cover multiple VMAs, and any holes
- * between them, so iterate over all of them to find out if we should
- * unmap any of them.
- *
- * +--------------------------------------------+
- * +---------------+----------------+ +----------------+
- * | : VMA 1 | VMA 2 | | VMA 3 : |
- * +---------------+----------------+ +----------------+
- * | memory region |
- * +--------------------------------------------+
- */
- do {
- struct vm_area_struct *vma = find_vma(current->mm, hva);
- hva_t vm_start, vm_end;
-
- if (!vma || vma->vm_start >= reg_end)
- break;
-
- /*
- * Take the intersection of this VMA with the memory region
- */
- vm_start = max(hva, vma->vm_start);
- vm_end = min(reg_end, vma->vm_end);
-
- if (!(vma->vm_flags & VM_PFNMAP)) {
- gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
- unmap_stage2_range(kvm, gpa, vm_end - vm_start);
- }
- hva = vm_end;
- } while (hva < reg_end);
-}
-
-/**
- * stage2_unmap_vm - Unmap Stage-2 RAM mappings
- * @kvm: The struct kvm pointer
- *
- * Go through the memregions and unmap any regular RAM
- * backing memory already mapped to the VM.
- */
-void stage2_unmap_vm(struct kvm *kvm)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int idx;
-
- idx = srcu_read_lock(&kvm->srcu);
- down_read(&current->mm->mmap_sem);
- spin_lock(&kvm->mmu_lock);
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots)
- stage2_unmap_memslot(kvm, memslot);
-
- spin_unlock(&kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
- srcu_read_unlock(&kvm->srcu, idx);
-}
-
-/**
- * kvm_free_stage2_pgd - free all stage-2 tables
- * @kvm: The KVM struct pointer for the VM.
- *
- * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
- * underlying level-2 and level-3 tables before freeing the actual level-1 table
- * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
- */
-void kvm_free_stage2_pgd(struct kvm *kvm)
-{
- if (kvm->arch.pgd == NULL)
- return;
-
- spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
- spin_unlock(&kvm->mmu_lock);
-
- /* Free the HW pgd, one page at a time */
- free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
- kvm->arch.pgd = NULL;
-}
-
-static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr)
-{
- pgd_t *pgd;
- pud_t *pud;
-
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- if (WARN_ON(stage2_pgd_none(*pgd))) {
- if (!cache)
- return NULL;
- pud = mmu_memory_cache_alloc(cache);
- stage2_pgd_populate(pgd, pud);
- get_page(virt_to_page(pgd));
- }
-
- return stage2_pud_offset(pgd, addr);
-}
-
-static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr)
-{
- pud_t *pud;
- pmd_t *pmd;
-
- pud = stage2_get_pud(kvm, cache, addr);
- if (stage2_pud_none(*pud)) {
- if (!cache)
- return NULL;
- pmd = mmu_memory_cache_alloc(cache);
- stage2_pud_populate(pud, pmd);
- get_page(virt_to_page(pud));
- }
-
- return stage2_pmd_offset(pud, addr);
-}
-
-static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
- *cache, phys_addr_t addr, const pmd_t *new_pmd)
-{
- pmd_t *pmd, old_pmd;
-
- pmd = stage2_get_pmd(kvm, cache, addr);
- VM_BUG_ON(!pmd);
-
- /*
- * Mapping in huge pages should only happen through a fault. If a
- * page is merged into a transparent huge page, the individual
- * subpages of that huge page should be unmapped through MMU
- * notifiers before we get here.
- *
- * Merging of CompoundPages is not supported; they should instead first
- * be split, then unmapped, merged, and mapped back in on demand.
- */
- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
-
- old_pmd = *pmd;
- if (pmd_present(old_pmd)) {
- pmd_clear(pmd);
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- } else {
- get_page(virt_to_page(pmd));
- }
-
- kvm_set_pmd(pmd, *new_pmd);
- return 0;
-}
-
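One detail worth spelling out in stage2_set_pmd_huge(): the clear, TLB-flush, then set ordering for an already-present PMD is the architectural break-before-make sequence. When a valid block mapping changes, the old entry must be invalidated and its TLB entries flushed before the new one is written; otherwise different CPUs, or a speculating CPU, could hold conflicting translations for the same IPA.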
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
- phys_addr_t addr, const pte_t *new_pte,
- unsigned long flags)
-{
- pmd_t *pmd;
- pte_t *pte, old_pte;
- bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
- bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
-
- VM_BUG_ON(logging_active && !cache);
-
- /* Create stage-2 page table mapping - Levels 0 and 1 */
- pmd = stage2_get_pmd(kvm, cache, addr);
- if (!pmd) {
- /*
- * Ignore calls from kvm_set_spte_hva for unallocated
- * address ranges.
- */
- return 0;
- }
-
- /*
- * While dirty page logging is active, dissolve any huge PMD, then
- * continue on to allocate a regular page.
- */
- if (logging_active)
- stage2_dissolve_pmd(kvm, addr, pmd);
-
- /* Create stage-2 page mappings - Level 2 */
- if (pmd_none(*pmd)) {
- if (!cache)
- return 0; /* ignore calls from kvm_set_spte_hva */
- pte = mmu_memory_cache_alloc(cache);
- pmd_populate_kernel(NULL, pmd, pte);
- get_page(virt_to_page(pmd));
- }
-
- pte = pte_offset_kernel(pmd, addr);
-
- if (iomap && pte_present(*pte))
- return -EFAULT;
-
- /* Create 2nd stage page table mapping - Level 3 */
- old_pte = *pte;
- if (pte_present(old_pte)) {
- kvm_set_pte(pte, __pte(0));
- kvm_tlb_flush_vmid_ipa(kvm, addr);
- } else {
- get_page(virt_to_page(pte));
- }
-
- kvm_set_pte(pte, *new_pte);
- return 0;
-}
-
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static int stage2_ptep_test_and_clear_young(pte_t *pte)
-{
- if (pte_young(*pte)) {
- *pte = pte_mkold(*pte);
- return 1;
- }
- return 0;
-}
-#else
-static int stage2_ptep_test_and_clear_young(pte_t *pte)
-{
- return __ptep_test_and_clear_young(pte);
-}
-#endif
-
-static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
-{
- return stage2_ptep_test_and_clear_young((pte_t *)pmd);
-}
-
-/**
- * kvm_phys_addr_ioremap - map a device range to guest IPA
- *
- * @kvm: The KVM pointer
- * @guest_ipa: The IPA at which to insert the mapping
- * @pa: The physical address of the device
- * @size: The size of the mapping
- * @writable: Whether the mapping should allow guest writes
- */
-int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size, bool writable)
-{
- phys_addr_t addr, end;
- int ret = 0;
- unsigned long pfn;
- struct kvm_mmu_memory_cache cache = { 0, };
-
- end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
- pfn = __phys_to_pfn(pa);
-
- for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
- pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
-
- if (writable)
- pte = kvm_s2pte_mkwrite(pte);
-
- ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
- if (ret)
- goto out;
- spin_lock(&kvm->mmu_lock);
- ret = stage2_set_pte(kvm, &cache, addr, &pte,
- KVM_S2PTE_FLAG_IS_IOMAP);
- spin_unlock(&kvm->mmu_lock);
- if (ret)
- goto out;
-
- pfn++;
- }
-
-out:
- mmu_free_memory_cache(&cache);
- return ret;
-}
-
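A usage sketch for kvm_phys_addr_ioremap(); all addresses below are made up:

	/*
	 * Hypothetical: expose one 4K MMIO window at guest IPA 0x09000000,
	 * backed by host physical address 0x1c090000, with writes disallowed.
	 */
	static int example_map_mmio(struct kvm *kvm)
	{
		return kvm_phys_addr_ioremap(kvm, 0x09000000, 0x1c090000,
					     PAGE_SIZE, false);
	}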
-static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
-{
- kvm_pfn_t pfn = *pfnp;
- gfn_t gfn = *ipap >> PAGE_SHIFT;
-
- if (PageTransCompoundMap(pfn_to_page(pfn))) {
- unsigned long mask;
- /*
- * The address we faulted on is backed by a transparent huge
- * page. However, because we map the compound huge page and
- * not the individual tail page, we need to transfer the
- * refcount to the head page. We have to be careful that the
- * THP doesn't start to split while we are adjusting the
- * refcounts.
- *
- * We are sure this doesn't happen, because mmu_notifier_retry
- * was successful and we are holding the mmu_lock, so if this
- * THP is trying to split, it will be blocked in the mmu
- * notifier before touching any of the pages, specifically
- * before being able to call __split_huge_page_refcount().
- *
- * We can therefore safely transfer the refcount from PG_tail
- * to PG_head and switch the pfn from a tail page to the head
- * page accordingly.
- */
- mask = PTRS_PER_PMD - 1;
- VM_BUG_ON((gfn & mask) != (pfn & mask));
- if (pfn & mask) {
- *ipap &= PMD_MASK;
- kvm_release_pfn_clean(pfn);
- pfn &= ~mask;
- kvm_get_pfn(pfn);
- *pfnp = pfn;
- }
-
- return true;
- }
-
- return false;
-}
-
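A worked example of the adjustment, assuming 4KiB pages and 2MiB PMDs (so mask == PTRS_PER_PMD - 1 == 511): if the fault landed on tail page pfn 0x12345 of a THP, pfn & ~mask == 0x12200 is the head page. The refcount moves from the tail to the head, *pfnp becomes 0x12200, and *ipap is rounded down by PMD_MASK to the matching 2MiB-aligned IPA, so the fault can be satisfied with a single block mapping.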
-static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
-{
- if (kvm_vcpu_trap_is_iabt(vcpu))
- return false;
-
- return kvm_vcpu_dabt_iswrite(vcpu);
-}
-
-/**
- * stage2_wp_ptes - write protect PMD range
- * @pmd: pointer to pmd entry
- * @addr: range start address
- * @end: range end address
- */
-static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
-{
- pte_t *pte;
-
- pte = pte_offset_kernel(pmd, addr);
- do {
- if (!pte_none(*pte)) {
- if (!kvm_s2pte_readonly(pte))
- kvm_set_s2pte_readonly(pte);
- }
- } while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-/**
- * stage2_wp_pmds - write protect PUD range
- * @pud: pointer to pud entry
- * @addr: range start address
- * @end: range end address
- */
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
-{
- pmd_t *pmd;
- phys_addr_t next;
-
- pmd = stage2_pmd_offset(pud, addr);
-
- do {
- next = stage2_pmd_addr_end(addr, end);
- if (!pmd_none(*pmd)) {
- if (pmd_thp_or_huge(*pmd)) {
- if (!kvm_s2pmd_readonly(pmd))
- kvm_set_s2pmd_readonly(pmd);
- } else {
- stage2_wp_ptes(pmd, addr, next);
- }
- }
- } while (pmd++, addr = next, addr != end);
-}
-
-/**
- * stage2_wp_puds - write protect PGD range
- * @pgd: pointer to pgd entry
- * @addr: range start address
- * @end: range end address
- *
- * Process PUD entries; huge PUDs are not supported, so hitting one is a BUG.
- */
-static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
-{
- pud_t *pud;
- phys_addr_t next;
-
- pud = stage2_pud_offset(pgd, addr);
- do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- /* TODO:PUD not supported, revisit later if supported */
- BUG_ON(stage2_pud_huge(*pud));
- stage2_wp_pmds(pud, addr, next);
- }
- } while (pud++, addr = next, addr != end);
-}
-
-/**
- * stage2_wp_range() - write protect stage2 memory region range
- * @kvm: The KVM pointer
- * @addr: Start address of range
- * @end: End address of range
- */
-static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
-{
- pgd_t *pgd;
- phys_addr_t next;
-
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- do {
- /*
- * Release kvm_mmu_lock periodically if the memory region is
- * large. Otherwise, we may see kernel panics with
- * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
- * CONFIG_LOCKDEP. Additionally, holding the lock too long
- * will also starve other vCPUs.
- */
- if (need_resched() || spin_needbreak(&kvm->mmu_lock))
- cond_resched_lock(&kvm->mmu_lock);
-
- next = stage2_pgd_addr_end(addr, end);
- if (stage2_pgd_present(*pgd))
- stage2_wp_puds(pgd, addr, next);
- } while (pgd++, addr = next, addr != end);
-}
-
-/**
- * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
- * @kvm: The KVM pointer
- * @slot: The memory slot to write protect
- *
- * Called to start logging dirty pages after the KVM_MEM_LOG_DIRTY_PAGES
- * operation is called on a memory region. After this function returns, all
- * present PMDs and PTEs in the memory region are write protected, and the
- * dirty page log can then be read.
- *
- * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
- * serializing operations for VM memory regions.
- */
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
-{
- struct kvm_memslots *slots = kvm_memslots(kvm);
- struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
- phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
- phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
-
- spin_lock(&kvm->mmu_lock);
- stage2_wp_range(kvm, start, end);
- spin_unlock(&kvm->mmu_lock);
- kvm_flush_remote_tlbs(kvm);
-}
-
-/**
- * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
- * @kvm: The KVM pointer
- * @slot: The memory slot associated with mask
- * @gfn_offset: The gfn offset in memory slot
- * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
- * slot to be write protected
- *
- * Walks the bits set in mask and write protects the associated PTEs.
- * Caller must acquire kvm_mmu_lock.
- */
-static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask)
-{
- phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
- phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
- phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
-
- stage2_wp_range(kvm, start, end);
-}
-
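A worked example of the range computation: with mask == 0x2c (bits 2, 3 and 5 set), __ffs(mask) == 2 and __fls(mask) == 5, so the protected range is [base_gfn + 2, base_gfn + 6). A clear bit inside that span (bit 4 here) is revisited as well, which is harmless: stage2_wp_range() only touches entries that are present and not already read-only.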
-/*
- * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
- * dirty pages.
- *
- * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
- * enable dirty logging for them.
- */
-void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- gfn_t gfn_offset, unsigned long mask)
-{
- kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
-}
-
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
- unsigned long size)
-{
- __coherent_cache_guest_page(vcpu, pfn, size);
-}
-
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_memory_slot *memslot, unsigned long hva,
- unsigned long fault_status)
-{
- int ret;
- bool write_fault, writable, hugetlb = false, force_pte = false;
- unsigned long mmu_seq;
- gfn_t gfn = fault_ipa >> PAGE_SHIFT;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
- struct vm_area_struct *vma;
- kvm_pfn_t pfn;
- pgprot_t mem_type = PAGE_S2;
- bool logging_active = memslot_is_logging(memslot);
- unsigned long flags = 0;
-
- write_fault = kvm_is_write_fault(vcpu);
- if (fault_status == FSC_PERM && !write_fault) {
- kvm_err("Unexpected L2 read permission error\n");
- return -EFAULT;
- }
-
- /* Let's check if we will get back a huge page backed by hugetlbfs */
- down_read(&current->mm->mmap_sem);
- vma = find_vma_intersection(current->mm, hva, hva + 1);
- if (unlikely(!vma)) {
- kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
- up_read(&current->mm->mmap_sem);
- return -EFAULT;
- }
-
- if (is_vm_hugetlb_page(vma) && !logging_active) {
- hugetlb = true;
- gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
- } else {
- /*
- * Pages belonging to memslots that don't have the same
- * alignment for userspace and IPA cannot be mapped using
- * block descriptors even if the pages belong to a THP for
- * the process, because the stage-2 block descriptor will
- * cover more than a single THP and we lose atomicity for
- * unmapping, updates, and splits of the THP or other pages
- * in the stage-2 block range.
- */
- if ((memslot->userspace_addr & ~PMD_MASK) !=
- ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
- force_pte = true;
- }
- up_read(&current->mm->mmap_sem);
-
- /* We need minimum second+third level pages */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
- if (ret)
- return ret;
-
- mmu_seq = vcpu->kvm->mmu_notifier_seq;
- /*
- * Ensure the read of mmu_notifier_seq happens before we call
- * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
- * the page we just got a reference to gets unmapped before we have a
- * chance to grab the mmu_lock, which ensures that if the page gets
- * unmapped afterwards, the call to kvm_unmap_hva will take it away
- * from us again properly. This smp_rmb() interacts with the smp_wmb()
- * in kvm_mmu_notifier_invalidate_<page|range_end>.
- */
- smp_rmb();
-
- pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
- if (is_error_noslot_pfn(pfn))
- return -EFAULT;
-
- if (kvm_is_device_pfn(pfn)) {
- mem_type = PAGE_S2_DEVICE;
- flags |= KVM_S2PTE_FLAG_IS_IOMAP;
- } else if (logging_active) {
- /*
- * Pages in a memslot with logging enabled should not be
- * mapped with huge pages (it introduces churn and
- * performance degradation), so force a pte mapping.
- */
- force_pte = true;
- flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
-
- /*
- * Only actually map the page as writable if this was a write
- * fault.
- */
- if (!write_fault)
- writable = false;
- }
-
- spin_lock(&kvm->mmu_lock);
- if (mmu_notifier_retry(kvm, mmu_seq))
- goto out_unlock;
-
- if (!hugetlb && !force_pte)
- hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
-
- if (hugetlb) {
- pmd_t new_pmd = pfn_pmd(pfn, mem_type);
- new_pmd = pmd_mkhuge(new_pmd);
- if (writable) {
- new_pmd = kvm_s2pmd_mkwrite(new_pmd);
- kvm_set_pfn_dirty(pfn);
- }
- coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
- ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
- } else {
- pte_t new_pte = pfn_pte(pfn, mem_type);
-
- if (writable) {
- new_pte = kvm_s2pte_mkwrite(new_pte);
- kvm_set_pfn_dirty(pfn);
- mark_page_dirty(kvm, gfn);
- }
- coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
- ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
- }
-
-out_unlock:
- spin_unlock(&kvm->mmu_lock);
- kvm_set_pfn_accessed(pfn);
- kvm_release_pfn_clean(pfn);
- return ret;
-}
-
-/*
- * Resolve the access fault by making the page young again.
- * Note that because the faulting entry is guaranteed not to be
- * cached in the TLB, we don't need to invalidate anything.
- * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
- * so there is no need for atomic (pte|pmd)_mkyoung operations.
- */
-static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
-{
- pmd_t *pmd;
- pte_t *pte;
- kvm_pfn_t pfn;
- bool pfn_valid = false;
-
- trace_kvm_access_fault(fault_ipa);
-
- spin_lock(&vcpu->kvm->mmu_lock);
-
- pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
- goto out;
-
- if (pmd_thp_or_huge(*pmd)) { /* THP, HugeTLB */
- *pmd = pmd_mkyoung(*pmd);
- pfn = pmd_pfn(*pmd);
- pfn_valid = true;
- goto out;
- }
-
- pte = pte_offset_kernel(pmd, fault_ipa);
- if (pte_none(*pte)) /* Nothing there either */
- goto out;
-
- *pte = pte_mkyoung(*pte); /* Just a page... */
- pfn = pte_pfn(*pte);
- pfn_valid = true;
-out:
- spin_unlock(&vcpu->kvm->mmu_lock);
- if (pfn_valid)
- kvm_set_pfn_accessed(pfn);
-}
-
-/**
- * kvm_handle_guest_abort - handles all 2nd stage aborts
- * @vcpu: the VCPU pointer
- * @run: the kvm_run structure
- *
- * Any abort that gets to the host is almost guaranteed to be caused by a
- * missing second stage translation table entry: either the guest simply
- * needs more memory and we must allocate an appropriate page, or the guest
- * tried to access I/O memory, which is emulated by user space. The
- * distinction is based on the IPA causing the fault and whether this
- * memory region has been registered as standard RAM by user space.
- */
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- unsigned long fault_status;
- phys_addr_t fault_ipa;
- struct kvm_memory_slot *memslot;
- unsigned long hva;
- bool is_iabt, write_fault, writable;
- gfn_t gfn;
- int ret, idx;
-
- is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
- if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
- kvm_inject_vabt(vcpu);
- return 1;
- }
-
- fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
-
- trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
- kvm_vcpu_get_hfar(vcpu), fault_ipa);
-
- /* Check that the stage-2 fault is a translation, permission or access fault */
- fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
- if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
- fault_status != FSC_ACCESS) {
- kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
- kvm_vcpu_trap_get_class(vcpu),
- (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
- (unsigned long)kvm_vcpu_get_hsr(vcpu));
- return -EFAULT;
- }
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- gfn = fault_ipa >> PAGE_SHIFT;
- memslot = gfn_to_memslot(vcpu->kvm, gfn);
- hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
- write_fault = kvm_is_write_fault(vcpu);
- if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
- if (is_iabt) {
- /* Prefetch Abort on I/O address */
- kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
- ret = 1;
- goto out_unlock;
- }
-
- /*
- * Check for a cache maintenance operation. Since we
- * ended up here, we know it is outside of any memory
- * slot. But we can't find out if that is for a device,
- * or if the guest is just being stupid. The only thing
- * we know for sure is that this range cannot be cached.
- *
- * So let's assume that the guest is just being
- * cautious, and skip the instruction.
- */
- if (kvm_vcpu_dabt_is_cm(vcpu)) {
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- ret = 1;
- goto out_unlock;
- }
-
- /*
- * The IPA is reported as [MAX:12], so we need to
- * complement it with the bottom 12 bits from the
- * faulting VA. This is always 12 bits, irrespective
- * of the page size.
- */
- fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
- ret = io_mem_abort(vcpu, run, fault_ipa);
- goto out_unlock;
- }
-
- /* Userspace should not be able to register out-of-bounds IPAs */
- VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
-
- if (fault_status == FSC_ACCESS) {
- handle_access_fault(vcpu, fault_ipa);
- ret = 1;
- goto out_unlock;
- }
-
- ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
- if (ret == 0)
- ret = 1;
-out_unlock:
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- return ret;
-}
-
-static int handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- int (*handler)(struct kvm *kvm,
- gpa_t gpa, u64 size,
- void *data),
- void *data)
-{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int ret = 0;
-
- slots = kvm_memslots(kvm);
-
- /* we only care about the pages that the guest sees */
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gpa;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
-
- gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
- ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
- }
-
- return ret;
-}
-
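A worked example of the clipping: for a memslot with userspace_addr 0x40000000 covering 16MiB, a call with start == 0x3ffff000 and end == 0x40002000 yields hva_start == 0x40000000 and hva_end == 0x40002000, so the handler runs on an 8KiB range starting at the slot's base GPA; the page below the slot is never visited at all.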
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- unmap_stage2_range(kvm, gpa, size);
- return 0;
-}
-
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- unsigned long end = hva + PAGE_SIZE;
-
- if (!kvm->arch.pgd)
- return 0;
-
- trace_kvm_unmap_hva(hva);
- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
- return 0;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end)
-{
- if (!kvm->arch.pgd)
- return 0;
-
- trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
- return 0;
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- pte_t *pte = (pte_t *)data;
-
- WARN_ON(size != PAGE_SIZE);
- /*
- * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
- * flag clear because MMU notifiers will have unmapped a huge PMD before
- * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
- * therefore stage2_set_pte() never needs to clear out a huge PMD
- * through this calling path.
- */
- stage2_set_pte(kvm, NULL, gpa, pte, 0);
- return 0;
-}
-
-
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
- unsigned long end = hva + PAGE_SIZE;
- pte_t stage2_pte;
-
- if (!kvm->arch.pgd)
- return;
-
- trace_kvm_set_spte_hva(hva);
- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
- handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
-}
-
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- pmd_t *pmd;
- pte_t *pte;
-
- WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
- pmd = stage2_get_pmd(kvm, NULL, gpa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
- return 0;
-
- if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
- return stage2_pmdp_test_and_clear_young(pmd);
-
- pte = pte_offset_kernel(pmd, gpa);
- if (pte_none(*pte))
- return 0;
-
- return stage2_ptep_test_and_clear_young(pte);
-}
-
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- pmd_t *pmd;
- pte_t *pte;
-
- WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
- pmd = stage2_get_pmd(kvm, NULL, gpa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
- return 0;
-
- if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
- return pmd_young(*pmd);
-
- pte = pte_offset_kernel(pmd, gpa);
- if (!pte_none(*pte)) /* Just a page... */
- return pte_young(*pte);
-
- return 0;
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- trace_kvm_age_hva(start, end);
- return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- trace_kvm_test_age_hva(hva);
- return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, kvm_test_age_hva_handler, NULL);
-}
-
-void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
-{
- mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
-}
-
-phys_addr_t kvm_mmu_get_httbr(void)
-{
- if (__kvm_cpu_uses_extended_idmap())
- return virt_to_phys(merged_hyp_pgd);
- else
- return virt_to_phys(hyp_pgd);
-}
-
-phys_addr_t kvm_get_idmap_vector(void)
-{
- return hyp_idmap_vector;
-}
-
-static int kvm_map_idmap_text(pgd_t *pgd)
-{
- int err;
-
- /* Create the idmap in the boot page tables */
- err = __create_hyp_mappings(pgd,
- hyp_idmap_start, hyp_idmap_end,
- __phys_to_pfn(hyp_idmap_start),
- PAGE_HYP_EXEC);
- if (err)
- kvm_err("Failed to idmap %lx-%lx\n",
- hyp_idmap_start, hyp_idmap_end);
-
- return err;
-}
-
-int kvm_mmu_init(void)
-{
- int err;
-
- hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
- hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
- hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
-
- /*
- * We rely on the linker script to ensure at build time that the HYP
- * init code does not cross a page boundary: when start and (end - 1)
- * lie in the same page, XORing them leaves no bits set within
- * PAGE_MASK, so the check below fires only on a boundary crossing.
- */
- BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
-
- kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
- kvm_info("HYP VA range: %lx:%lx\n",
- kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
-
- if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
- hyp_idmap_start < kern_hyp_va(~0UL) &&
- hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
- /*
- * The idmap page intersects with the HYP VA space;
- * it is not safe to continue further.
- */
- kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
- err = -EINVAL;
- goto out;
- }
-
- hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
- if (!hyp_pgd) {
- kvm_err("Hyp mode PGD not allocated\n");
- err = -ENOMEM;
- goto out;
- }
-
- if (__kvm_cpu_uses_extended_idmap()) {
- boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- hyp_pgd_order);
- if (!boot_hyp_pgd) {
- kvm_err("Hyp boot PGD not allocated\n");
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_map_idmap_text(boot_hyp_pgd);
- if (err)
- goto out;
-
- merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (!merged_hyp_pgd) {
- kvm_err("Failed to allocate extra HYP pgd\n");
- goto out;
- }
- __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
- hyp_idmap_start);
- } else {
- err = kvm_map_idmap_text(hyp_pgd);
- if (err)
- goto out;
- }
-
- return 0;
-out:
- free_hyp_pgds();
- return err;
-}
-
-void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
-{
- /*
- * At this point memslot has been committed and there is an
- * allocated dirty_bitmap[], dirty pages will be tracked while the
- * memory slot is write protected.
- */
- if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
- kvm_mmu_wp_memory_region(kvm, mem->slot);
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
-{
- hva_t hva = mem->userspace_addr;
- hva_t reg_end = hva + mem->memory_size;
- bool writable = !(mem->flags & KVM_MEM_READONLY);
- int ret = 0;
-
- if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
- change != KVM_MR_FLAGS_ONLY)
- return 0;
-
- /*
- * Prevent userspace from creating a memory region outside of the IPA
- * space addressable by the KVM guest.
- */
- if (memslot->base_gfn + memslot->npages >=
- (KVM_PHYS_SIZE >> PAGE_SHIFT))
- return -EFAULT;
-
- down_read(&current->mm->mmap_sem);
- /*
- * A memory region could potentially cover multiple VMAs, and any holes
- * between them, so iterate over all of them to find out if we can map
- * any of them right now.
- *
- * +--------------------------------------------+
- * +---------------+----------------+ +----------------+
- * | : VMA 1 | VMA 2 | | VMA 3 : |
- * +---------------+----------------+ +----------------+
- * | memory region |
- * +--------------------------------------------+
- */
- do {
- struct vm_area_struct *vma = find_vma(current->mm, hva);
- hva_t vm_start, vm_end;
-
- if (!vma || vma->vm_start >= reg_end)
- break;
-
- /*
- * Mapping a read-only VMA is only allowed if the
- * memory region is configured as read-only.
- */
- if (writable && !(vma->vm_flags & VM_WRITE)) {
- ret = -EPERM;
- break;
- }
-
- /*
- * Take the intersection of this VMA with the memory region
- */
- vm_start = max(hva, vma->vm_start);
- vm_end = min(reg_end, vma->vm_end);
-
- if (vma->vm_flags & VM_PFNMAP) {
- gpa_t gpa = mem->guest_phys_addr +
- (vm_start - mem->userspace_addr);
- phys_addr_t pa;
-
- pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
- pa += vm_start - vma->vm_start;
-
- /* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
- vm_end - vm_start,
- writable);
- if (ret)
- break;
- }
- hva = vm_end;
- } while (hva < reg_end);
-
- if (change == KVM_MR_FLAGS_ONLY)
- goto out;
-
- spin_lock(&kvm->mmu_lock);
- if (ret)
- unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
- else
- stage2_flush_memslot(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
-out:
- up_read(&current->mm->mmap_sem);
- return ret;
-}
-
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
-void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
-{
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
- kvm_free_stage2_pgd(kvm);
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
- phys_addr_t size = slot->npages << PAGE_SHIFT;
-
- spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, gpa, size);
- spin_unlock(&kvm->mmu_lock);
-}
-
-/*
- * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
- *
- * Main problems:
- * - S/W ops are local to a CPU (not broadcast)
- * - We have line migration behind our back (speculation)
- * - System caches don't support S/W at all (damn!)
- *
- * In the face of the above, the best we can do is to try and convert
- * S/W ops to VA ops. Because the guest is not allowed to infer the
- * S/W to PA mapping, it can only use S/W to nuke the whole cache,
- * which is a rather good thing for us.
- *
- * Also, it is only used when turning caches on/off ("The expected
- * usage of the cache maintenance instructions that operate by set/way
- * is associated with the cache maintenance instructions associated
- * with the powerdown and powerup of caches, if this is required by
- * the implementation.").
- *
- * We use the following policy:
- *
- * - If we trap a S/W operation, we enable VM trapping to detect
- * caches being turned on/off, and do a full clean.
- *
- * - We flush the caches both when they are turned on and when they
- *   are turned off.
- *
- * - Once the caches are enabled, we stop trapping VM ops.
- */
-void kvm_set_way_flush(struct kvm_vcpu *vcpu)
-{
- unsigned long hcr = vcpu_get_hcr(vcpu);
-
- /*
- * If this is the first time we do a S/W operation
- * (i.e. HCR_TVM not set), flush the whole memory and enable
- * VM trapping.
- *
- * Otherwise, rely on the VM trapping to wait for the MMU +
- * Caches to be turned off. At that point, we'll be able to
- * clean the caches again.
- */
- if (!(hcr & HCR_TVM)) {
- trace_kvm_set_way_flush(*vcpu_pc(vcpu),
- vcpu_has_cache_enabled(vcpu));
- stage2_flush_vm(vcpu->kvm);
- vcpu_set_hcr(vcpu, hcr | HCR_TVM);
- }
-}
-
-void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
-{
- bool now_enabled = vcpu_has_cache_enabled(vcpu);
-
- /*
- * If switching the MMU+caches on, need to invalidate the caches.
- * If switching it off, need to clean the caches.
- * Clean + invalidate does the trick always.
- */
- if (now_enabled != was_enabled)
- stage2_flush_vm(vcpu->kvm);
-
- /* Caches are now on, stop trapping VM ops (until a S/W op) */
- if (now_enabled)
- vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
-
- trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
-}
diff --git a/arch/arm/kvm/perf.c b/arch/arm/kvm/perf.c
deleted file mode 100644
index 1a3849da0b4b..000000000000
--- a/arch/arm/kvm/perf.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Based on the x86 implementation.
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/perf_event.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_emulate.h>
-
-static int kvm_is_in_guest(void)
-{
- return kvm_arm_get_running_vcpu() != NULL;
-}
-
-static int kvm_is_user_mode(void)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_arm_get_running_vcpu();
-
- if (vcpu)
- return !vcpu_mode_priv(vcpu);
-
- return 0;
-}
-
-static unsigned long kvm_get_guest_ip(void)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_arm_get_running_vcpu();
-
- if (vcpu)
- return *vcpu_pc(vcpu);
-
- return 0;
-}
-
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .is_in_guest = kvm_is_in_guest,
- .is_user_mode = kvm_is_user_mode,
- .get_guest_ip = kvm_get_guest_ip,
-};
-
-int kvm_perf_init(void)
-{
- return perf_register_guest_info_callbacks(&kvm_guest_cbs);
-}
-
-int kvm_perf_teardown(void)
-{
- return perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
-}
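For context, a sketch of how the perf core of this era consults the registered callbacks when classifying a sample; this is simplified, not verbatim from kernel/events/core.c:

	static int example_misc_flags(struct pt_regs *regs)
	{
		int misc = 0;

		if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
			/*
			 * The PMU interrupt arrived while a vCPU was
			 * running: attribute the sample to the guest.
			 */
			misc |= perf_guest_cbs->is_user_mode() ?
				PERF_RECORD_MISC_GUEST_USER :
				PERF_RECORD_MISC_GUEST_KERNEL;
		} else {
			misc |= user_mode(regs) ? PERF_RECORD_MISC_USER :
						  PERF_RECORD_MISC_KERNEL;
		}

		return misc;
	}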
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
deleted file mode 100644
index a08d7a93aebb..000000000000
--- a/arch/arm/kvm/psci.c
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/preempt.h>
-#include <linux/kvm_host.h>
-#include <linux/wait.h>
-
-#include <asm/cputype.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_psci.h>
-#include <asm/kvm_host.h>
-
-#include <uapi/linux/psci.h>
-
-/*
- * This is an implementation of the Power State Coordination Interface
- * as described in ARM document number ARM DEN 0022A.
- */
-
-#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
-
-static unsigned long psci_affinity_mask(unsigned long affinity_level)
-{
- if (affinity_level <= 3)
- return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);
-
- return 0;
-}
-
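A worked example: with MPIDR_LEVEL_BITS == 8, psci_affinity_mask(1) evaluates to MPIDR_HWID_BITMASK & ~0xff, i.e. Aff0 is discarded and Aff1 and above are kept, so an AFFINITY_INFO query at level 1 compares whole clusters rather than individual cores. Levels above 3 are rejected by returning an all-zero mask, which the caller turns into PSCI_RET_INVALID_PARAMS.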
-static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
-{
- /*
- * NOTE: For simplicity, we emulate VCPU suspend the same way
- * as WFI (Wait-for-interrupt).
- *
- * This means for KVM the wakeup events are interrupts and
- * this is consistent with intended use of StateID as described
- * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A).
- *
- * Further, we also treat a power-down request the same as a
- * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
- * specification (ARM DEN 0022A). This means all suspend states
- * for KVM will preserve the register state.
- */
- kvm_vcpu_block(vcpu);
-
- return PSCI_RET_SUCCESS;
-}
-
-static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.power_off = true;
-}
-
-static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
-{
- struct kvm *kvm = source_vcpu->kvm;
- struct kvm_vcpu *vcpu = NULL;
- struct swait_queue_head *wq;
- unsigned long cpu_id;
- unsigned long context_id;
- phys_addr_t target_pc;
-
- cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
- if (vcpu_mode_is_32bit(source_vcpu))
- cpu_id &= ~((u32) 0);
-
- vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
-
- /*
- * Make sure the caller requested a valid CPU and that the CPU is
- * turned off.
- */
- if (!vcpu)
- return PSCI_RET_INVALID_PARAMS;
- if (!vcpu->arch.power_off) {
- if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
- return PSCI_RET_ALREADY_ON;
- else
- return PSCI_RET_INVALID_PARAMS;
- }
-
- target_pc = vcpu_get_reg(source_vcpu, 2);
- context_id = vcpu_get_reg(source_vcpu, 3);
-
- kvm_reset_vcpu(vcpu);
-
- /* Gracefully handle Thumb2 entry point */
- if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
- target_pc &= ~((phys_addr_t) 1);
- vcpu_set_thumb(vcpu);
- }
-
- /* Propagate caller endianness */
- if (kvm_vcpu_is_be(source_vcpu))
- kvm_vcpu_set_be(vcpu);
-
- *vcpu_pc(vcpu) = target_pc;
- /*
- * NOTE: We always update r0 (or x0) because for PSCI v0.1
- * the general purpose registers are undefined upon CPU_ON.
- */
- vcpu_set_reg(vcpu, 0, context_id);
- vcpu->arch.power_off = false;
- smp_mb(); /* Make sure the above is visible */
-
- wq = kvm_arch_vcpu_wq(vcpu);
- swake_up(wq);
-
- return PSCI_RET_SUCCESS;
-}
-
-static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
-{
- int i, matching_cpus = 0;
- unsigned long mpidr;
- unsigned long target_affinity;
- unsigned long target_affinity_mask;
- unsigned long lowest_affinity_level;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_vcpu *tmp;
-
- target_affinity = vcpu_get_reg(vcpu, 1);
- lowest_affinity_level = vcpu_get_reg(vcpu, 2);
-
- /* Determine target affinity mask */
- target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
- if (!target_affinity_mask)
- return PSCI_RET_INVALID_PARAMS;
-
- /* Ignore other bits of target affinity */
- target_affinity &= target_affinity_mask;
-
- /*
- * If one or more VCPUs matching the target affinity are
- * running, return ON; otherwise return OFF.
- */
- kvm_for_each_vcpu(i, tmp, kvm) {
- mpidr = kvm_vcpu_get_mpidr_aff(tmp);
- if ((mpidr & target_affinity_mask) == target_affinity) {
- matching_cpus++;
- if (!tmp->arch.power_off)
- return PSCI_0_2_AFFINITY_LEVEL_ON;
- }
- }
-
- if (!matching_cpus)
- return PSCI_RET_INVALID_PARAMS;
-
- return PSCI_0_2_AFFINITY_LEVEL_OFF;
-}
-
-static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
-{
- int i;
- struct kvm_vcpu *tmp;
-
- /*
- * The KVM ABI specifies that a system event exit may call KVM_RUN
- * again and may perform shutdown/reboot at a later time than when the
- * actual request is made. Since we are implementing PSCI and a
- * caller of PSCI reboot and shutdown expects that the system shuts
- * down or reboots immediately, let's make sure that VCPUs are not run
- * after this call is handled and before the VCPUs have been
- * re-initialized.
- */
- kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
- tmp->arch.power_off = true;
- kvm_vcpu_kick(tmp);
- }
-
- memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
- vcpu->run->system_event.type = type;
- vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
-}
-
-static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
-{
- kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
-}
-
-static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
-{
- kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
-}
-
-int kvm_psci_version(struct kvm_vcpu *vcpu)
-{
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
- return KVM_ARM_PSCI_0_2;
-
- return KVM_ARM_PSCI_0_1;
-}
-
-static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
- unsigned long val;
- int ret = 1;
-
- switch (psci_fn) {
- case PSCI_0_2_FN_PSCI_VERSION:
- /*
- * Bits[31:16] = Major Version = 0
- * Bits[15:0] = Minor Version = 2
- */
- val = 2;
- break;
- case PSCI_0_2_FN_CPU_SUSPEND:
- case PSCI_0_2_FN64_CPU_SUSPEND:
- val = kvm_psci_vcpu_suspend(vcpu);
- break;
- case PSCI_0_2_FN_CPU_OFF:
- kvm_psci_vcpu_off(vcpu);
- val = PSCI_RET_SUCCESS;
- break;
- case PSCI_0_2_FN_CPU_ON:
- case PSCI_0_2_FN64_CPU_ON:
- mutex_lock(&kvm->lock);
- val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
- break;
- case PSCI_0_2_FN_AFFINITY_INFO:
- case PSCI_0_2_FN64_AFFINITY_INFO:
- val = kvm_psci_vcpu_affinity_info(vcpu);
- break;
- case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
- /*
- * Either the Trusted OS is MP and hence does not require
- * migration, or no Trusted OS is present.
- */
- val = PSCI_0_2_TOS_MP;
- break;
- case PSCI_0_2_FN_SYSTEM_OFF:
- kvm_psci_system_off(vcpu);
- /*
- * We shouldn't be going back to the guest VCPU after
- * receiving a SYSTEM_OFF request.
- *
- * If user space accidentally or deliberately resumes the
- * guest VCPU after a SYSTEM_OFF request, the guest VCPU
- * should see an internal failure from the PSCI return
- * value. To achieve this, we preload r0 (or x0) with
- * PSCI return value INTERNAL_FAILURE.
- */
- val = PSCI_RET_INTERNAL_FAILURE;
- ret = 0;
- break;
- case PSCI_0_2_FN_SYSTEM_RESET:
- kvm_psci_system_reset(vcpu);
- /*
- * Same reason as SYSTEM_OFF for preloading r0 (or x0)
- * with PSCI return value INTERNAL_FAILURE.
- */
- val = PSCI_RET_INTERNAL_FAILURE;
- ret = 0;
- break;
- default:
- val = PSCI_RET_NOT_SUPPORTED;
- break;
- }
-
- vcpu_set_reg(vcpu, 0, val);
- return ret;
-}
-
-static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
- unsigned long val;
-
- switch (psci_fn) {
- case KVM_PSCI_FN_CPU_OFF:
- kvm_psci_vcpu_off(vcpu);
- val = PSCI_RET_SUCCESS;
- break;
- case KVM_PSCI_FN_CPU_ON:
- mutex_lock(&kvm->lock);
- val = kvm_psci_vcpu_on(vcpu);
- mutex_unlock(&kvm->lock);
- break;
- default:
- val = PSCI_RET_NOT_SUPPORTED;
- break;
- }
-
- vcpu_set_reg(vcpu, 0, val);
- return 1;
-}
-
-/**
- * kvm_psci_call - handle PSCI call if r0 value is in range
- * @vcpu: Pointer to the VCPU struct
- *
- * Handle PSCI calls from guests through traps from HVC instructions.
- * The calling convention is similar to SMC calls to the secure world
- * where the function number is placed in r0.
- *
- * This function returns: > 0 (success), 0 (success but exit to user
- * space), and < 0 (errors)
- *
- * Errors:
- * -EINVAL: Unrecognized PSCI function
- */
-int kvm_psci_call(struct kvm_vcpu *vcpu)
-{
- switch (kvm_psci_version(vcpu)) {
- case KVM_ARM_PSCI_0_2:
- return kvm_psci_0_2_call(vcpu);
- case KVM_ARM_PSCI_0_1:
- return kvm_psci_0_1_call(vcpu);
- default:
- return -EINVAL;
- };
-}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index c25a88598eb0..b0d10648c486 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -1,138 +1,11 @@
-#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_KVM_H
+#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM_KVM_H
#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-/*
- * Tracepoints for entry/exit to guest
- */
-TRACE_EVENT(kvm_entry,
- TP_PROTO(unsigned long vcpu_pc),
- TP_ARGS(vcpu_pc),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- ),
-
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_exit,
- TP_PROTO(int idx, unsigned int exit_reason, unsigned long vcpu_pc),
- TP_ARGS(idx, exit_reason, vcpu_pc),
-
- TP_STRUCT__entry(
- __field( int, idx )
- __field( unsigned int, exit_reason )
- __field( unsigned long, vcpu_pc )
- ),
-
- TP_fast_assign(
- __entry->idx = idx;
- __entry->exit_reason = exit_reason;
- __entry->vcpu_pc = vcpu_pc;
- ),
-
- TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
- __print_symbolic(__entry->idx, kvm_arm_exception_type),
- __entry->exit_reason,
- __print_symbolic(__entry->exit_reason, kvm_arm_exception_class),
- __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_guest_fault,
- TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
- unsigned long hxfar,
- unsigned long long ipa),
- TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( unsigned long, hsr )
- __field( unsigned long, hxfar )
- __field( unsigned long long, ipa )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->hsr = hsr;
- __entry->hxfar = hxfar;
- __entry->ipa = ipa;
- ),
-
- TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
- __entry->ipa, __entry->hsr,
- __entry->hxfar, __entry->vcpu_pc)
-);
-
-TRACE_EVENT(kvm_access_fault,
- TP_PROTO(unsigned long ipa),
- TP_ARGS(ipa),
-
- TP_STRUCT__entry(
- __field( unsigned long, ipa )
- ),
-
- TP_fast_assign(
- __entry->ipa = ipa;
- ),
-
- TP_printk("IPA: %lx", __entry->ipa)
-);
-
-TRACE_EVENT(kvm_irq_line,
- TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
- TP_ARGS(type, vcpu_idx, irq_num, level),
-
- TP_STRUCT__entry(
- __field( unsigned int, type )
- __field( int, vcpu_idx )
- __field( int, irq_num )
- __field( int, level )
- ),
-
- TP_fast_assign(
- __entry->type = type;
- __entry->vcpu_idx = vcpu_idx;
- __entry->irq_num = irq_num;
- __entry->level = level;
- ),
-
- TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
- (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
- (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
- (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
- __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
-);
-
-TRACE_EVENT(kvm_mmio_emulate,
- TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
- unsigned long cpsr),
- TP_ARGS(vcpu_pc, instr, cpsr),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( unsigned long, instr )
- __field( unsigned long, cpsr )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->instr = instr;
- __entry->cpsr = cpsr;
- ),
-
- TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
- __entry->vcpu_pc, __entry->instr, __entry->cpsr)
-);
-
/* Architecturally implementation defined CP15 register access */
TRACE_EVENT(kvm_emulate_cp15_imp,
TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
@@ -181,87 +54,6 @@ TRACE_EVENT(kvm_wfx,
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
-TRACE_EVENT(kvm_unmap_hva_range,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_set_spte_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
-);
-
-TRACE_EVENT(kvm_age_hva,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_test_age_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
-);
-
TRACE_EVENT(kvm_hvc,
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
TP_ARGS(vcpu_pc, r0, imm),
@@ -282,49 +74,10 @@ TRACE_EVENT(kvm_hvc,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
-TRACE_EVENT(kvm_set_way_flush,
- TP_PROTO(unsigned long vcpu_pc, bool cache),
- TP_ARGS(vcpu_pc, cache),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( bool, cache )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->cache = cache;
- ),
-
- TP_printk("S/W flush at 0x%016lx (cache %s)",
- __entry->vcpu_pc, __entry->cache ? "on" : "off")
-);
-
-TRACE_EVENT(kvm_toggle_cache,
- TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
- TP_ARGS(vcpu_pc, was, now),
-
- TP_STRUCT__entry(
- __field( unsigned long, vcpu_pc )
- __field( bool, was )
- __field( bool, now )
- ),
-
- TP_fast_assign(
- __entry->vcpu_pc = vcpu_pc;
- __entry->was = was;
- __entry->now = now;
- ),
-
- TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
- __entry->vcpu_pc, __entry->was ? "on" : "off",
- __entry->now ? "on" : "off")
-);
-
-#endif /* _TRACE_KVM_H */
+#endif /* _TRACE_ARM_KVM_H */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH arch/arm/kvm
+#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
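As a reminder of how these events are consumed (a hedged sketch, not code from this patch): every TRACE_EVENT(name, ...) kept in this header generates a trace_name() helper that the instrumented path calls with arguments matching its TP_PROTO(). For the retained kvm_hvc event, a call site would look like:

/* Hypothetical call site; the accessors mirror arch/arm KVM helpers. */
static int handle_hvc_traced(struct kvm_vcpu *vcpu)
{
	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
		      kvm_vcpu_hvc_get_imm(vcpu));

	return kvm_psci_call(vcpu);
}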
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 841e924143f9..cbd959b73654 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -1,6 +1,7 @@
menuconfig ARCH_AT91
bool "Atmel SoCs"
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+ select ARM_CPU_SUSPEND if PM
select COMMON_CLK_AT91
select GPIOLIB
select PINCTRL
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 2cd27c830ab6..283e79ab587d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -335,7 +335,7 @@ static const struct ramc_info ramc_infos[] __initconst = {
{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
};
-static const struct of_device_id const ramc_ids[] __initconst = {
+static const struct of_device_id ramc_ids[] __initconst = {
{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c
index cf3f8658f0e5..a55a7ecf146a 100644
--- a/arch/arm/mach-bcm/bcm_kona_smc.c
+++ b/arch/arm/mach-bcm/bcm_kona_smc.c
@@ -33,7 +33,7 @@ struct bcm_kona_smc_data {
unsigned result;
};
-static const struct of_device_id const bcm_kona_smc_ids[] __initconst = {
+static const struct of_device_id bcm_kona_smc_ids[] __initconst = {
{.compatible = "brcm,kona-smc"},
{.compatible = "bcm,kona-smc"}, /* deprecated name */
{},
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 03da3813f1ab..7d5a44a06648 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -346,7 +346,7 @@ static struct usb_ohci_pdata cns3xxx_usb_ohci_pdata = {
.power_off = csn3xxx_usb_power_off,
};
-static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = {
+static const struct of_dev_auxdata cns3xxx_auxdata[] __initconst = {
{ "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata },
{ "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata },
{ "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL },
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index efb80354f303..b5cc05dc2cb2 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
if (!davinci_sram_suspend) {
pr_err("PM: cannot allocate SRAM memory\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto no_sram_mem;
}
davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
suspend_set_ops(&davinci_pm_ops);
+ return 0;
+
+no_sram_mem:
+ iounmap(pm_config.ddrpsc_reg_base);
no_ddrpsc_mem:
iounmap(pm_config.ddrpll_reg_base);
no_ddrpll_mem:
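The change completes davinci_pm_init()'s unwind path: the SRAM-allocation failure now unmaps the DDR PSC registers instead of leaking them. The general pattern, as a self-contained user-space sketch (names illustrative):

#include <stdlib.h>

static int init_two_resources(void **a, void **b)
{
	int ret;

	*a = malloc(64);
	if (!*a)
		return -1;		/* nothing to unwind yet */

	*b = malloc(64);
	if (!*b) {
		ret = -1;
		goto err_free_a;	/* release only what exists */
	}
	return 0;

err_free_a:
	free(*a);
	*a = NULL;
	return ret;
}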
diff --git a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
index 59cf310bc1e9..e8d417309f33 100644
--- a/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
+++ b/arch/arm/mach-omap2/clkt2xxx_dpllcore.c
@@ -138,7 +138,8 @@ int omap2_reprogram_dpllcore(struct clk_hw *hw, unsigned long rate,
if (!dd)
return -EINVAL;
- tmpset.cm_clksel1_pll = readl_relaxed(dd->mult_div1_reg);
+ tmpset.cm_clksel1_pll =
+ omap_clk_ll_ops.clk_readl(&dd->mult_div1_reg);
tmpset.cm_clksel1_pll &= ~(dd->mult_mask |
dd->div1_mask);
div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 1270afdcacdf..42881f21cede 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -54,9 +54,10 @@ u16 cpu_mask;
#define OMAP3PLUS_DPLL_FINT_MIN 32000
#define OMAP3PLUS_DPLL_FINT_MAX 52000000
-static struct ti_clk_ll_ops omap_clk_ll_ops = {
+struct ti_clk_ll_ops omap_clk_ll_ops = {
.clkdm_clk_enable = clkdm_clk_enable,
.clkdm_clk_disable = clkdm_clk_disable,
+ .clkdm_lookup = clkdm_lookup,
.cm_wait_module_ready = omap_cm_wait_module_ready,
.cm_split_idlest_reg = cm_split_idlest_reg,
};
@@ -78,38 +79,6 @@ int __init omap2_clk_setup_ll_ops(void)
* OMAP2+ specific clock functions
*/
-/* Public functions */
-
-/**
- * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
- * @clk: OMAP clock struct ptr to use
- *
- * Convert a clockdomain name stored in a struct clk 'clk' into a
- * clockdomain pointer, and save it into the struct clk. Intended to be
- * called during clk_register(). No return value.
- */
-void omap2_init_clk_clkdm(struct clk_hw *hw)
-{
- struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- struct clockdomain *clkdm;
- const char *clk_name;
-
- if (!clk->clkdm_name)
- return;
-
- clk_name = __clk_get_name(hw->clk);
-
- clkdm = clkdm_lookup(clk->clkdm_name);
- if (clkdm) {
- pr_debug("clock: associated clk %s to clkdm %s\n",
- clk_name, clk->clkdm_name);
- clk->clkdm = clkdm;
- } else {
- pr_debug("clock: could not associate clk %s to clkdm %s\n",
- clk_name, clk->clkdm_name);
- }
-}
-
/**
* ti_clk_init_features - init clock features struct for the SoC
*
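With omap2_init_clk_clkdm() removed here, the clockdomain lookup moves into the generic TI clock driver, which reaches platform code only through the exported omap_clk_ll_ops table. A rough sketch of that indirection (types abbreviated, names hypothetical):

struct clockdomain;

struct ti_ll_ops_like {
	struct clockdomain *(*clkdm_lookup)(const char *name);
};

struct omap_clk_like {
	const char *clkdm_name;
	struct clockdomain *clkdm;
};

static struct ti_ll_ops_like *ti_ops;	/* registered once at init */

static void bind_clkdm(struct omap_clk_like *clk)
{
	if (clk->clkdm_name && ti_ops && ti_ops->clkdm_lookup)
		clk->clkdm = ti_ops->clkdm_lookup(clk->clkdm_name);
}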
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 4e66295dca25..cf45550197e6 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -64,6 +64,8 @@
#define OMAP4XXX_EN_DPLL_FRBYPASS 0x6
#define OMAP4XXX_EN_DPLL_LOCKED 0x7
+extern struct ti_clk_ll_ops omap_clk_ll_ops;
+
extern u16 cpu_mask;
extern const struct clkops clkops_omap2_dflt_wait;
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index 1fe3e6b833d2..de75cbcdc9d1 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -23,6 +23,7 @@
#define MAX_MODULE_READY_TIME 2000
# ifndef __ASSEMBLER__
+#include <linux/clk/ti.h>
extern void __iomem *cm_base;
extern void __iomem *cm2_base;
extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2);
@@ -50,7 +51,7 @@ extern void omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2);
* @module_disable: ptr to the SoC CM-specific module_disable impl
*/
struct cm_ll_data {
- int (*split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
+ int (*split_idlest_reg)(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id);
int (*wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
u8 idlest_shift);
@@ -60,7 +61,7 @@ struct cm_ll_data {
void (*module_disable)(u8 part, u16 inst, u16 clkctrl_offs);
};
-extern int cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst,
+extern int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id);
int omap_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_reg,
u8 idlest_shift);
diff --git a/arch/arm/mach-omap2/cm2xxx.c b/arch/arm/mach-omap2/cm2xxx.c
index 3e5fd3587eb1..cd90b4c6a06b 100644
--- a/arch/arm/mach-omap2/cm2xxx.c
+++ b/arch/arm/mach-omap2/cm2xxx.c
@@ -204,7 +204,7 @@ void omap2xxx_cm_apll96_disable(void)
* XXX This function is only needed until absolute register addresses are
* removed from the OMAP struct clk records.
*/
-static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
+static int omap2xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg,
s16 *prcm_inst,
u8 *idlest_reg_id)
{
@@ -212,10 +212,7 @@ static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
u8 idlest_offs;
int i;
- if (idlest_reg < cm_base || idlest_reg > (cm_base + 0x0fff))
- return -EINVAL;
-
- idlest_offs = (unsigned long)idlest_reg & 0xff;
+ idlest_offs = idlest_reg->offset & 0xff;
for (i = 0; i < ARRAY_SIZE(omap2xxx_cm_idlest_offs); i++) {
if (idlest_offs == omap2xxx_cm_idlest_offs[i]) {
*idlest_reg_id = i + 1;
@@ -226,7 +223,7 @@ static int omap2xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
if (i == ARRAY_SIZE(omap2xxx_cm_idlest_offs))
return -EINVAL;
- offs = idlest_reg - cm_base;
+ offs = idlest_reg->offset;
offs &= 0xff00;
*prcm_inst = offs;
diff --git a/arch/arm/mach-omap2/cm3xxx.c b/arch/arm/mach-omap2/cm3xxx.c
index d91ae8206d1e..55b046a719dc 100644
--- a/arch/arm/mach-omap2/cm3xxx.c
+++ b/arch/arm/mach-omap2/cm3xxx.c
@@ -118,7 +118,7 @@ static int omap3xxx_cm_wait_module_ready(u8 part, s16 prcm_mod, u16 idlest_id,
* XXX This function is only needed until absolute register addresses are
* removed from the OMAP struct clk records.
*/
-static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
+static int omap3xxx_cm_split_idlest_reg(struct clk_omap_reg *idlest_reg,
s16 *prcm_inst,
u8 *idlest_reg_id)
{
@@ -126,11 +126,7 @@ static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
u8 idlest_offs;
int i;
- if (idlest_reg < (cm_base + OMAP3430_IVA2_MOD) ||
- idlest_reg > (cm_base + 0x1ffff))
- return -EINVAL;
-
- idlest_offs = (unsigned long)idlest_reg & 0xff;
+ idlest_offs = idlest_reg->offset & 0xff;
for (i = 0; i < ARRAY_SIZE(omap3xxx_cm_idlest_offs); i++) {
if (idlest_offs == omap3xxx_cm_idlest_offs[i]) {
*idlest_reg_id = i + 1;
@@ -141,7 +137,7 @@ static int omap3xxx_cm_split_idlest_reg(void __iomem *idlest_reg,
if (i == ARRAY_SIZE(omap3xxx_cm_idlest_offs))
return -EINVAL;
- offs = idlest_reg - cm_base;
+ offs = idlest_reg->offset;
offs &= 0xff00;
*prcm_inst = offs;
diff --git a/arch/arm/mach-omap2/cm_common.c b/arch/arm/mach-omap2/cm_common.c
index 23e8bcec34e3..bbe41f4c9dc8 100644
--- a/arch/arm/mach-omap2/cm_common.c
+++ b/arch/arm/mach-omap2/cm_common.c
@@ -65,7 +65,7 @@ void __init omap2_set_globals_cm(void __iomem *cm, void __iomem *cm2)
* or 0 upon success. XXX This function is only needed until absolute
* register addresses are removed from the OMAP struct clk records.
*/
-int cm_split_idlest_reg(void __iomem *idlest_reg, s16 *prcm_inst,
+int cm_split_idlest_reg(struct clk_omap_reg *idlest_reg, s16 *prcm_inst,
u8 *idlest_reg_id)
{
if (!cm_ll_data->split_idlest_reg) {
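Once the IDLEST register is described by an offset instead of a raw __iomem pointer, the 2xxx/3xxx decoders above shrink to plain bit slicing and no longer need base-address range checks. The shared shape, sketched (struct layout abbreviated):

struct clk_omap_reg_like { unsigned short offset; };

static void split_idlest(const struct clk_omap_reg_like *reg,
			 short *prcm_inst, unsigned char *idlest_offs)
{
	*idlest_offs = reg->offset & 0xff;	/* which IDLEST register */
	*prcm_inst = reg->offset & 0xff00;	/* which CM instance */
}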
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 3089d3bfa19b..8cc6338fcb12 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -266,11 +266,12 @@ extern int omap4_cpu_kill(unsigned int cpu);
extern const struct smp_operations omap4_smp_ops;
#endif
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
+
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
extern int omap4_mpuss_init(void);
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
-extern u32 omap4_get_cpu1_ns_pa_addr(void);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
unsigned int power_state)
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 03ec6d307c82..4cfc4f9b2c69 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -213,11 +213,6 @@ static void __init save_l2x0_context(void)
{}
#endif
-u32 omap4_get_cpu1_ns_pa_addr(void)
-{
- return old_cpu1_ns_pa_addr;
-}
-
/**
* omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
* The purpose of this function is to manage low power programming
@@ -457,6 +452,11 @@ int __init omap4_mpuss_init(void)
#endif
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+ return old_cpu1_ns_pa_addr;
+}
+
/*
* For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to
* current kernel's secondary_startup() early before
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 3faf454ba487..33e4953c61a8 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -306,7 +306,6 @@ static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
OMAP_AUX_CORE_BOOT_1);
- cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
/* Did the configured secondary_startup() get overwritten? */
if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
@@ -316,9 +315,13 @@ static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
* If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
* deeper idle state in WFI and will wake to an invalid address.
*/
- if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
- !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
- needs_reset = true;
+ if ((soc_is_omap44xx() || soc_is_omap54xx())) {
+ cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+ if (!omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+ needs_reset = true;
+ } else {
+ cpu1_ns_pa_addr = 0;
+ }
if (!needs_reset || !c->cpu1_rstctrl_va)
return;
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 2b138b65129a..dc11841ca334 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -711,7 +711,7 @@ static struct omap_prcm_init_data scrm_data __initdata = {
};
#endif
-static const struct of_device_id const omap_prcm_dt_match_table[] __initconst = {
+static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
#ifdef CONFIG_SOC_AM33XX
{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
#endif
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index 2028167fff31..d76b1e5eb8ba 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -559,7 +559,7 @@ struct i2c_init_data {
u8 hsscll_12;
};
-static const struct i2c_init_data const omap4_i2c_timing_data[] __initconst = {
+static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = {
{
.load = 50,
.loadbits = 0x3,
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c
index 4878ba90026d..289e036c9c30 100644
--- a/arch/arm/mach-spear/time.c
+++ b/arch/arm/mach-spear/time.c
@@ -204,7 +204,7 @@ static void __init spear_clockevent_init(int irq)
setup_irq(irq, &spear_timer_irq);
}
-static const struct of_device_id const timer_of_match[] __initconst = {
+static const struct of_device_id timer_of_match[] __initconst = {
{ .compatible = "st,spear-timer", },
{ },
};
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c742dfd2967b..bd83c531828a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
{
struct dma_iommu_mapping *mapping;
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
iommu_detach_device(mapping->domain, dev);
kref_put(&mapping->kref, release_iommu_mapping);
to_dma_iommu_mapping(dev) = NULL;
+ set_dma_ops(dev, NULL);
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
-
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
- __arm_iommu_detach_device(dev);
- set_dma_ops(dev, NULL);
-}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
if (!mapping)
return;
- __arm_iommu_detach_device(dev);
+ arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->dma_ops = xen_dma_ops;
}
#endif
+ dev->archdata.dma_ops_setup = true;
}
void arch_teardown_dma_ops(struct device *dev)
{
+ if (!dev->archdata.dma_ops_setup)
+ return;
+
arm_teardown_iommu_dma_ops(dev);
}
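After this refactor a driver uses one public detach entry point, which now also resets the device's dma_ops. An illustrative sequence against the existing 32-bit ARM IOMMU API (the mapping window below is an arbitrary example):

struct dma_iommu_mapping *mapping;

mapping = arm_iommu_create_mapping(&platform_bus_type,
				   0x80000000, SZ_128M);
if (!IS_ERR(mapping)) {
	if (!arm_iommu_attach_device(dev, mapping)) {
		/* DMA is now translated by the IOMMU */
		arm_iommu_detach_device(dev);	/* also clears dma_ops */
	}
	arm_iommu_release_mapping(mapping);
}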
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2239fde10b80..f0701d8d24df 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
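vm_start_gap() comes from the stack guard-gap rework this hooks into: it folds the gap below a grows-down stack into the VMA's effective start, so a hinted address inside the gap no longer passes the check. Roughly (simplified; the real helper also guards against underflow):

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN)
		vm_start -= stack_guard_gap;	/* default: 256 pages */
	return vm_start;
}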
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 31af3cb59a60..e46a6a446cdd 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1218,15 +1218,15 @@ void __init adjust_lowmem_bounds(void)
high_memory = __va(arm_lowmem_limit - 1) + 1;
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
/*
* Round the memblock limit down to a pmd size. This
* helps to ensure that we will allocate memory from the
* last full pmd, which should be mapped.
*/
- if (memblock_limit)
- memblock_limit = round_down(memblock_limit, PMD_SIZE);
- if (!memblock_limit)
- memblock_limit = arm_lowmem_limit;
+ memblock_limit = round_down(memblock_limit, PMD_SIZE);
if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
if (memblock_end_of_DRAM() > arm_lowmem_limit) {
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3dcd7ec69bca..95c7ed392003 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -205,7 +205,7 @@ config GENERIC_CALIBRATE_DELAY
config ZONE_DMA
def_bool y
-config HAVE_GENERIC_RCU_GUP
+config HAVE_GENERIC_GUP
def_bool y
config ARCH_DMA_ADDR_T_64BIT
@@ -1084,10 +1084,6 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
-config KEYS_COMPAT
- def_bool y
- depends on COMPAT && KEYS
-
endmenu
menu "Power management options"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 4afcffcb46cb..73272f43ca01 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -106,8 +106,13 @@ config ARCH_MVEBU
select ARMADA_AP806_SYSCON
select ARMADA_CP110_SYSCON
select ARMADA_37XX_CLK
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
select MVEBU_ODMI
select MVEBU_PIC
+ select OF_GPIO
+ select PINCTRL
+ select PINCTRL_ARMADA_37XX
help
	  This enables support for the Marvell EBU family, including:
- Armada 3700 SoC Family
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 7dedf2d8494e..f839ecd919f9 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -102,12 +102,12 @@ libs-y := arch/arm64/lib/ $(libs-y)
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
-KBUILD_IMAGE := Image.gz
+boot := arch/arm64/boot
+KBUILD_IMAGE := $(boot)/Image.gz
KBUILD_DTBS := dtbs
-all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
+all: Image.gz $(KBUILD_DTBS)
-boot := arch/arm64/boot
Image: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index c7f669f5884f..166c9ef884dc 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -406,8 +406,9 @@
r_ccu: clock@1f01400 {
compatible = "allwinner,sun50i-a64-r-ccu";
reg = <0x01f01400 0x100>;
- clocks = <&osc24M>, <&osc32k>, <&iosc>;
- clock-names = "hosc", "losc", "iosc";
+ clocks = <&osc24M>, <&osc32k>, <&iosc>,
+ <&ccu 11>;
+ clock-names = "hosc", "losc", "iosc", "pll-periph";
#clock-cells = <1>;
#reset-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 4d314a253fd9..732e2e06f503 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -40,7 +40,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "sunxi-h3-h5.dtsi"
+#include <arm/sunxi-h3-h5.dtsi>
/ {
cpus {
diff --git a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi b/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
deleted file mode 120000
index 036f01dc2b9b..000000000000
--- a/arch/arm64/boot/dts/allwinner/sunxi-h3-h5.dtsi
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm/boot/dts/sunxi-h3-h5.dtsi \ No newline at end of file
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 9b4ba7169210..49f6a6242cf9 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -81,6 +81,45 @@
};
};
+ reg_sys_5v: regulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "SYS_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ reg_vdd_3v3: regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ vin-supply = <&reg_sys_5v>;
+ };
+
+ reg_5v_hub: regulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_HUB";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ gpio = <&gpio0 7 0>;
+ regulator-always-on;
+ vin-supply = <&reg_sys_5v>;
+ };
+
+ wl1835_pwrseq: wl1835-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ /* WLAN_EN GPIO */
+ reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ clocks = <&pmic>;
+ clock-names = "ext_clock";
+ power-off-delay-us = <10>;
+ };
+
soc {
spi0: spi@f7106000 {
status = "ok";
@@ -256,11 +295,31 @@
/* GPIO blocks 16 thru 19 do not appear to be routed to pins */
+ dwmmc_0: dwmmc0@f723d000 {
+ cap-mmc-highspeed;
+ non-removable;
+ bus-width = <0x8>;
+ vmmc-supply = <&ldo19>;
+ };
+
+ dwmmc_1: dwmmc1@f723e000 {
+ card-detect-delay = <200>;
+ cap-sd-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ vqmmc-supply = <&ldo7>;
+ vmmc-supply = <&ldo10>;
+ bus-width = <0x4>;
+ disable-wp;
+ cd-gpios = <&gpio1 0 1>;
+ };
+
dwmmc_2: dwmmc2@f723f000 {
- ti,non-removable;
+ bus-width = <0x4>;
non-removable;
- /* WL_EN */
- vmmc-supply = <&wlan_en_reg>;
+ vmmc-supply = <&reg_vdd_3v3>;
+ mmc-pwrseq = <&wl1835_pwrseq>;
#address-cells = <0x1>;
#size-cells = <0x0>;
@@ -272,18 +331,6 @@
interrupts = <3 IRQ_TYPE_EDGE_RISING>;
};
};
-
- wlan_en_reg: regulator@1 {
- compatible = "regulator-fixed";
- regulator-name = "wlan-en-regulator";
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- /* WLAN_EN GPIO */
- gpio = <&gpio0 5 0>;
- /* WLAN card specific delay */
- startup-delay-us = <70000>;
- enable-active-high;
- };
};
leds {
@@ -330,6 +377,7 @@
pmic: pmic@f8000000 {
compatible = "hisilicon,hi655x-pmic";
reg = <0x0 0xf8000000 0x0 0x1000>;
+ #clock-cells = <0>;
interrupt-controller;
#interrupt-cells = <2>;
pmic-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
@@ -411,6 +459,13 @@
};
};
};
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+ };
};
&uart2 {
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 1e5129b19280..5013e4b2ea71 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -725,20 +725,10 @@
status = "disabled";
};
- fixed_5v_hub: regulator@0 {
- compatible = "regulator-fixed";
- regulator-name = "fixed_5v_hub";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- regulator-boot-on;
- gpio = <&gpio0 7 0>;
- regulator-always-on;
- };
-
usb_phy: usbphy {
compatible = "hisilicon,hi6220-usb-phy";
#phy-cells = <0>;
- phy-supply = <&fixed_5v_hub>;
+ phy-supply = <&reg_5v_hub>;
hisilicon,peripheral-syscon = <&sys_ctrl>;
};
@@ -766,17 +756,12 @@
dwmmc_0: dwmmc0@f723d000 {
compatible = "hisilicon,hi6220-dw-mshc";
- num-slots = <0x1>;
- cap-mmc-highspeed;
- non-removable;
reg = <0x0 0xf723d000 0x0 0x1000>;
interrupts = <0x0 0x48 0x4>;
clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
clock-names = "ciu", "biu";
resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
reset-names = "reset";
- bus-width = <0x8>;
- vmmc-supply = <&ldo19>;
pinctrl-names = "default";
pinctrl-0 = <&emmc_pmx_func &emmc_clk_cfg_func
&emmc_cfg_func &emmc_rst_cfg_func>;
@@ -784,13 +769,7 @@
dwmmc_1: dwmmc1@f723e000 {
compatible = "hisilicon,hi6220-dw-mshc";
- num-slots = <0x1>;
- card-detect-delay = <200>;
hisilicon,peripheral-syscon = <&ao_ctrl>;
- cap-sd-highspeed;
- sd-uhs-sdr12;
- sd-uhs-sdr25;
- sd-uhs-sdr50;
reg = <0x0 0xf723e000 0x0 0x1000>;
interrupts = <0x0 0x49 0x4>;
#address-cells = <0x1>;
@@ -799,11 +778,6 @@
clock-names = "ciu", "biu";
resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
reset-names = "reset";
- vqmmc-supply = <&ldo7>;
- vmmc-supply = <&ldo10>;
- bus-width = <0x4>;
- disable-wp;
- cd-gpios = <&gpio1 0 1>;
pinctrl-names = "default", "idle";
pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func &sd_cfg_func>;
pinctrl-1 = <&sd_pmx_idle &sd_clk_cfg_idle &sd_cfg_idle>;
@@ -811,15 +785,12 @@
dwmmc_2: dwmmc2@f723f000 {
compatible = "hisilicon,hi6220-dw-mshc";
- num-slots = <0x1>;
reg = <0x0 0xf723f000 0x0 0x1000>;
interrupts = <0x0 0x4a 0x4>;
clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
clock-names = "ciu", "biu";
resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
reset-names = "reset";
- bus-width = <0x4>;
- broken-cd;
pinctrl-names = "default", "idle";
pinctrl-0 = <&sdio_pmx_func &sdio_clk_cfg_func &sdio_cfg_func>;
pinctrl-1 = <&sdio_pmx_idle &sdio_clk_cfg_idle &sdio_cfg_idle>;
diff --git a/arch/arm64/boot/dts/include/arm b/arch/arm64/boot/dts/include/arm
deleted file mode 120000
index cf63d80e2b93..000000000000
--- a/arch/arm64/boot/dts/include/arm
+++ /dev/null
@@ -1 +0,0 @@
-../../../../arm/boot/dts \ No newline at end of file
diff --git a/arch/arm64/boot/dts/include/arm64 b/arch/arm64/boot/dts/include/arm64
deleted file mode 120000
index a96aa0ea9d8c..000000000000
--- a/arch/arm64/boot/dts/include/arm64
+++ /dev/null
@@ -1 +0,0 @@
-.. \ No newline at end of file
diff --git a/arch/arm64/boot/dts/include/dt-bindings b/arch/arm64/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/arm64/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
index cef5f976bc0f..a89855f57091 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
@@ -79,6 +79,8 @@
};
&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
status = "okay";
gpio_exp: pca9555@22 {
@@ -113,6 +115,8 @@
&spi0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_quad_pins>;
m25p80@0 {
compatible = "jedec,spi-nor";
@@ -143,6 +147,8 @@
/* Exported on the micro USB connector CON32 through an FTDI */
&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart1_pins>;
status = "okay";
};
@@ -184,6 +190,8 @@
};
&eth0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
phy-mode = "rgmii-id";
phy = <&phy0>;
status = "okay";
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 58ae9e095af2..4d495ec39202 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -161,16 +161,83 @@
#clock-cells = <1>;
};
- gpio1: gpio@13800 {
- compatible = "marvell,mvebu-gpio-3700",
+ pinctrl_nb: pinctrl@13800 {
+ compatible = "marvell,armada3710-nb-pinctrl",
"syscon", "simple-mfd";
- reg = <0x13800 0x500>;
+ reg = <0x13800 0x100>, <0x13C00 0x20>;
+ gpionb: gpio {
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_nb 0 0 36>;
+ gpio-controller;
+ interrupts =
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+
+ };
xtalclk: xtal-clk {
compatible = "marvell,armada-3700-xtal-clock";
clock-output-names = "xtal";
#clock-cells = <0>;
};
+
+ spi_quad_pins: spi-quad-pins {
+ groups = "spi_quad";
+ function = "spi";
+ };
+
+ i2c1_pins: i2c1-pins {
+ groups = "i2c1";
+ function = "i2c";
+ };
+
+ i2c2_pins: i2c2-pins {
+ groups = "i2c2";
+ function = "i2c";
+ };
+
+ uart1_pins: uart1-pins {
+ groups = "uart1";
+ function = "uart";
+ };
+
+ uart2_pins: uart2-pins {
+ groups = "uart2";
+ function = "uart";
+ };
+ };
+
+ pinctrl_sb: pinctrl@18800 {
+ compatible = "marvell,armada3710-sb-pinctrl",
+ "syscon", "simple-mfd";
+ reg = <0x18800 0x100>, <0x18C00 0x20>;
+ gpiosb: gpio {
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl_sb 0 0 29>;
+ gpio-controller;
+ interrupts =
+ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ rgmii_pins: mii-pins {
+ groups = "rgmii";
+ function = "mii";
+ };
+
};
eth0: ethernet@30000 {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index ac8df5201cd6..b4bc42ece754 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -231,8 +231,7 @@
cpm_crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
reg = <0x800000 0x200000>;
- interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
- | IRQ_TYPE_LEVEL_HIGH)>,
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index 7740a75a8230..6e2058847ddc 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -221,8 +221,7 @@
cps_crypto: crypto@800000 {
compatible = "inside-secure,safexcel-eip197";
reg = <0x800000 0x200000>;
- interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
- | IRQ_TYPE_LEVEL_HIGH)>,
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 0ecaad4333a7..1c3634fa94bf 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -134,6 +134,9 @@
bus-width = <8>;
max-frequency = <50000000>;
cap-mmc-highspeed;
+	mediatek,hs200-cmd-int-delay = <26>;
+	mediatek,hs400-cmd-int-delay = <14>;
+	mediatek,hs400-cmd-resp-sel-rising;
vmmc-supply = <&mt6397_vemc_3v3_reg>;
vqmmc-supply = <&mt6397_vio18_reg>;
non-removable;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 658bb9dc9dfd..7bd31066399b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -44,7 +44,7 @@
/dts-v1/;
#include "rk3399-gru.dtsi"
-#include <include/dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/input/linux-event-codes.h>
/*
* Kevin-specific things
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index ce072859e3b2..97c123e09e45 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -30,7 +30,6 @@ CONFIG_PROFILING=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_ALPINE=y
@@ -62,16 +61,16 @@ CONFIG_ARCH_XGENE=y
CONFIG_ARCH_ZX=y
CONFIG_ARCH_ZYNQMP=y
CONFIG_PCI=y
-CONFIG_PCI_MSI=y
CONFIG_PCI_IOV=y
-CONFIG_PCI_AARDVARK=y
-CONFIG_PCIE_RCAR=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_XGENE=y
CONFIG_PCI_LAYERSCAPE=y
CONFIG_PCI_HISI=y
CONFIG_PCIE_QCOM=y
CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
CONFIG_ARM64_VA_BITS_48=y
CONFIG_SCHED_MC=y
CONFIG_NUMA=y
@@ -80,12 +79,11 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA=y
CONFIG_SECCOMP=y
-CONFIG_XEN=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
-CONFIG_CPU_IDLE=y
CONFIG_HIBERNATION=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
@@ -155,8 +153,8 @@ CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_VIRTIO_BLK=y
-CONFIG_EEPROM_AT25=m
CONFIG_SRAM=y
+CONFIG_EEPROM_AT25=m
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SAS_ATA=y
@@ -168,8 +166,8 @@ CONFIG_AHCI_CEVA=y
CONFIG_AHCI_MVEBU=y
CONFIG_AHCI_XGENE=y
CONFIG_AHCI_QORIQ=y
-CONFIG_SATA_RCAR=y
CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
CONFIG_PATA_PLATFORM=y
CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
@@ -186,18 +184,17 @@ CONFIG_HNS_ENET=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IGBVF=y
-CONFIG_MVPP2=y
CONFIG_MVNETA=y
+CONFIG_MVPP2=y
CONFIG_SKY2=y
CONFIG_RAVB=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=m
-CONFIG_REALTEK_PHY=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_MESON_GXL_PHY=m
CONFIG_MICREL_PHY=y
-CONFIG_MDIO_BUS_MUX=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_REALTEK_PHY=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_RTL8152=m
@@ -212,6 +209,8 @@ CONFIG_BRCMFMAC=m
CONFIG_WL18XX=m
CONFIG_WLCORE_SDIO=m
CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
@@ -230,14 +229,14 @@ CONFIG_SERIAL_8250_UNIPHIER=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_TEGRA=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=11
CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_MESON=y
-CONFIG_SERIAL_MESON_CONSOLE=y
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_XILINX_PS_UART=y
@@ -261,14 +260,15 @@ CONFIG_I2C_UNIPHIER_F=y
CONFIG_I2C_RCAR=y
CONFIG_I2C_CROS_EC_TUNNEL=y
CONFIG_SPI=y
-CONFIG_SPI_MESON_SPIFC=m
CONFIG_SPI_BCM2835=m
CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_MESON_SPIFC=m
CONFIG_SPI_ORION=y
CONFIG_SPI_PL022=y
CONFIG_SPI_QUP=y
-CONFIG_SPI_SPIDEV=m
+CONFIG_SPI_ROCKCHIP=y
CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MAX77620=y
@@ -286,33 +286,33 @@ CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_MAX77620=y
CONFIG_POWER_RESET_MSM=y
-CONFIG_BATTERY_BQ27XXX=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCPI=y
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_INA2XX=m
-CONFIG_SENSORS_ARM_SCPI=y
-CONFIG_THERMAL=y
-CONFIG_THERMAL_EMULATION=y
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
-CONFIG_BCM2835_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
CONFIG_WATCHDOG=y
-CONFIG_BCM2835_WDT=y
-CONFIG_RENESAS_WDT=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MESON_GXBB_WATCHDOG=m
CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI655X_PMIC=y
CONFIG_MFD_MAX77620=y
-CONFIG_MFD_RK808=y
CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
CONFIG_MFD_SEC_CORE=y
-CONFIG_MFD_HI655X_PMIC=y
-CONFIG_REGULATOR=y
-CONFIG_MFD_CROS_EC=y
-CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_HI655X=y
@@ -345,13 +345,12 @@ CONFIG_DRM_EXYNOS_DSI=y
CONFIG_DRM_EXYNOS_HDMI=y
CONFIG_DRM_EXYNOS_MIC=y
CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_HDMI=y
CONFIG_DRM_RCAR_LVDS=y
CONFIG_DRM_RCAR_VSP=y
CONFIG_DRM_TEGRA=m
-CONFIG_DRM_VC4=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_VC4=m
CONFIG_DRM_HISI_KIRIN=m
CONFIG_DRM_MESON=m
CONFIG_FB=y
@@ -366,39 +365,37 @@ CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_BCM2835_SOC_I2S=m
-CONFIG_SND_SOC_RCAR=y
CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=y
CONFIG_SND_SOC_AK4613=y
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_XHCI_PLATFORM=y
-CONFIG_USB_XHCI_RCAR=y
-CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_XHCI_TEGRA=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MSM=y
+CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_OHCI_EXYNOS=y
CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_STORAGE=y
-CONFIG_USB_DWC2=y
CONFIG_USB_DWC3=y
+CONFIG_USB_DWC2=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_ISP1760=y
CONFIG_USB_HSIC_USB3503=y
CONFIG_USB_MSM_OTG=y
+CONFIG_USB_QCOM_8X16_PHY=y
CONFIG_USB_ULPI=y
CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
-CONFIG_MMC_MESON_GX=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_ACPI=y
CONFIG_MMC_SDHCI_PLTFM=y
@@ -406,6 +403,7 @@ CONFIG_MMC_SDHCI_OF_ARASAN=y
CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_MESON_GX=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SPI=y
CONFIG_MMC_SDHI=y
@@ -414,32 +412,31 @@ CONFIG_MMC_DW_EXYNOS=y
CONFIG_MMC_DW_K3=y
CONFIG_MMC_DW_ROCKCHIP=y
CONFIG_MMC_SUNXI=y
-CONFIG_MMC_SDHCI_XENON=y
CONFIG_MMC_BCM2835=y
+CONFIG_MMC_SDHCI_XENON=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_PWM=y
CONFIG_LEDS_SYSCON=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_S3C=y
CONFIG_RTC_DRV_PL031=y
CONFIG_RTC_DRV_SUN6I=y
-CONFIG_RTC_DRV_RK808=m
CONFIG_RTC_DRV_TEGRA=y
CONFIG_RTC_DRV_XGENE=y
-CONFIG_RTC_DRV_S3C=y
CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=m
CONFIG_MV_XOR_V2=y
CONFIG_PL330_DMA=y
-CONFIG_DMA_BCM2835=m
CONFIG_TEGRA20_APB_DMA=y
CONFIG_QCOM_BAM_DMA=y
CONFIG_QCOM_HIDMA_MGMT=y
@@ -452,52 +449,56 @@ CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
CONFIG_XEN_GNTDEV=y
CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_COMMON_CLK_RK808=y
CONFIG_COMMON_CLK_SCPI=y
CONFIG_COMMON_CLK_CS2000_CP=y
CONFIG_COMMON_CLK_S2MPS11=y
-CONFIG_COMMON_CLK_PWM=y
-CONFIG_COMMON_CLK_RK808=y
CONFIG_CLK_QORIQ=y
+CONFIG_COMMON_CLK_PWM=y
CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_MSM_GCC_8916=y
CONFIG_MSM_GCC_8994=y
CONFIG_MSM_MMCC_8996=y
CONFIG_HWSPINLOCK_QCOM=y
-CONFIG_MAILBOX=y
CONFIG_ARM_MHU=y
CONFIG_PLATFORM_MHU=y
CONFIG_BCM2835_MBOX=y
CONFIG_HI6220_MBOX=y
CONFIG_ARM_SMMU=y
CONFIG_ARM_SMMU_V3=y
+CONFIG_RPMSG_QCOM_SMD=y
CONFIG_RASPBERRYPI_POWER=y
CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD=y
CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
CONFIG_ARCH_TEGRA_186_SOC=y
CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
CONFIG_PWM=y
CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_MESON=m
CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
CONFIG_PWM_TEGRA=m
-CONFIG_PWM_MESON=m
-CONFIG_COMMON_RESET_HI6220=y
CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_ROCKCHIP_INNO_USB2=y
CONFIG_PHY_ROCKCHIP_EMMC=y
-CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_XGENE=y
CONFIG_PHY_TEGRA_XUSB=y
CONFIG_ARM_SCPI_PROTOCOL=y
-CONFIG_ACPI=y
-CONFIG_IIO=y
-CONFIG_EXYNOS_ADC=y
-CONFIG_PWM_SAMSUNG=y
CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_ACPI=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -511,7 +512,6 @@ CONFIG_FUSE_FS=m
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_EFIVAR_FS=y
@@ -539,11 +539,9 @@ CONFIG_MEMTEST=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_DEV_SAFEXCEL=m
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-# CONFIG_CRYPTO_AES_ARM64_NEON_BLK is not set
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 0e99978da3f0..59cca1d6ec54 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -23,9 +23,9 @@
#define ACPI_MADT_GICC_LENGTH \
(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
-#define BAD_MADT_GICC_ENTRY(entry, end) \
- (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
- (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+ (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
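The reordering matters for truncated tables: the entry's self-reported length is validated first and then used, instead of sizeof(*entry), for the end-of-table bound. Expanded into a function for readability (a sketch; the kernel keeps this as a macro):

static bool madt_gicc_entry_ok(struct acpi_madt_generic_interrupt *entry,
			       unsigned long end)
{
	if (!entry || entry->header.length != ACPI_MADT_GICC_LENGTH)
		return false;	/* NULL, or a short/long (corrupt) entry */

	return (unsigned long)entry + ACPI_MADT_GICC_LENGTH <= end;
}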
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index df411f3e083c..ecd9788cd298 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -62,4 +62,13 @@ alternative_if ARM64_ALT_PAN_NOT_UAO
alternative_else_nop_endif
.endm
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+ .macro clear_address_tag, dst, addr
+ tst \addr, #(1 << 55)
+ bic \dst, \addr, #(0xff << 56)
+ csel \dst, \dst, \addr, eq
+ .endm
+
#endif
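A C-level model of the new macro, on the assumption its users rely on: bit 55 separates TTBR0 (user) from TTBR1 (kernel) addresses, so only user addresses have the tag byte cleared:

static inline unsigned long clear_address_tag(unsigned long addr)
{
	if (addr & (1UL << 55))
		return addr;			/* TTBR1 (kernel): keep as-is */

	return addr & ~(0xffUL << 56);		/* TTBR0 (user): strip tag byte */
}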
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f819fdcff1ac..f5a2d09afb38 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
" st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
- " mov %" #w "[oldval], %" #w "[old]\n" \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(unsigned long *)ptr) \
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 7457ce082b5f..99fa69c9c3cf 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -322,7 +322,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
@@ -394,7 +394,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
- register long x0 asm ("w0") = i; \
+ register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 4e0497f581a0..0fe7e43b7fbc 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -42,25 +42,35 @@
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
-#define __smp_store_release(p, v) \
+#define __smp_store_release(p, v) \
do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u8 *)__u.__c) \
+ : "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u16 *)__u.__c) \
+ : "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u32 *)__u.__c) \
+ : "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u64 *)__u.__c) \
+ : "memory"); \
break; \
} \
} while (0)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 91b26d26af8a..ae852add053d 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -46,7 +46,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \
" swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
__nops(3) \
" " #nop_lse) \
- : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
: "r" (x) \
: cl); \
\
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e7f84a7b4465..428ee1f2468c 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
bool this_cpu_has_cap(unsigned int cap);
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
}
/* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
return test_bit(num, cpu_hwcaps);
}
+static inline bool cpus_have_const_cap(int num)
+{
+ if (static_branch_likely(&arm64_const_caps_ready))
+ return __cpus_have_const_cap(num);
+ else
+ return cpus_have_cap(num);
+}
+
static inline void cpus_set_cap(unsigned int num)
{
if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
num, ARM64_NCAPS);
} else {
__set_bit(num, cpu_hwcaps);
- static_branch_enable(&cpu_hwcap_keys[num]);
}
}
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f5ea0ba70f07..fe39e6841326 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -240,6 +240,12 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
+static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+{
+ u32 esr = kvm_vcpu_get_hsr(vcpu);
+ return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+}
+
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5e19165c5fa8..1f252a95bc02 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long vector_ptr)
{
/*
- * Call initialization code, and switch to the full blown
- * HYP code.
+ * Call initialization code, and switch to the full blown HYP code.
+ * If the cpucaps haven't been finalized yet, something has gone very
+ * wrong, and hyp will crash and burn when it uses any
+ * cpus_have_const_cap() wrapper.
*/
+ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 15c142ce991c..b4d13d9267ff 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,6 +286,10 @@
#define SCTLR_ELx_A (1 << 1)
#define SCTLR_ELx_M 1
+#define SCTLR_EL2_RES1	((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
+			 (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
+			 (1 << 29))
+
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index ba497172610d..7b8a04789cef 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -69,20 +69,21 @@ static inline void set_fs(mm_segment_t fs)
*/
#define __range_ok(addr, size) \
({ \
+ unsigned long __addr = (unsigned long __force)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
- : "1" (addr), "Ir" (size), \
+ : "1" (__addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
: "cc"); \
flag; \
})
/*
- * When dealing with data aborts or instruction traps we may end up with
- * a tagged userland pointer. Clear the tag to get a sane pointer to pass
- * on to access_ok(), for instance.
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
@@ -230,7 +231,7 @@ do { \
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr), \
+ __get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
@@ -297,7 +298,7 @@ do { \
(err), ARM64_HAS_UAO); \
break; \
case 8: \
- __put_user_asm("str", "sttr", "%", __pu_val, (ptr), \
+ __put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
(err), ARM64_HAS_UAO); \
break; \
default: \
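Relatedly, the untagged_addr() mentioned in the comment above is sign_extend64(addr, 55). A user-space model showing why one sign-extension handles both tagged user pointers and kernel pointers:

#include <stdint.h>

static inline uint64_t untagged(uint64_t addr)
{
	/* sign-extend from bit 55: shift the tag byte out, then
	 * arithmetic-shift back so bit 55 fills bits 56-63 */
	return (uint64_t)(((int64_t)addr << 8) >> 8);
}

/* untagged(0x1200000012345678) == 0x0000000012345678  (tag 0x12 stripped)
 * untagged(0xffff000008001000) == 0xffff000008001000  (kernel, unchanged)
 */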
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 825b0fe51c2b..13a97aa2285f 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -2,21 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += fcntl.h
-header-y += hwcap.h
-header-y += kvm_para.h
-header-y += perf_regs.h
-header-y += param.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += statfs.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 869ee480deed..70eea2ecc663 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -216,13 +216,17 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
+#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
-#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
+#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
+#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 657977e77ec8..f0e6d717885b 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -306,7 +306,8 @@ do { \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+ : "r" ((unsigned long)addr), "i" (-EAGAIN), \
+ "i" (-EFAULT), \
"i" (__SWP_LL_SC_LOOPS) \
: "memory"); \
uaccess_disable(); \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 94b8f7fc3310..817ce3365e20 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
*/
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
- for (; caps->matches; caps++)
- if (caps->enable && cpus_have_cap(caps->capability))
+ for (; caps->matches; caps++) {
+ unsigned int num = caps->capability;
+
+ if (!cpus_have_cap(num))
+ continue;
+
+ /* Ensure cpus_have_const_cap(num) works */
+ static_branch_enable(&cpu_hwcap_keys[num]);
+
+ if (caps->enable) {
/*
* Use stop_machine() as it schedules the work allowing
* us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
* we return.
*/
stop_machine(caps->enable, NULL, cpu_online_mask);
+ }
+ }
}
/*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
enable_cpu_capabilities(arm64_features);
}
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+ static_branch_enable(&arm64_const_caps_ready);
+}
+
/*
* Check if the current CPU has a given feature capability.
* Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
/* Set the CPU feature capabilities */
setup_feature_capabilities();
enable_errata_workarounds();
+ mark_const_caps_ready();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0())
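The reordering above makes every detected capability's static key fire before arm64_const_caps_ready is flipped, so cpus_have_const_cap() can trust the keys once the fast path goes live. A toy userspace model of that two-phase pattern, with plain bools standing in for the real jump-label keys (names illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define NCAPS 4

static bool cpu_hwcap_keys[NCAPS];	/* models the static-branch state */
static bool const_caps_ready;
static unsigned long cpu_hwcaps;	/* bitmap of detected caps */

static bool cpus_have_cap(unsigned int num)
{
	return cpu_hwcaps & (1UL << num);
}

static bool cpus_have_const_cap(unsigned int num)
{
	if (const_caps_ready)		/* fast path after boot */
		return cpu_hwcap_keys[num];
	return cpus_have_cap(num);	/* slow path during boot */
}

int main(void)
{
	unsigned int num;

	cpu_hwcaps = 0xa;		/* caps 1 and 3 detected */

	/* Enable every detected capability's key first... */
	for (num = 0; num < NCAPS; num++)
		if (cpus_have_cap(num))
			cpu_hwcap_keys[num] = true;

	/* ...and only then flip the switch, mirroring mark_const_caps_ready()
	 * running after enable_cpu_capabilities(). */
	const_caps_ready = true;

	for (num = 0; num < NCAPS; num++)
		printf("cap %u: %d\n", num, cpus_have_const_cap(num));
	return 0;
}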
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 5d17f377d905..82cd07592519 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,7 +11,6 @@
*
*/
-#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/init.h>
@@ -117,20 +116,6 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
set_permissions, md);
}
-static int __init arm64_dmi_init(void)
-{
- /*
- * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to
- * be called early because dmi_id_init(), which is an arch_initcall
- * itself, depends on dmi_scan_machine() having been called already.
- */
- dmi_scan_machine();
- if (dmi_available)
- dmi_set_dump_stack_arch_desc();
- return 0;
-}
-core_initcall(arm64_dmi_init);
-
/*
* UpdateCapsule() depends on the system being shutdown via
* ResetSystem().
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 43512d4d7df2..b738880350f9 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -428,12 +428,13 @@ el1_da:
/*
* Data abort handling
*/
- mrs x0, far_el1
+ mrs x3, far_el1
enable_dbg
// re-enable interrupts if they were enabled in the aborted context
tbnz x23, #7, 1f // PSR_I_BIT
enable_irq
1:
+ clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
@@ -594,7 +595,7 @@ el0_da:
// enable interrupts before calling the main handler
enable_dbg_and_irq
ct_user_exit
- bic x0, x26, #(0xff << 56)
+ clear_address_tag x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 0296e7924240..749f81779420 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -36,6 +36,7 @@
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
+#include <asm/uaccess.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -721,6 +722,8 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
u64 wp_low, wp_high;
u32 lens, lene;
+ addr = untagged_addr(addr);
+
lens = __ffs(ctrl->len);
lene = __fls(ctrl->len);
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index c9a2ab446dc6..f035ff6fb223 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,11 +32,16 @@
void *module_alloc(unsigned long size)
{
+ gfp_t gfp_mask = GFP_KERNEL;
void *p;
+ /* Silence the initial allocation */
+ if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
+ gfp_mask |= __GFP_NOWARN;
+
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
module_alloc_base + MODULES_VSIZE,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ gfp_mask, PAGE_KERNEL_EXEC, 0,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
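The pattern here is try-quietly-then-fall-back: the first allocation in the narrow module region sets __GFP_NOWARN because its failure is expected and handled, and only the wider fallback should warn. A hedged userspace analogue, with malloc() standing in for the range-restricted vmalloc (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a range-restricted allocator that can fail. */
static void *alloc_in_range(size_t size, int wide, int nowarn)
{
	void *p = wide ? malloc(size) : NULL;	/* pretend the narrow range is full */

	if (!p && !nowarn)
		fprintf(stderr, "allocation of %zu bytes failed\n", size);
	return p;
}

static void *module_alloc_sketch(size_t size)
{
	/* First try: narrow region, warning suppressed (__GFP_NOWARN). */
	void *p = alloc_in_range(size, 0, 1);

	if (!p)		/* Fallback: whole vmalloc space, warn on failure. */
		p = alloc_in_range(size, 1, 0);
	return p;
}

int main(void)
{
	void *p = module_alloc_sketch(128);

	printf("got %p\n", p);
	free(p);
	return 0;
}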
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4f0e3ebfea4b..c7e3e6387a49 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
- if (!root_ops)
+ if (!root_ops) {
+ kfree(ri);
return NULL;
+ }
ri->cfg = pci_acpi_setup_ecam_mapping(root);
if (!ri->cfg) {
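The leak fix above follows the usual ownership rule: once `ri` is allocated, every later failure exit must release it before returning. A compact sketch of that unwind pattern:

#include <stdlib.h>

struct resource_info { void *cfg; };

static struct resource_info *setup(void)
{
	struct resource_info *ri = calloc(1, sizeof(*ri));
	void *ops;

	if (!ri)
		return NULL;

	ops = calloc(1, 64);
	if (!ops) {
		free(ri);	/* this was leaked before the fix */
		return NULL;
	}
	/* ... further setup, each failure unwinding what came before ... */
	free(ops);
	return ri;
}

int main(void)
{
	free(setup());
	return 0;
}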
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bcc79471b38e..83a1b1ad189f 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
if (attr->exclude_idle)
return -EPERM;
- if (is_kernel_in_hyp_mode() &&
- attr->exclude_kernel != attr->exclude_hv)
- return -EINVAL;
+
+ /*
+ * If we're running in hyp mode, then we *are* the hypervisor.
+ * Therefore we ignore exclude_hv in this configuration, since
+ * there's no hypervisor to sample anyway. This is consistent
+ * with other architectures (x86 and Power).
+ */
+ if (is_kernel_in_hyp_mode()) {
+ if (!attr->exclude_kernel)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ } else {
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+ if (!attr->exclude_hv)
+ config_base |= ARMV8_PMU_INCLUDE_EL2;
+ }
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
- if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
- config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (!attr->exclude_hv)
- config_base |= ARMV8_PMU_INCLUDE_EL2;
/*
* Install the filter into config_base as this is used to
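The resulting filter logic is easier to see as a pure function of the exclude_* attributes. A sketch of the decision (bit values illustrative, not the real ARMv8 PMU event-filter encodings):

#include <stdbool.h>
#include <stdio.h>

#define EXCLUDE_EL1	(1u << 0)	/* kernel */
#define INCLUDE_EL2	(1u << 1)	/* hypervisor */
#define EXCLUDE_EL0	(1u << 2)	/* user */

static unsigned int filter(bool vhe, bool excl_kernel, bool excl_hv,
			   bool excl_user)
{
	unsigned int config_base = 0;

	if (vhe) {
		/* Kernel runs at EL2: exclude_hv is meaningless, so
		 * "include the kernel" means counting EL2. */
		if (!excl_kernel)
			config_base |= INCLUDE_EL2;
	} else {
		if (excl_kernel)
			config_base |= EXCLUDE_EL1;
		if (!excl_hv)
			config_base |= INCLUDE_EL2;
	}
	if (excl_user)
		config_base |= EXCLUDE_EL0;
	return config_base;
}

int main(void)
{
	printf("VHE,  count kernel: %#x\n", filter(true,  false, true, false));
	printf("!VHE, kernel only:  %#x\n", filter(false, false, true, true));
	return 0;
}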
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6e0e16a3a7d4..321119881abf 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -961,8 +961,7 @@ void smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
- if (system_state == SYSTEM_BOOTING ||
- system_state == SYSTEM_RUNNING)
+ if (system_state <= SYSTEM_RUNNING)
pr_crit("SMP: stopping secondary CPUs\n");
smp_cross_call(&mask, IPI_CPU_STOP);
}
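The simplified check relies on the ordering of the system_state enum: every state up to and including SYSTEM_RUNNING compares less-or-equal, so one comparison replaces the two equality tests. A sketch, with the enum mirroring the kernel's ordering (SYSTEM_SCHEDULING is added by the accompanying core rework):

#include <stdio.h>

enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_SCHEDULING,
	SYSTEM_RUNNING,
	SYSTEM_HALT,
	SYSTEM_POWER_OFF,
	SYSTEM_RESTART,
};

int main(void)
{
	int s;

	for (s = SYSTEM_BOOTING; s <= SYSTEM_RESTART; s++)
		printf("state %d: %s the secondary-CPU message\n", s,
		       s <= SYSTEM_RUNNING ? "prints" : "skips");
	return 0;
}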
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d4d6ae02cd55..0805b44f986a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -443,7 +443,7 @@ int cpu_enable_cache_maint_trap(void *__unused)
}
#define __user_cache_maint(insn, address, res) \
- if (untagged_addr(address) >= user_addr_max()) { \
+ if (address >= user_addr_max()) { \
res = -EFAULT; \
} else { \
uaccess_ttbr0_enable(); \
@@ -469,7 +469,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
- address = pt_regs_read_reg(regs, rt);
+ address = untagged_addr(pt_regs_read_reg(regs, rt));
switch (crm) {
case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 41b6e31f8f55..d0cb007fa482 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_time.tv_sec;
- vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
+ vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
+ tk->tkr_raw.shift) +
+ tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
- /* tkr_raw.xtime_nsec == 0 */
vdso_data->cs_mono_mult = tk->tkr_mono.mult;
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
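The vDSO keeps monotonic-raw nanoseconds pre-shifted by tkr_raw.shift so the hot path can add the cycle delta without converting; the update above folds tv_nsec and xtime_nsec into that representation, which is also why the matching lsl disappears from gettimeofday.S below. The arithmetic as a sketch, with made-up timekeeper values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 8;		/* tkr_raw.shift */
	uint64_t raw_sec = 100;		/* raw_time.tv_sec */
	uint64_t raw_nsec = 123456;	/* raw_time.tv_nsec */
	uint64_t xtime_nsec = 7000;	/* tkr_raw.xtime_nsec: already
					 * left-shifted by `shift` */

	/* What the vDSO now stores: everything in shifted-nsec units. */
	uint64_t vdso_raw_nsec = (raw_nsec << shift) + xtime_nsec;

	/* The fast path adds a (cycles * mult) delta, also a shifted-nsec
	 * quantity, then shifts back down once at the end. */
	uint64_t delta = 0;		/* pretend no new cycles */
	uint64_t nsec = (vdso_raw_nsec + delta) >> shift;

	printf("sec=%llu nsec=%llu\n",
	       (unsigned long long)raw_sec, (unsigned long long)nsec);
	return 0;
}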
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index e00b4671bd7c..76320e920965 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -256,7 +256,6 @@ monotonic_raw:
seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
- lsl x14, x14, x12
get_nsec_per_sec res=x9
lsl x9, x9, x12
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index afd51bebb9c5..5d9810086c25 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -7,14 +7,13 @@ CFLAGS_arm.o := -I.
CFLAGS_mmu.o := -I.
KVM=../../../virt/kvm
-ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 839425c24b1c..3f9615582377 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -106,10 +106,13 @@ __do_hyp_init:
tlbi alle2
dsb sy
- mrs x4, sctlr_el2
- and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
- ldr x5, =SCTLR_ELx_FLAGS
- orr x4, x4, x5
+ /*
+ * Preserve all the RES1 bits while setting the default flags,
+ * as well as the EE bit on BE. Drop the A flag since the compiler
+ * is allowed to generate unaligned accesses.
+ */
+ ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
msr sctlr_el2, x4
isb
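Rather than read-modify-write the inherited SCTLR_EL2, the init path now composes the value from scratch: the RES1 bits, the default flags minus the alignment-check bit, and EE only on big-endian. The composition in C (mask values illustrative, not guaranteed to match the architectural encodings):

#include <stdint.h>
#include <stdio.h>

#define SCTLR_EL2_RES1	0x30c50830ULL	/* bits that must read as 1 */
#define SCTLR_ELx_FLAGS	0x0000100fULL	/* M | A | C | SA | I */
#define SCTLR_ELx_A	0x00000002ULL	/* alignment-check enable */
#define SCTLR_ELx_EE	0x02000000ULL	/* exception endianness */

int main(void)
{
	int big_endian = 0;
	uint64_t sctlr = SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A);

	if (big_endian)		/* the CPU_BE() branch in the assembly */
		sctlr |= SCTLR_ELx_EE;

	printf("sctlr_el2 = %#llx\n", (unsigned long long)sctlr);
	return 0;
}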
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae8d8c3..14c4e3b14bcb 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
+ccflags-y += -fno-stack-protector
+
KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index efbe9e8e7a78..0fe27024a2e1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1529,8 +1529,8 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (hsr >> 5) & 0xf;
- int Rt2 = (hsr >> 10) & 0xf;
+ int Rt = kvm_vcpu_sys_get_rt(vcpu);
+ int Rt2 = (hsr >> 10) & 0x1f;
params.is_aarch32 = true;
params.is_32bit = false;
@@ -1586,7 +1586,7 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (hsr >> 5) & 0xf;
+ int Rt = kvm_vcpu_sys_get_rt(vcpu);
params.is_aarch32 = true;
params.is_32bit = true;
@@ -1688,7 +1688,7 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (esr >> 5) & 0x1f;
+ int Rt = kvm_vcpu_sys_get_rt(vcpu);
int ret;
trace_kvm_handle_sys_reg(esr);
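All three handlers now share one accessor for the Rt field, which also fixes its width: Rt occupies five bits ([9:5]) of the syndrome, so the old 0xf mask silently truncated register numbers above 15. A sketch:

#include <stdint.h>
#include <stdio.h>

/* Rt lives in bits [9:5] of the syndrome; five bits, so the mask is
 * 0x1f. The 0xf used before the fix dropped x16..x30. */
static inline int esr_get_rt(uint32_t esr)
{
	return (esr >> 5) & 0x1f;
}

int main(void)
{
	uint32_t esr = 17u << 5;	/* encode Rt = x17 */

	printf("old mask: x%d\n", (esr >> 5) & 0xf);	/* wrong: 1 */
	printf("new mask: x%d\n", esr_get_rt(esr));	/* right: 17 */
	return 0;
}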
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 79f37e37d367..6260b69e5622 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
* The vgic_set_vmcr() will convert to ICH_VMCR layout.
*/
- vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
- vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+ vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+ vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
vgic_set_vmcr(vcpu, &vmcr);
} else {
val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
* The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
* Extract it directly using ICC_CTLR_EL1 reg definitions.
*/
- val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
- val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+ val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+ val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
p->regval = val;
}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
p->regval = 0;
vgic_get_vmcr(vcpu, &vmcr);
- if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+ if (!vmcr.cbpr) {
if (p->is_write) {
vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
ICC_BPR1_EL1_SHIFT;
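The accessors now store CBPR and EOImode as discrete vmcr fields and translate through explicit shift/mask pairs instead of carrying a raw CTLR word around. The pack/unpack idiom, sketched with illustrative positions rather than the real ICC_CTLR_EL1 layout:

#include <stdint.h>
#include <stdio.h>

#define CBPR_SHIFT	0
#define CBPR_MASK	(1u << CBPR_SHIFT)
#define EOIM_SHIFT	1
#define EOIM_MASK	(1u << EOIM_SHIFT)

struct vmcr { uint8_t cbpr, eoim; };

static void unpack(uint32_t val, struct vmcr *v)
{
	v->cbpr = (val & CBPR_MASK) >> CBPR_SHIFT;
	v->eoim = (val & EOIM_MASK) >> EOIM_SHIFT;
}

static uint32_t pack(const struct vmcr *v)
{
	return ((v->cbpr << CBPR_SHIFT) & CBPR_MASK) |
	       ((v->eoim << EOIM_SHIFT) & EOIM_MASK);
}

int main(void)
{
	struct vmcr v;

	unpack(0x3, &v);
	printf("cbpr=%u eoim=%u repacked=%#x\n", v.cbpr, v.eoim, pack(&v));
	return 0;
}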
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c6e53580aefe..c870d6f01ac2 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly;
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
@@ -57,6 +58,7 @@ static const int bpf2a64[] = {
/* temporary registers for internal BPF JIT */
[TMP_REG_1] = A64_R(10),
[TMP_REG_2] = A64_R(11),
+ [TMP_REG_3] = A64_R(12),
/* tail_call_cnt */
[TCALL_CNT] = A64_R(26),
/* temporary register for blinding constants */
@@ -253,8 +255,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
*/
off = offsetof(struct bpf_array, ptrs);
emit_a64_mov_i64(tmp, off, ctx);
- emit(A64_LDR64(tmp, r2, tmp), ctx);
- emit(A64_LDR64(prg, tmp, r3), ctx);
+ emit(A64_ADD(1, tmp, r2, tmp), ctx);
+ emit(A64_LSL(1, prg, r3, 3), ctx);
+ emit(A64_LDR64(prg, tmp, prg), ctx);
emit(A64_CBZ(1, prg, jmp_offset), ctx);
/* goto *(prog->bpf_func + prologue_size); */
@@ -318,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
+ const u8 tmp3 = bpf2a64[TMP_REG_3];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
@@ -688,10 +692,10 @@ emit_cond_jmp:
emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
emit(A64_LDXR(isdw, tmp2, tmp), ctx);
emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
- emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+ emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
jmp_offset = -3;
check_imm19(jmp_offset);
- emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
break;
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
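Two fixes are visible in the JIT above: the tail-call load now computes ptrs + index * 8 as base plus shifted index instead of the broken double load, and the exclusive-store retry loop gains tmp3 because STXR's status register must be distinct from the register holding the data being stored. The loop the JIT emits is shape-equivalent to this C sketch, with the status kept separate from the stored value as the fix requires:

#include <stdatomic.h>
#include <stdio.h>

/* The LDXR/ADD/STXR sequence implements exactly this: load-exclusive,
 * add, store-exclusive, retry until the store wins. */
static void atomic_add_sketch(_Atomic long *p, long v)
{
	long old = atomic_load_explicit(p, memory_order_relaxed);
	long status;	/* plays the role of tmp3 */

	do {
		long new = old + v;	/* tmp2: the value being stored */
		status = !atomic_compare_exchange_weak(p, &old, new);
	} while (status);	/* CBNZ on the status, not the data */
}

int main(void)
{
	_Atomic long counter = 0;

	atomic_add_sketch(&counter, 5);
	printf("%ld\n", atomic_load(&counter));
	return 0;
}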
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index 85d4af97c986..dbdbb8a558df 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -75,11 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (tsk->thread.pc)
-
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) \
diff --git a/arch/blackfin/include/uapi/asm/Kbuild b/arch/blackfin/include/uapi/asm/Kbuild
index 0bd28f77abc3..b15bf6bc0e94 100644
--- a/arch/blackfin/include/uapi/asm/Kbuild
+++ b/arch/blackfin/include/uapi/asm/Kbuild
@@ -1,19 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += bfin_sport.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += fcntl.h
-header-y += fixed_code.h
-header-y += ioctls.h
-header-y += kvm_para.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += swab.h
-header-y += unistd.h
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index b9eb3da7f278..7c87b5be53b5 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -96,11 +96,6 @@ static inline void release_thread(struct task_struct *dead_task)
#define release_segments(mm) do { } while (0)
/*
- * saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
-
-/*
* saved kernel SP and DP of a blocked thread.
*/
#ifdef _BIG_ENDIAN
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index e9bc2b2b8147..13a97aa2285f 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -2,11 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += byteorder.h
-header-y += kvm_para.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += swab.h
-header-y += unistd.h
diff --git a/arch/cris/arch-v10/kernel/process.c b/arch/cris/arch-v10/kernel/process.c
index e299d30105b5..a2cdb1521aca 100644
--- a/arch/cris/arch-v10/kernel/process.c
+++ b/arch/cris/arch-v10/kernel/process.c
@@ -69,14 +69,6 @@ void hard_reset_now (void)
while(1) /* waiting for RETRIBUTION! */ ;
}
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
- return task_pt_regs(t)->irp;
-}
-
/* setup the child's kernel stack with a pt_regs and switch_stack on it.
* it will be un-nested during _resume and _ret_from_sys_call when the
* new thread is scheduled.
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 2735eb7671a5..b7cd6b9209a9 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -136,7 +136,6 @@ config ETRAX_NANDFLASH
bool "NAND flash support"
depends on ETRAX_ARCH_V32
select MTD_NAND
- select MTD_NAND_IDS
help
This option enables MTD mapping of NAND flash devices. Needed to use
NAND flash memories. If unsure, say Y.
diff --git a/arch/cris/arch-v32/kernel/process.c b/arch/cris/arch-v32/kernel/process.c
index c530a8fa87ce..fe87b383fbf3 100644
--- a/arch/cris/arch-v32/kernel/process.c
+++ b/arch/cris/arch-v32/kernel/process.c
@@ -85,14 +85,6 @@ hard_reset_now(void)
}
/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *t)
-{
- return task_pt_regs(t)->erp;
-}
-
-/*
* Setup the child's kernel stack with a pt_regs and call switch_stack() on it.
* It will be unnested during _resume and _ret_from_sys_call when the new thread
* is scheduled.
diff --git a/arch/cris/boot/dts/include/dt-bindings b/arch/cris/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/cris/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/cris/include/arch-v10/arch/Kbuild b/arch/cris/include/arch-v10/arch/Kbuild
deleted file mode 100644
index 1f0fc7a66f5f..000000000000
--- a/arch/cris/include/arch-v10/arch/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
-# CRISv10 arch
diff --git a/arch/cris/include/arch-v32/arch/Kbuild b/arch/cris/include/arch-v32/arch/Kbuild
deleted file mode 100644
index 2fd65c7e15c9..000000000000
--- a/arch/cris/include/arch-v32/arch/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
-# CRISv32 arch
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 15b815df29c1..bc2729e4b2c9 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -52,8 +52,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
diff --git a/arch/cris/include/uapi/arch-v10/arch/Kbuild b/arch/cris/include/uapi/arch-v10/arch/Kbuild
deleted file mode 100644
index 9048c87a782b..000000000000
--- a/arch/cris/include/uapi/arch-v10/arch/Kbuild
+++ /dev/null
@@ -1,5 +0,0 @@
-# UAPI Header export list
-header-y += sv_addr.agh
-header-y += sv_addr_ag.h
-header-y += svinto.h
-header-y += user.h
diff --git a/arch/cris/include/uapi/arch-v32/arch/Kbuild b/arch/cris/include/uapi/arch-v32/arch/Kbuild
deleted file mode 100644
index 59efffd16b61..000000000000
--- a/arch/cris/include/uapi/arch-v32/arch/Kbuild
+++ /dev/null
@@ -1,3 +0,0 @@
-# UAPI Header export list
-header-y += cryptocop.h
-header-y += user.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index d5564a0ae66a..b15bf6bc0e94 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,44 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += ../arch-v10/arch/
-header-y += ../arch-v32/arch/
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += elf.h
-header-y += elf_v10.h
-header-y += elf_v32.h
-header-y += errno.h
-header-y += ethernet.h
-header-y += etraxgpio.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += ptrace_v10.h
-header-y += ptrace_v32.h
-header-y += resource.h
-header-y += rs485.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += sync_serial.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index ddaeb9cc9143..e4d08d74ed9f 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -96,11 +96,6 @@ extern asmlinkage void *restore_user_regs(const struct user_context *target, ...
#define release_segments(mm) do { } while (0)
#define forget_segments() do { } while (0)
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bddefdacf..139093fab326 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
#define vxtime_lock() do {} while (0)
#define vxtime_unlock() do {} while (0)
+/* This attribute is used in include/linux/jiffies.h alongside
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data __attribute__((__section__(".data")))
+
#endif
diff --git a/arch/frv/include/uapi/asm/Kbuild b/arch/frv/include/uapi/asm/Kbuild
index 42a2b33461c0..b15bf6bc0e94 100644
--- a/arch/frv/include/uapi/asm/Kbuild
+++ b/arch/frv/include/uapi/asm/Kbuild
@@ -1,35 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += registers.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c
index 8414293f213a..20c5b79b55f9 100644
--- a/arch/frv/kernel/asm-offsets.c
+++ b/arch/frv/kernel/asm-offsets.c
@@ -14,21 +14,10 @@
#include <asm/thread_info.h>
#include <asm/gdb-stub.h>
-#define DEF_PTREG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct pt_regs, " #reg ")" \
- : : "i" (offsetof(struct pt_regs, reg)))
-
-#define DEF_IREG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct user_context, " #reg ")" \
- : : "i" (offsetof(struct user_context, reg)))
-
-#define DEF_FREG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct user_context, " #reg ")" \
- : : "i" (offsetof(struct user_context, reg)))
-
-#define DEF_0REG(sym, reg) \
- asm volatile("\n->" #sym " %0 offsetof(struct frv_frame0, " #reg ")" \
- : : "i" (offsetof(struct frv_frame0, reg)))
+#define DEF_PTREG(sym, reg) OFFSET(sym, pt_regs, reg)
+#define DEF_IREG(sym, reg) OFFSET(sym, user_context, reg)
+#define DEF_FREG(sym, reg) OFFSET(sym, user_context, reg)
+#define DEF_0REG(sym, reg) OFFSET(sym, frv_frame0, reg)
void foo(void)
{
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index 5a4c92abc99e..a957b374e3a6 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -198,15 +198,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- /* Check whether the thread is blocked in resume() */
- if (in_sched_functions(tsk->thread.pc))
- return ((unsigned long *)tsk->thread.fp)[2];
- else
- return tsk->thread.pc;
-}
-
int elf_check_arch(const struct elf32_hdr *hdr)
{
unsigned long hsr0 = __get_HSR(0);
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index da82c25301e7..46aa289c5102 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
addr = PAGE_ALIGN(addr);
vma = find_vma(current->mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto success;
}
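vm_start_gap() reports a stack VMA's start lowered by the guard gap, so the fit check must compare against it rather than vm_start, or a new mapping could land inside the gap. A simplified model:

#include <stdio.h>

#define PAGE_SIZE	4096ul
#define GUARD_PAGES	256ul	/* stack_guard_gap default, illustrative */

struct vma { unsigned long vm_start, vm_end; int grows_down; };

/* Simplified vm_start_gap(): stacks reserve a gap below themselves. */
static unsigned long vm_start_gap(const struct vma *v)
{
	if (v->grows_down)
		return v->vm_start - GUARD_PAGES * PAGE_SIZE;
	return v->vm_start;
}

int main(void)
{
	struct vma stack = { 0x7f0000000000ul, 0x7f0000800000ul, 1 };
	unsigned long addr = stack.vm_start - 4 * PAGE_SIZE;
	unsigned long len = 2 * PAGE_SIZE;

	/* The old vm_start check would accept this request; the fixed
	 * check refuses to place a mapping inside the guard gap. */
	printf("fits: %d\n", addr + len <= vm_start_gap(&stack));
	return 0;
}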
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 65132d7ae9e5..afa53147e66a 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -110,10 +110,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) \
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index fb6101a5d4f1..b15bf6bc0e94 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,30 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += siginfo.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/h8300/include/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h
index e140e46729ac..34212608371e 100644
--- a/arch/h8300/include/asm/bitsperlong.h
+++ b/arch/h8300/include/uapi/asm/bitsperlong.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_H8300_BITS_PER_LONG
-#define __ASM_H8300_BITS_PER_LONG
+#ifndef _UAPI__ASM_H8300_BITS_PER_LONG
+#define _UAPI__ASM_H8300_BITS_PER_LONG
#include <asm-generic/bitsperlong.h>
@@ -11,4 +11,4 @@ typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
#endif
-#endif /* __ASM_H8300_BITS_PER_LONG */
+#endif /* _UAPI__ASM_H8300_BITS_PER_LONG */
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 0f5db5bb561b..d1ddcabbbe83 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -129,11 +129,6 @@ int copy_thread(unsigned long clone_flags,
return 0;
}
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index a2036bfda8af..6b45ef79eb8f 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y += ucontext.h
-
generic-y += auxvec.h
generic-y += barrier.h
generic-y += bug.h
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index 45a825402f63..ce67940860a5 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -33,9 +33,6 @@
/* task_struct, defined elsewhere, is the "process descriptor" */
struct task_struct;
-/* this is defined in arch/process.c */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
/*
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index c31706c38631..b15bf6bc0e94 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,15 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += kvm_para.h
-header-y += param.h
-header-y += ptrace.h
-header-y += registers.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += swab.h
-header-y += unistd.h
-header-y += user.h
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index de715bab7956..656050c2e6a0 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -61,14 +61,6 @@ void arch_cpu_idle(void)
}
/*
- * Return saved PC of a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return 0;
-}
-
-/*
* Copy architecture-specific thread state
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index ec90afdb3ad0..c599eb126c9e 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -37,15 +37,14 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
long uncleared;
while (count > PAGE_SIZE) {
- uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
- PAGE_SIZE);
+ uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
if (uncleared)
return count - (PAGE_SIZE - uncleared);
count -= PAGE_SIZE;
dest += PAGE_SIZE;
}
if (count)
- count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+ count = raw_copy_to_user(dest, &empty_zero_page, count);
return count;
}
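The helper clears user memory by copying the shared zero page one PAGE_SIZE chunk at a time, relying on the copy routine's bytes-not-copied return value to report how much was left. A self-contained sketch of the loop, with a trivial stand-in for raw_copy_to_user():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096ul

static const char empty_zero_page[PAGE_SIZE];

/* Stand-in for raw_copy_to_user(): returns the number of bytes NOT
 * copied, the convention the loop depends on. */
static unsigned long copy_to_user_stub(void *dst, const void *src,
				       unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static unsigned long clear_user_sketch(void *dest, unsigned long count)
{
	unsigned long uncleared;
	char *d = dest;

	while (count > PAGE_SIZE) {
		uncleared = copy_to_user_stub(d, empty_zero_page, PAGE_SIZE);
		if (uncleared)
			return count - (PAGE_SIZE - uncleared);
		count -= PAGE_SIZE;
		d += PAGE_SIZE;
	}
	if (count)
		count = copy_to_user_stub(d, empty_zero_page, count);
	return count;
}

int main(void)
{
	char buf[3 * PAGE_SIZE];

	memset(buf, 0xff, sizeof(buf));
	printf("left uncleared: %lu\n", clear_user_sketch(buf, sizeof(buf)));
	printf("buf[0]=%d buf[last]=%d\n", buf[0], buf[sizeof(buf) - 1]);
	return 0;
}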
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 26a63d69c599..ab982f07ea68 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -602,23 +602,6 @@ ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
}
/*
- * Return saved PC of a blocked thread.
- * Note that the only way T can block is through a call to schedule() -> switch_to().
- */
-static inline unsigned long
-thread_saved_pc (struct task_struct *t)
-{
- struct unw_frame_info info;
- unsigned long ip;
-
- unw_init_from_blocked_task(&info, t);
- if (unw_unwind(&info) < 0)
- return 0;
- unw_get_ip(&info, &ip);
- return ip;
-}
-
-/*
* Get the current instruction/program counter value.
*/
#define current_text_addr() \
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 891002bbb995..13a97aa2285f 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -2,48 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += kvm_para.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += break.h
-header-y += byteorder.h
-header-y += cmpxchg.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += fpu.h
-header-y += gcc_intrin.h
-header-y += ia64regs.h
-header-y += intel_intrin.h
-header-y += intrinsics.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += perfmon.h
-header-y += perfmon_default_smpl.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += ptrace_offsets.h
-header-y += resource.h
-header-y += rse.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
-header-y += ustack.h
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 3686d6abafde..9edda5466020 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -50,32 +50,10 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
# The gate DSO image is built using a special linker script.
include $(src)/Makefile.gate
-# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
-define sed-y
- "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
-endef
-quiet_cmd_nr_irqs = GEN $@
-define cmd_nr_irqs
- (set -e; \
- echo "#ifndef __ASM_NR_IRQS_H__"; \
- echo "#define __ASM_NR_IRQS_H__"; \
- echo "/*"; \
- echo " * DO NOT MODIFY."; \
- echo " *"; \
- echo " * This file was generated by Kbuild"; \
- echo " *"; \
- echo " */"; \
- echo ""; \
- sed -ne $(sed-y) $<; \
- echo ""; \
- echo "#endif" ) > $@
-endef
-
# We use internal kbuild rules to avoid the "is up to date" message from make
arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
-include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
- $(Q)mkdir -p $(dir $@)
- $(call cmd,nr_irqs)
+include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s FORCE
+ $(call filechk,offsets,__ASM_NR_IRQS_H__)
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index ceeffc509764..a32903ada016 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -6,7 +6,7 @@ extra-y += gate.so gate-syms.o gate.lds gate.o
CPPFLAGS_gate.lds := -P -C -U$(ARCH)
-quiet_cmd_gate = GATE $@
+quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 5767367550c6..657874eeeccc 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -122,8 +122,6 @@ extern void release_thread(struct task_struct *);
extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
extern void release_segments(struct mm_struct * mm);
-extern unsigned long thread_saved_pc(struct task_struct *);
-
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while (0)
#define release_segments(mm) do { } while (0)
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index 43937a61d6cf..b15bf6bc0e94 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -1,33 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index d8ffcfec599c..8cd7e03f4370 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -39,14 +39,6 @@
#include <linux/err.h>
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return tsk->thread.lr;
-}
-
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 531cb9eb3319..ddff1164aff0 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -26,6 +26,8 @@ CONFIG_SUN_PARTITION=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -349,7 +351,6 @@ CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -361,6 +362,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -414,6 +416,7 @@ CONFIG_ZORRO8390=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -572,6 +575,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -590,6 +595,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index ca91d39555da..17384dc959a5 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -27,6 +27,8 @@ CONFIG_SUN_PARTITION=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -331,7 +333,6 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -343,6 +344,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -388,6 +390,7 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -531,6 +534,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -549,6 +554,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 23a3d8a691e2..53a641d62f85 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -26,6 +26,8 @@ CONFIG_SUN_PARTITION=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -340,7 +342,6 @@ CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_ATARI_SCSI=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -352,6 +353,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -399,6 +401,7 @@ CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -552,6 +555,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -570,6 +575,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 95deb95140fe..3925ae3a5eb3 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -26,6 +26,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -330,7 +332,6 @@ CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_BVME6000_SCSI=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -342,6 +343,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -387,6 +389,7 @@ CONFIG_BVME6000_NET=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -523,6 +526,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -541,6 +546,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index afae6958db2d..f4a134b390b4 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -27,6 +27,8 @@ CONFIG_SUN_PARTITION=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -331,7 +333,6 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -343,6 +344,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -389,6 +391,7 @@ CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -533,6 +536,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -551,6 +556,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b010734729a7..9ed0cef632b7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -26,6 +26,8 @@ CONFIG_SUN_PARTITION=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -340,7 +342,6 @@ CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -352,6 +353,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -408,6 +410,7 @@ CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -555,6 +558,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -573,6 +578,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 0e414549b235..efed0d48fd53 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -22,6 +22,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -373,7 +375,6 @@ CONFIG_BVME6000_SCSI=y
CONFIG_SUN3X_ESP=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -385,6 +386,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -454,6 +456,7 @@ CONFIG_SMC91X=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -635,6 +638,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -653,6 +658,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index b2e687a0ec3d..9040457c7f9c 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -26,6 +26,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68030=y
@@ -329,7 +331,6 @@ CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MVME147_SCSI=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -341,6 +342,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -387,6 +389,7 @@ CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -523,6 +526,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -541,6 +546,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index cbd8ee24d1bc..8b17f00e0484 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -26,6 +26,8 @@ CONFIG_SUN_PARTITION=y
# CONFIG_EFI_PARTITION is not set
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -330,7 +332,6 @@ CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MVME16x_SCSI=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -342,6 +343,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -387,6 +389,7 @@ CONFIG_MVME16x_NET=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -523,6 +526,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -541,6 +546,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 1e82cc944339..5f3718c62c85 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -27,6 +27,8 @@ CONFIG_SUN_PARTITION=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68040=y
@@ -336,7 +338,6 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -348,6 +349,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -398,6 +400,7 @@ CONFIG_NE2000=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PLIP=m
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -546,6 +549,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -564,6 +569,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index f9e77f57a972..8c979a68fca5 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -26,6 +26,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3=y
@@ -327,7 +329,6 @@ CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_SUN3_SCSI=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -339,6 +340,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -385,6 +387,7 @@ CONFIG_SUN3_82586=y
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -525,6 +528,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -542,6 +547,7 @@ CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 3c394fcfb368..a1e79530e806 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -26,6 +26,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_SYSV68_PARTITION=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_SUN3X=y
@@ -327,7 +329,6 @@ CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_SUN3X_ESP=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
@@ -339,6 +340,7 @@ CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_UEVENT=y
CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
CONFIG_TARGET_CORE=m
CONFIG_TCM_IBLOCK=m
CONFIG_TCM_FILEIO=m
@@ -385,6 +387,7 @@ CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
@@ -525,6 +528,8 @@ CONFIG_DLM=m
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
CONFIG_TEST_HEXDUMP=m
@@ -543,6 +548,7 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
+CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 77239e81379b..94c36030440c 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -130,8 +130,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) \
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 8c8ce5e1ee0e..3bc64d02ba5f 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -62,9 +62,4 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
-#ifndef __uClinux__
-extern void ptrace_signal_deliver(void);
-#define ptrace_signal_deliver ptrace_signal_deliver
-#endif /* __uClinux__ */
-
#endif /* _M68K_SIGNAL_H */
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 6a2d257bdfb2..64368077235a 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -9,27 +9,3 @@ generic-y += socket.h
generic-y += sockios.h
generic-y += termbits.h
generic-y += termios.h
-
-header-y += a.out.h
-header-y += bootinfo.h
-header-y += bootinfo-amiga.h
-header-y += bootinfo-apollo.h
-header-y += bootinfo-atari.h
-header-y += bootinfo-hp300.h
-header-y += bootinfo-mac.h
-header-y += bootinfo-q40.h
-header-y += bootinfo-vme.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += fcntl.h
-header-y += ioctls.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += stat.h
-header-y += swab.h
-header-y += unistd.h
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index e475c945c8b2..7df92f8b0781 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -40,20 +40,6 @@
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
-
-/*
- * Return saved PC from a blocked thread
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
- /* Check whether the thread is blocked in resume() */
- if (in_sched_functions(sw->retpc))
- return ((unsigned long *)sw->a6)[1];
- else
- return sw->retpc;
-}
-
void arch_cpu_idle(void)
{
#if defined(MACH_ATARI_ONLY)
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 6f945bb5ffbd..e79421f5b9cd 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -109,22 +109,6 @@ int fixup_exception(struct pt_regs *regs)
return 1;
}
-void ptrace_signal_deliver(void)
-{
- struct pt_regs *regs = signal_pt_regs();
- if (regs->orig_d0 < 0)
- return;
- switch (regs->d0) {
- case -ERESTARTNOHAND:
- case -ERESTARTSYS:
- case -ERESTARTNOINTR:
- regs->d0 = regs->orig_d0;
- regs->orig_d0 = -1;
- regs->pc -= 2;
- break;
- }
-}
-
static inline void push_cache (unsigned long vaddr)
{
/*
diff --git a/arch/metag/boot/dts/include/dt-bindings b/arch/metag/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/metag/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 5ebc2850690e..9c8fbf8fb5aa 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -24,24 +24,32 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define __kernel_ok (uaccess_kernel())
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
- ((addr) > PAGE_OFFSET && \
- (addr) < LINCORE_BASE))
-
static inline int __access_ok(unsigned long addr, unsigned long size)
{
- return __kernel_ok || !__user_bad(addr, size);
+ /*
+ * Allow access to the user mapped memory area, but not the system area
+ * before it. The check extends to the top of the address space when
+ * kernel access is allowed (there's no real reason to do a user copy
+ * to the system area in any case).
+ */
+ if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+ size <= get_fs().seg - addr))
+ return true;
+ /*
+ * Explicitly allow NULL pointers here. Parts of the kernel such
+ * as readv/writev use access_ok to validate pointers, but want
+ * to allow NULL pointers for various reasons. NULL pointers are
+ * safe to allow through because the first page is not mappable on
+ * Meta.
+ */
+ if (!addr)
+ return true;
+ /* Allow access to core code memory area... */
+ if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+ size <= LINCORE_CODE_LIMIT + 1 - addr)
+ return true;
+ /* ... but no other areas. */
+ return false;
}
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
@@ -113,7 +121,8 @@ extern long __get_user_bad(void);
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
+ long __gu_err; \
+ long long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -121,7 +130,8 @@ extern long __get_user_bad(void);
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
+ long long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
if (access_ok(VERIFY_READ, __gu_addr, size)) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
@@ -132,6 +142,7 @@ extern long __get_user_bad(void);
extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
extern unsigned int __get_user_asm_d(const void __user *addr, long *err);
+extern unsigned long long __get_user_asm_l(const void __user *addr, long *err);
#define __get_user_size(x, ptr, size, retval) \
do { \
@@ -143,6 +154,8 @@ do { \
x = __get_user_asm_w(ptr, &retval); break; \
case 4: \
x = __get_user_asm_d(ptr, &retval); break; \
+ case 8: \
+ x = __get_user_asm_l(ptr, &retval); break; \
default: \
(x) = __get_user_bad(); \
} \
@@ -161,8 +174,13 @@ do { \
extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
long count);
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return -EFAULT;
+ return __strncpy_from_user(dst, src, count);
+}
/*
* Return the size of a string (including the ending 0)
*
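
For context, the reworked __access_ok() above drops the blanket
__kernel_ok bypass and instead accepts exactly three windows: the user
mapping bounded by the current segment limit, the NULL pointer (page 0
is not mappable on Meta), and the core code region. A minimal
stand-alone model of that policy, with placeholder constants standing
in for the real header values, looks like this:

	#include <stdbool.h>

	#define META_MEMORY_BASE   0x08000000UL  /* placeholder value */
	#define LINCORE_CODE_BASE  0x80000000UL  /* placeholder value */
	#define LINCORE_CODE_LIMIT 0x9fffffffUL  /* placeholder value */

	/* seg models get_fs().seg, the current segment limit */
	static bool model_access_ok(unsigned long addr, unsigned long size,
				    unsigned long seg)
	{
		/* user-mapped window, bounded above by the segment limit */
		if (addr >= META_MEMORY_BASE && addr < seg &&
		    size <= seg - addr)
			return true;
		/* NULL is explicitly allowed (readv/writev rely on it) */
		if (!addr)
			return true;
		/* core code window, and nothing else */
		if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
		    size <= LINCORE_CODE_LIMIT + 1 - addr)
			return true;
		return false;
	}
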
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
index ab78be2b6eb0..b29731ebd7a9 100644
--- a/arch/metag/include/uapi/asm/Kbuild
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -1,14 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += byteorder.h
-header-y += ech.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += swab.h
-header-y += unistd.h
-
generic-y += mman.h
generic-y += resource.h
generic-y += setup.h
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 232a12bf3f99..2dbbb7c66043 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -567,8 +567,7 @@ static void stop_this_cpu(void *data)
{
unsigned int cpu = smp_processor_id();
- if (system_state == SYSTEM_BOOTING ||
- system_state == SYSTEM_RUNNING) {
+ if (system_state <= SYSTEM_RUNNING) {
spin_lock(&stop_lock);
pr_crit("CPU%u: stopping\n", cpu);
dump_stack();
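
The "<=" form works because enum system_states is ordered; around this
release the enum in include/linux/kernel.h had roughly this shape (the
newly inserted SYSTEM_SCHEDULING state is what the old two-way
comparison failed to cover):

	enum system_states {
		SYSTEM_BOOTING,
		SYSTEM_SCHEDULING,
		SYSTEM_RUNNING,		/* "<= SYSTEM_RUNNING" covers all three */
		SYSTEM_HALT,
		SYSTEM_POWER_OFF,
		SYSTEM_RESTART,
	};
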
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index e8a4ea83cabb..c941abdb8f85 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -246,65 +246,47 @@
#define __asm_copy_user_64bit_rapf_loop( \
to, from, ret, n, id, FIXUP) \
asm volatile ( \
- ".balign 8\n" \
- "MOV RAPF, %1\n" \
- "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
- "MOV D0Ar6, #0\n" \
- "LSR D1Ar5, %3, #6\n" \
- "SUB TXRPT, D1Ar5, #2\n" \
- "MOV RAPF, %1\n" \
+ ".balign 8\n" \
+ " MOV RAPF, %1\n" \
+ " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+ " MOV D0Ar6, #0\n" \
+ " LSR D1Ar5, %3, #6\n" \
+ " SUB TXRPT, D1Ar5, #2\n" \
+ " MOV RAPF, %1\n" \
"$Lloop"id":\n" \
- "ADD RAPF, %1, #64\n" \
- "21:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "22:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "23:\n" \
- "SUB %3, %3, #32\n" \
- "24:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "25:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "26:\n" \
- "SUB %3, %3, #32\n" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "BR $Lloop"id"\n" \
+ " ADD RAPF, %1, #64\n" \
+ "21: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "23: SUB %3, %3, #32\n" \
+ "24: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26: SUB %3, %3, #32\n" \
+ " DCACHE [%1+#-64], D0Ar6\n" \
+ " BR $Lloop"id"\n" \
\
- "MOV RAPF, %1\n" \
- "27:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "28:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "29:\n" \
- "SUB %3, %3, #32\n" \
- "30:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "31:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "32:\n" \
- "SUB %0, %0, #8\n" \
- "33:\n" \
- "SETL [%0++], D0.7, D1.7\n" \
- "SUB %3, %3, #32\n" \
- "1:" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
- "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
- "GETL D0.5, D1.5, [A0StP+#-24]\n" \
- "GETL D0.6, D1.6, [A0StP+#-16]\n" \
- "GETL D0.7, D1.7, [A0StP+#-8]\n" \
- "SUB A0StP, A0StP, #40\n" \
+ " MOV RAPF, %1\n" \
+ "27: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "28: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29: SUB %3, %3, #32\n" \
+ "30: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32: SETL [%0+#-8], D0.7, D1.7\n" \
+ " SUB %3, %3, #32\n" \
+ "1: DCACHE [%1+#-64], D0Ar6\n" \
+ " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
+ " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
+ " GETL D0.5, D1.5, [A0StP+#-24]\n" \
+ " GETL D0.6, D1.6, [A0StP+#-16]\n" \
+ " GETL D0.7, D1.7, [A0StP+#-8]\n" \
+ " SUB A0StP, A0StP, #40\n" \
" .section .fixup,\"ax\"\n" \
- "4:\n" \
- " ADD %0, %0, #8\n" \
- "3:\n" \
- " MOV D0Ar2, TXSTATUS\n" \
+ "3: MOV D0Ar2, TXSTATUS\n" \
" MOV D1Ar1, TXSTATUS\n" \
" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
" MOV TXSTATUS, D1Ar1\n" \
FIXUP \
- " MOVT D0Ar2,#HI(1b)\n" \
- " JUMP D0Ar2,#LO(1b)\n" \
+ " MOVT D0Ar2, #HI(1b)\n" \
+ " JUMP D0Ar2, #LO(1b)\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .long 21b,3b\n" \
@@ -319,7 +301,6 @@
" .long 30b,3b\n" \
" .long 31b,3b\n" \
" .long 32b,3b\n" \
- " .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
@@ -397,89 +378,59 @@
#define __asm_copy_user_32bit_rapf_loop( \
to, from, ret, n, id, FIXUP) \
asm volatile ( \
- ".balign 8\n" \
- "MOV RAPF, %1\n" \
- "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
- "MOV D0Ar6, #0\n" \
- "LSR D1Ar5, %3, #6\n" \
- "SUB TXRPT, D1Ar5, #2\n" \
- "MOV RAPF, %1\n" \
- "$Lloop"id":\n" \
- "ADD RAPF, %1, #64\n" \
- "21:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "22:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "23:\n" \
- "SUB %3, %3, #16\n" \
- "24:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "25:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "26:\n" \
- "SUB %3, %3, #16\n" \
- "27:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "28:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "29:\n" \
- "SUB %3, %3, #16\n" \
- "30:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "31:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "32:\n" \
- "SUB %3, %3, #16\n" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "BR $Lloop"id"\n" \
+ ".balign 8\n" \
+ " MOV RAPF, %1\n" \
+ " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
+ " MOV D0Ar6, #0\n" \
+ " LSR D1Ar5, %3, #6\n" \
+ " SUB TXRPT, D1Ar5, #2\n" \
+ " MOV RAPF, %1\n" \
+ "$Lloop"id":\n" \
+ " ADD RAPF, %1, #64\n" \
+ "21: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "22: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "23: SUB %3, %3, #16\n" \
+ "24: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26: SUB %3, %3, #16\n" \
+ "27: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "28: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29: SUB %3, %3, #16\n" \
+ "30: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32: SUB %3, %3, #16\n" \
+ " DCACHE [%1+#-64], D0Ar6\n" \
+ " BR $Lloop"id"\n" \
\
- "MOV RAPF, %1\n" \
- "33:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "34:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "35:\n" \
- "SUB %3, %3, #16\n" \
- "36:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "37:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "38:\n" \
- "SUB %3, %3, #16\n" \
- "39:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "40:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "41:\n" \
- "SUB %3, %3, #16\n" \
- "42:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "43:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "44:\n" \
- "SUB %0, %0, #4\n" \
- "45:\n" \
- "SETD [%0++], D0.7\n" \
- "SUB %3, %3, #16\n" \
- "1:" \
- "DCACHE [%1+#-64], D0Ar6\n" \
- "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
- "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
- "GETL D0.5, D1.5, [A0StP+#-24]\n" \
- "GETL D0.6, D1.6, [A0StP+#-16]\n" \
- "GETL D0.7, D1.7, [A0StP+#-8]\n" \
- "SUB A0StP, A0StP, #40\n" \
+ " MOV RAPF, %1\n" \
+ "33: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "34: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "35: SUB %3, %3, #16\n" \
+ "36: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "37: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "38: SUB %3, %3, #16\n" \
+ "39: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "40: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "41: SUB %3, %3, #16\n" \
+ "42: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "43: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "44: SETD [%0+#-4], D0.7\n" \
+ " SUB %3, %3, #16\n" \
+ "1: DCACHE [%1+#-64], D0Ar6\n" \
+ " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
+ " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
+ " GETL D0.5, D1.5, [A0StP+#-24]\n" \
+ " GETL D0.6, D1.6, [A0StP+#-16]\n" \
+ " GETL D0.7, D1.7, [A0StP+#-8]\n" \
+ " SUB A0StP, A0StP, #40\n" \
" .section .fixup,\"ax\"\n" \
- "4:\n" \
- " ADD %0, %0, #4\n" \
- "3:\n" \
- " MOV D0Ar2, TXSTATUS\n" \
+ "3: MOV D0Ar2, TXSTATUS\n" \
" MOV D1Ar1, TXSTATUS\n" \
" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
" MOV TXSTATUS, D1Ar1\n" \
FIXUP \
- " MOVT D0Ar2,#HI(1b)\n" \
- " JUMP D0Ar2,#LO(1b)\n" \
+ " MOVT D0Ar2, #HI(1b)\n" \
+ " JUMP D0Ar2, #LO(1b)\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .long 21b,3b\n" \
@@ -506,7 +457,6 @@
" .long 42b,3b\n" \
" .long 43b,3b\n" \
" .long 44b,3b\n" \
- " .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
@@ -1094,6 +1044,30 @@ unsigned int __get_user_asm_d(const void __user *addr, long *err)
}
EXPORT_SYMBOL(__get_user_asm_d);
+unsigned long long __get_user_asm_l(const void __user *addr, long *err)
+{
+ register unsigned long long x asm ("D0Re0") = 0;
+ asm volatile (
+ " GETL %0,%t0,[%2]\n"
+ "1:\n"
+ " GETL %0,%t0,[%2]\n"
+ "2:\n"
+ " .section .fixup,\"ax\"\n"
+ "3: MOV D0FrT,%3\n"
+ " SETD [%1],D0FrT\n"
+ " MOVT D0FrT,#HI(2b)\n"
+ " JUMP D0FrT,#LO(2b)\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ " .long 1b,3b\n"
+ " .previous\n"
+ : "=r" (x)
+ : "r" (err), "r" (addr), "P" (-EFAULT)
+ : "D0FrT");
+ return x;
+}
+EXPORT_SYMBOL(__get_user_asm_l);
+
long __put_user_asm_b(unsigned int x, void __user *addr)
{
register unsigned int err asm ("D0Re0") = 0;
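
With __get_user_asm_l() wired into the size-8 case of __get_user_size()
above, a 64-bit get_user() on metag now resolves to a single GETL
instead of falling through to __get_user_bad(). A sketch of the
caller-side effect, assuming normal kernel context (the helper name and
uaddr argument here are hypothetical):

	/* assumed kernel context; uaddr is a user-supplied pointer */
	static int read_user_u64(const u64 __user *uaddr, u64 *out)
	{
		u64 val;

		if (get_user(val, uaddr))
			return -EFAULT;	/* fixup path reported the fault */
		*out = val;
		return 0;
	}
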
diff --git a/arch/metag/mm/mmu-meta1.c b/arch/metag/mm/mmu-meta1.c
index 91f4255bcb5c..62ebab90924d 100644
--- a/arch/metag/mm/mmu-meta1.c
+++ b/arch/metag/mm/mmu-meta1.c
@@ -152,6 +152,5 @@ void __init mmu_init(unsigned long mem_end)
p_swapper_pg_dir++;
addr += PGDIR_SIZE;
- entry++;
}
}
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index dc5dd5b69fde..92fd4e95b488 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -1,8 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_SYSFS_DEPRECATED=y
@@ -33,10 +31,12 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+CONFIG_BRIDGE=m
CONFIG_MTD=y
-CONFIG_PROC_DEVICETREE=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
@@ -47,9 +47,9 @@ CONFIG_XILINX_LL_TEMAC=y
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_XILINX_HWICAP=y
CONFIG_I2C=y
@@ -66,7 +66,6 @@ CONFIG_FB=y
CONFIG_FB_XILINX=y
# CONFIG_USB_SUPPORT is not set
CONFIG_UIO=y
-CONFIG_UIO_PDRV=y
CONFIG_UIO_PDRV_GENIRQ=y
CONFIG_UIO_DMEM_GENIRQ=y
CONFIG_EXT2_FS=y
@@ -77,14 +76,13 @@ CONFIG_NFS_FS=y
CONFIG_CIFS=y
CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
-CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_INFO=y
CONFIG_KGDB=y
CONFIG_KGDB_TESTS=y
CONFIG_KGDB_KDB=y
CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index 4cdaf565e638..06d69a6e192d 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,9 +1,6 @@
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -34,18 +31,15 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
-# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
@@ -56,9 +50,9 @@ CONFIG_XILINX_LL_TEMAC=y
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_XILINX_HWICAP=y
CONFIG_I2C=y
@@ -74,10 +68,6 @@ CONFIG_XILINX_WATCHDOG=y
CONFIG_FB=y
CONFIG_FB_XILINX=y
# CONFIG_USB_SUPPORT is not set
-CONFIG_UIO=y
-CONFIG_UIO_PDRV=y
-CONFIG_UIO_PDRV_GENIRQ=y
-CONFIG_UIO_DMEM_GENIRQ=y
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_CRAMFS=y
@@ -85,10 +75,10 @@ CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NLS=y
-CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_SLAB=y
+CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_INFO=y
CONFIG_EARLY_PRINTK=y
CONFIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=y
@@ -97,4 +87,3 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 56830ff65333..83a4ef3a2495 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,14 +1,57 @@
generic-y += barrier.h
+generic-y += bitops.h
+generic-y += bitsperlong.h
+generic-y += bug.h
+generic-y += bugs.h
generic-y += clkdev.h
generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
generic-y += exec.h
generic-y += extable.h
+generic-y += fb.h
+generic-y += fcntl.h
+generic-y += hardirq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += linkage.h
+generic-y += local.h
+generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
generic-y += preempt.h
+generic-y += resource.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += signal.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += swab.h
generic-y += syscalls.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
generic-y += trace_clock.h
+generic-y += ucontext.h
+generic-y += vga.h
generic-y += word-at-a-time.h
-generic-y += kprobes.h
+generic-y += xor.h
diff --git a/arch/microblaze/include/asm/bitops.h b/arch/microblaze/include/asm/bitops.h
deleted file mode 100644
index a72468f15c8b..000000000000
--- a/arch/microblaze/include/asm/bitops.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitops.h>
diff --git a/arch/microblaze/include/asm/bug.h b/arch/microblaze/include/asm/bug.h
deleted file mode 100644
index b12fd89e42e9..000000000000
--- a/arch/microblaze/include/asm/bug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bug.h>
diff --git a/arch/microblaze/include/asm/bugs.h b/arch/microblaze/include/asm/bugs.h
deleted file mode 100644
index 61791e1ad9f5..000000000000
--- a/arch/microblaze/include/asm/bugs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bugs.h>
diff --git a/arch/microblaze/include/asm/div64.h b/arch/microblaze/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/microblaze/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/microblaze/include/asm/emergency-restart.h b/arch/microblaze/include/asm/emergency-restart.h
deleted file mode 100644
index 3711bd9d50bd..000000000000
--- a/arch/microblaze/include/asm/emergency-restart.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/emergency-restart.h>
diff --git a/arch/microblaze/include/asm/fb.h b/arch/microblaze/include/asm/fb.h
deleted file mode 100644
index 3a4988e8df45..000000000000
--- a/arch/microblaze/include/asm/fb.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fb.h>
diff --git a/arch/microblaze/include/asm/hardirq.h b/arch/microblaze/include/asm/hardirq.h
deleted file mode 100644
index fb3c05a0cbbf..000000000000
--- a/arch/microblaze/include/asm/hardirq.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/hardirq.h>
diff --git a/arch/microblaze/include/asm/irq_regs.h b/arch/microblaze/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/microblaze/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/microblaze/include/asm/kdebug.h b/arch/microblaze/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/microblaze/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/microblaze/include/asm/kmap_types.h b/arch/microblaze/include/asm/kmap_types.h
deleted file mode 100644
index 25975252d83d..000000000000
--- a/arch/microblaze/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_MICROBLAZE_KMAP_TYPES_H
-#define _ASM_MICROBLAZE_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif /* _ASM_MICROBLAZE_KMAP_TYPES_H */
diff --git a/arch/microblaze/include/asm/linkage.h b/arch/microblaze/include/asm/linkage.h
deleted file mode 100644
index 0540bbaad897..000000000000
--- a/arch/microblaze/include/asm/linkage.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/linkage.h>
diff --git a/arch/microblaze/include/asm/local.h b/arch/microblaze/include/asm/local.h
deleted file mode 100644
index c11c530f74d0..000000000000
--- a/arch/microblaze/include/asm/local.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local.h>
diff --git a/arch/microblaze/include/asm/local64.h b/arch/microblaze/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/microblaze/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/microblaze/include/asm/parport.h b/arch/microblaze/include/asm/parport.h
deleted file mode 100644
index cf252af64590..000000000000
--- a/arch/microblaze/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/microblaze/include/asm/percpu.h b/arch/microblaze/include/asm/percpu.h
deleted file mode 100644
index 06a959d67234..000000000000
--- a/arch/microblaze/include/asm/percpu.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/percpu.h>
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 37ef196e4519..330d556860ba 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -69,8 +69,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
extern unsigned long get_wchan(struct task_struct *p);
# define KSTK_EIP(tsk) (0)
@@ -121,10 +119,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
-/* Return saved (kernel) PC of a blocked thread. */
-# define thread_saved_pc(tsk) \
- ((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
-
unsigned long get_wchan(struct task_struct *p);
/* The size allocated for kernel stacks. This _must_ be a power of two! */
diff --git a/arch/microblaze/include/asm/serial.h b/arch/microblaze/include/asm/serial.h
deleted file mode 100644
index a0cb0caff152..000000000000
--- a/arch/microblaze/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/microblaze/include/asm/shmparam.h b/arch/microblaze/include/asm/shmparam.h
deleted file mode 100644
index 93f30deb95d0..000000000000
--- a/arch/microblaze/include/asm/shmparam.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmparam.h>
diff --git a/arch/microblaze/include/asm/topology.h b/arch/microblaze/include/asm/topology.h
deleted file mode 100644
index 5428f333a02c..000000000000
--- a/arch/microblaze/include/asm/topology.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/topology.h>
diff --git a/arch/microblaze/include/asm/ucontext.h b/arch/microblaze/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30fb..000000000000
--- a/arch/microblaze/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 032fed71223f..9774e1d9507b 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 398
+#define __NR_syscalls 399
#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/asm/vga.h b/arch/microblaze/include/asm/vga.h
deleted file mode 100644
index 89d82fd8fcf1..000000000000
--- a/arch/microblaze/include/asm/vga.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/vga.h>
diff --git a/arch/microblaze/include/asm/xor.h b/arch/microblaze/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/microblaze/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 1aac99f87df1..2178c78c7c1a 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -2,35 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += types.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += elf.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += unistd.h
diff --git a/arch/microblaze/include/uapi/asm/bitsperlong.h b/arch/microblaze/include/uapi/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/microblaze/include/uapi/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/microblaze/include/uapi/asm/errno.h b/arch/microblaze/include/uapi/asm/errno.h
deleted file mode 100644
index 4c82b503d92f..000000000000
--- a/arch/microblaze/include/uapi/asm/errno.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/errno.h>
diff --git a/arch/microblaze/include/uapi/asm/fcntl.h b/arch/microblaze/include/uapi/asm/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/arch/microblaze/include/uapi/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
diff --git a/arch/microblaze/include/uapi/asm/ioctl.h b/arch/microblaze/include/uapi/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/microblaze/include/uapi/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/microblaze/include/uapi/asm/ioctls.h b/arch/microblaze/include/uapi/asm/ioctls.h
deleted file mode 100644
index ec34c760665e..000000000000
--- a/arch/microblaze/include/uapi/asm/ioctls.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctls.h>
diff --git a/arch/microblaze/include/uapi/asm/ipcbuf.h b/arch/microblaze/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/microblaze/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/microblaze/include/uapi/asm/kvm_para.h b/arch/microblaze/include/uapi/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/microblaze/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/include/uapi/asm/mman.h b/arch/microblaze/include/uapi/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/microblaze/include/uapi/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/microblaze/include/uapi/asm/msgbuf.h b/arch/microblaze/include/uapi/asm/msgbuf.h
deleted file mode 100644
index 809134c644a6..000000000000
--- a/arch/microblaze/include/uapi/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
diff --git a/arch/microblaze/include/uapi/asm/param.h b/arch/microblaze/include/uapi/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/microblaze/include/uapi/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/microblaze/include/uapi/asm/poll.h b/arch/microblaze/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/microblaze/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/microblaze/include/uapi/asm/resource.h b/arch/microblaze/include/uapi/asm/resource.h
deleted file mode 100644
index 04bc4db8921b..000000000000
--- a/arch/microblaze/include/uapi/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/microblaze/include/uapi/asm/sembuf.h b/arch/microblaze/include/uapi/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef7..000000000000
--- a/arch/microblaze/include/uapi/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
diff --git a/arch/microblaze/include/uapi/asm/shmbuf.h b/arch/microblaze/include/uapi/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de38..000000000000
--- a/arch/microblaze/include/uapi/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
diff --git a/arch/microblaze/include/uapi/asm/siginfo.h b/arch/microblaze/include/uapi/asm/siginfo.h
deleted file mode 100644
index 0815d29d82e5..000000000000
--- a/arch/microblaze/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/siginfo.h>
diff --git a/arch/microblaze/include/uapi/asm/signal.h b/arch/microblaze/include/uapi/asm/signal.h
deleted file mode 100644
index 7b1573ce19de..000000000000
--- a/arch/microblaze/include/uapi/asm/signal.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/signal.h>
diff --git a/arch/microblaze/include/uapi/asm/socket.h b/arch/microblaze/include/uapi/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/microblaze/include/uapi/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
diff --git a/arch/microblaze/include/uapi/asm/sockios.h b/arch/microblaze/include/uapi/asm/sockios.h
deleted file mode 100644
index def6d4746ee7..000000000000
--- a/arch/microblaze/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sockios.h>
diff --git a/arch/microblaze/include/uapi/asm/stat.h b/arch/microblaze/include/uapi/asm/stat.h
deleted file mode 100644
index 3dc90fa92c70..000000000000
--- a/arch/microblaze/include/uapi/asm/stat.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/stat.h>
diff --git a/arch/microblaze/include/uapi/asm/statfs.h b/arch/microblaze/include/uapi/asm/statfs.h
deleted file mode 100644
index 0b91fe198c20..000000000000
--- a/arch/microblaze/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/statfs.h>
diff --git a/arch/microblaze/include/uapi/asm/swab.h b/arch/microblaze/include/uapi/asm/swab.h
deleted file mode 100644
index 7847e563ab66..000000000000
--- a/arch/microblaze/include/uapi/asm/swab.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/swab.h>
diff --git a/arch/microblaze/include/uapi/asm/termbits.h b/arch/microblaze/include/uapi/asm/termbits.h
deleted file mode 100644
index 3935b106de79..000000000000
--- a/arch/microblaze/include/uapi/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
diff --git a/arch/microblaze/include/uapi/asm/termios.h b/arch/microblaze/include/uapi/asm/termios.h
deleted file mode 100644
index 280d78a9d966..000000000000
--- a/arch/microblaze/include/uapi/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index d8086159d996..a88b3c11cc20 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -413,5 +413,6 @@
#define __NR_pkey_mprotect 395
#define __NR_pkey_alloc 396
#define __NR_pkey_free 397
+#define __NR_statx 398
#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 12e093a03e60..e45ada8fb006 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -65,8 +65,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;
- __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
- sg->length, direction);
+ __dma_sync(sg_phys(sg), sg->length, direction);
}
return nents;
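
The replacement helper is not new behaviour: sg_phys() in
include/linux/scatterlist.h is, to a close approximation, exactly the
expression it replaces:

	static inline dma_addr_t sg_phys(struct scatterlist *sg)
	{
		return page_to_phys(sg_page(sg)) + sg->offset;
	}
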
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index ef548510b951..4e1b567becd6 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -208,9 +208,7 @@ syscall_debug_table:
mfs r11, rmsr; /* save MSR */ \
swi r11, r1, PT_MSR;
-#define RESTORE_REGS \
- lwi r11, r1, PT_MSR; \
- mts rmsr , r11; \
+#define RESTORE_REGS_GP \
lwi r2, r1, PT_R2; /* restore SDA */ \
lwi r3, r1, PT_R3; \
lwi r4, r1, PT_R4; \
@@ -242,6 +240,18 @@ syscall_debug_table:
lwi r30, r1, PT_R30; \
lwi r31, r1, PT_R31; /* Restore cur task reg */
+#define RESTORE_REGS \
+ lwi r11, r1, PT_MSR; \
+ mts rmsr , r11; \
+ RESTORE_REGS_GP
+
+#define RESTORE_REGS_RTBD \
+ lwi r11, r1, PT_MSR; \
+ andni r11, r11, MSR_EIP; /* clear EIP */ \
+ ori r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */ \
+ mts rmsr , r11; \
+ RESTORE_REGS_GP
+
#define SAVE_STATE \
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
/* See if already in kernel mode.*/ \
@@ -427,7 +437,7 @@ C_ENTRY(ret_from_trap):
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
- RESTORE_REGS;
+ RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
bri 6f;
@@ -436,7 +446,7 @@ C_ENTRY(ret_from_trap):
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
- RESTORE_REGS;
+ RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
@@ -612,7 +622,7 @@ C_ENTRY(ret_from_exc):
VM_OFF;
tophys(r1,r1);
- RESTORE_REGS;
+ RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
@@ -621,7 +631,7 @@ C_ENTRY(ret_from_exc):
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
- RESTORE_REGS;
+ RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
tovirt(r1,r1);
@@ -847,7 +857,7 @@ dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
VM_OFF;
tophys(r1,r1);
/* MS: Restore all regs */
- RESTORE_REGS
+ RESTORE_REGS_RTBD
addik r1, r1, PT_SIZE /* Clean up stack space */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
@@ -858,7 +868,7 @@ DBTRAP_return_user: /* MS: Make global symbol for debugging */
2: VM_OFF;
tophys(r1,r1);
/* MS: Restore all regs */
- RESTORE_REGS
+ RESTORE_REGS_RTBD
lwi r14, r1, PT_R14;
lwi r16, r1, PT_PC;
addik r1, r1, PT_SIZE; /* MS: Clean up stack space */
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index e92a817e645f..6527ec22f158 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -119,23 +119,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
-#ifndef CONFIG_MMU
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct cpu_context *ctx =
- &(((struct thread_info *)(tsk->stack))->cpu_context);
-
- /* Check whether the thread is blocked in resume() */
- if (in_sched_functions(ctx->r15))
- return (unsigned long)ctx->r15;
- else
- return ctx->r14;
-}
-#endif
-
unsigned long get_wchan(struct task_struct *p)
{
/* TBD (used by procfs) */
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 6841c2df14d9..c48ff4ad2070 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -398,3 +398,4 @@ ENTRY(sys_call_table)
.long sys_pkey_mprotect /* 395 */
.long sys_pkey_alloc
.long sys_pkey_free
+ .long sys_statx
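
With the number defined in uapi/asm/unistd.h and the table entry wired
up here, statx becomes callable on microblaze. A hedged userspace
sketch (assumes 4.11+ uapi headers; the raw syscall number 398 from
this patch is used directly since libc wrappers may not exist yet):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <fcntl.h>
	#include <linux/stat.h>

	int main(void)
	{
		struct statx stx;

		/* 398 == __NR_statx on microblaze per this patch */
		if (syscall(398, AT_FDCWD, ".", 0, STATX_BASIC_STATS, &stx))
			perror("statx");
		else
			printf("inode %llu\n",
			       (unsigned long long)stx.stx_ino);
		return 0;
	}
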
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 999066192715..545ccd46edb3 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -178,8 +178,10 @@ static __init int xilinx_clockevent_init(void)
clockevent_xilinx_timer.shift);
clockevent_xilinx_timer.max_delta_ns =
clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer);
+ clockevent_xilinx_timer.max_delta_ticks = (u32)~0;
clockevent_xilinx_timer.min_delta_ns =
clockevent_delta2ns(1, &clockevent_xilinx_timer);
+ clockevent_xilinx_timer.min_delta_ticks = 1;
clockevent_xilinx_timer.cpumask = cpumask_of(0);
clockevents_register_device(&clockevent_xilinx_timer);
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
index 2fcc5a52d84d..ed4454c5ce35 100644
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -60,6 +60,7 @@ void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
+ unsigned int idx;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
@@ -68,21 +69,18 @@ void __kunmap_atomic(void *kvaddr)
}
type = kmap_atomic_idx();
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
- unsigned int idx;
-
- idx = type + KM_TYPE_NR * smp_processor_id();
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
- }
+ idx = type + KM_TYPE_NR * smp_processor_id();
+#ifdef CONFIG_DEBUG_HIGHMEM
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
+ /*
+ * Force other mappings to Oops if they try to access
+ * this pte without first remapping it.
+ */
+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
+ local_flush_tlb_page(NULL, vaddr);
+
kmap_atomic_idx_pop();
pagefault_enable();
preempt_enable();
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 5c3f688a5232..5cef58651db0 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -1,7 +1,9 @@
# Fail on warnings - also for files referenced in subdirs
# -Werror can be disabled for specific files using:
# CFLAGS_<file.o> := -Wno-error
+ifeq ($(W),)
subdir-ccflags-y := -Werror
+endif
# platform specific definitions
include arch/mips/Kbuild.platforms
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4e9ebf65d071..2828ecde133d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -46,6 +46,7 @@ config MIPS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_SMP_IDLE_THREAD
select BUILDTIME_EXTABLE_SORT
+ select GENERIC_CPU_AUTOPROBE
select GENERIC_CLOCKEVENTS
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_CMOS_UPDATE
@@ -68,6 +69,7 @@ config MIPS
select HANDLE_DOMAIN_IRQ
select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_COPY_THREAD_TLS
menu "Machine selection"
@@ -1039,14 +1041,6 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
-config ARCH_HAS_ILOG2_U32
- bool
- default n
-
-config ARCH_HAS_ILOG2_U64
- bool
- default n
-
config GENERIC_HWEIGHT
bool
default y
@@ -1372,6 +1366,7 @@ config CPU_LOONGSON3
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select MIPS_PGD_C0_CONTEXT
+ select MIPS_L1_CACHE_SHIFT_6
select GPIOLIB
help
The Loongson 3 processor implements the MIPS64R2 instruction
@@ -2120,10 +2115,13 @@ config MIPS_VA_BITS_48
bool "48 bits virtual memory"
depends on 64BIT
help
- Support a maximum at least 48 bits of application virtual memory.
- Default is 40 bits or less, depending on the CPU.
- This option result in a small memory overhead for page tables.
- This option is only supported with 16k and 64k page sizes.
+ Support a maximum of at least 48 bits of application virtual
+ memory. The default is 40 bits or less, depending on the CPU.
+ For page sizes of 16k and above, this option results in a small
+ memory overhead for page tables. For a 4k page size, a fourth
+ level of page tables is added, which imposes both a memory
+ overhead and slower TLB fault handling.
+
If unsure, say N.
choice
@@ -2133,7 +2131,6 @@ choice
config PAGE_SIZE_4KB
bool "4kB"
depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
- depends on !MIPS_VA_BITS_48
help
This option selects the standard 4kB Linux page size. On some
R3000-family processors this is the only available page size. Using
@@ -2982,6 +2979,7 @@ config HAVE_LATENCYTOP_SUPPORT
config PGTABLE_LEVELS
int
+ default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
default 3 if 64BIT && !PAGE_SIZE_64KB
default 2
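
The arithmetic behind the new default: with 4k pages and 8-byte page
table entries, each level resolves 9 bits (512 entries per page), so
three levels give 12 + 3 * 9 = 39 bits of virtual address, short of 48;
a fourth level gives 12 + 4 * 9 = 48 bits, matching MIPS_VA_BITS_48.
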
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 7f975b20b20c..42a97c59200f 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -82,7 +82,7 @@ config CMDLINE_OVERRIDE
config SB1XXX_CORELIS
bool "Corelis Debugger"
depends on SIBYTE_SB1xxx_SOC
- select DEBUG_INFO
+ select DEBUG_INFO if !COMPILE_TEST
help
Select compile flags that produce code that can be processed by the
Corelis mksym utility and UDB Emulator.
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 2728a9a9c7c5..145b5ce8eb7e 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@
-DADDR_BITS=$(ADDR_BITS) \
-DADDR_CELLS=$(itb_addr_cells)
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
$(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
quiet_cmd_itb-image = ITB $@
diff --git a/arch/mips/boot/dts/include/dt-bindings b/arch/mips/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/mips/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index c370426a7322..5c0b56203bae 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -25,15 +25,6 @@ endif # CPU_CAVIUM_OCTEON
if CAVIUM_OCTEON_SOC
-config CAVIUM_OCTEON_2ND_KERNEL
- bool "Build the kernel to be used as a 2nd kernel on the same chip"
- default "n"
- help
- This option configures this kernel to be linked at a different
- address and use the 2nd uart for output. This allows a kernel built
- with this option to be run at the same time as one built without this
- option.
-
config CAVIUM_OCTEON_LOCK_L2
bool "Lock often used kernel code in the L2"
default "y"
diff --git a/arch/mips/cavium-octeon/Platform b/arch/mips/cavium-octeon/Platform
index 8a301cb12d68..45be853700e6 100644
--- a/arch/mips/cavium-octeon/Platform
+++ b/arch/mips/cavium-octeon/Platform
@@ -4,8 +4,4 @@
platform-$(CONFIG_CAVIUM_OCTEON_SOC) += cavium-octeon/
cflags-$(CONFIG_CAVIUM_OCTEON_SOC) += \
-I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
-ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
-load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff84100000
-else
load-$(CONFIG_CAVIUM_OCTEON_SOC) += 0xffffffff81100000
-endif
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 89b5273299ab..f091c9b70603 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2010 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -239,6 +239,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
return counter;
@@ -249,6 +250,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
return counter;
@@ -259,6 +261,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
return counter;
@@ -270,6 +273,7 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
else {
uint64_t counter = 0;
int tad;
+
for (tad = 0; tad < CVMX_L2C_TADS; tad++)
counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
return counter;
@@ -301,7 +305,7 @@ static void fault_in(uint64_t addr, int len)
*/
CVMX_DCACHE_INVALIDATE;
while (len > 0) {
- ACCESS_ONCE(*ptr);
+ READ_ONCE(*ptr);
len -= CVMX_CACHE_LINE_SIZE;
ptr += CVMX_CACHE_LINE_SIZE;
}
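
READ_ONCE() is the modern spelling of ACCESS_ONCE(), which was being
phased out tree-wide; for a scalar like this, both reduce to a forced,
single volatile load the compiler may not elide or merge, roughly:

	/* illustration only, not the kernel's exact definition */
	#define FORCED_LOAD(x)	(*(const volatile typeof(x) *)&(x))
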
@@ -375,7 +379,9 @@ int cvmx_l2c_lock_line(uint64_t addr)
if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
+
lckbase.s.lck_base = addr_tmp >> 7;
+
} else {
lckbase.s.lck_base = addr >> 7;
}
@@ -435,6 +441,7 @@ void cvmx_l2c_flush(void)
/* These may look like constants, but they aren't... */
int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
+
for (set = 0; set < n_set; set++) {
for (assoc = 0; assoc < n_assoc; assoc++) {
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
@@ -519,89 +526,49 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:40;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:20; /* Phys mem addr (33..14) */
-#else
- uint64_t addr:20; /* Phys mem addr (33..14) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:40;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:40,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:20, /* Phys addr (33..14) */
+ ;))))))
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:41;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:19; /* Phys mem addr (33..15) */
-#else
- uint64_t addr:19; /* Phys mem addr (33..15) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:41;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:41,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:19, /* Phys addr (33..15) */
+ ;))))))
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:42;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:18; /* Phys mem addr (33..16) */
-#else
- uint64_t addr:18; /* Phys mem addr (33..16) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:42;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:42,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:18, /* Phys addr (33..16) */
+ ;))))))
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:43;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:17; /* Phys mem addr (33..17) */
-#else
- uint64_t addr:17; /* Phys mem addr (33..17) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:43;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:43,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:17, /* Phys addr (33..17) */
+ ;))))))
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:44;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:16; /* Phys mem addr (33..18) */
-#else
- uint64_t addr:16; /* Phys mem addr (33..18) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:44;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:44,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:16, /* Phys addr (33..18) */
+ ;))))))
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
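
The __BITFIELD_FIELD() macro used above collapses each pair of big- and
little-endian #ifdef branches into a single declaration; its definition
in arch/mips/include/uapi/asm/bitfield.h is along these lines:

	#ifdef __MIPSEB__
	#define __BITFIELD_FIELD(field, more)	\
		field;				\
		more
	#elif defined(__MIPSEL__)
	#define __BITFIELD_FIELD(field, more)	\
		more				\
		field;
	#else
	#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
	#endif
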
@@ -629,8 +596,8 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union __cvmx_l2c_tag tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
unsigned long flags;
-
union cvmx_l2c_dbg debug_val;
+
debug_val.u64 = 0;
/*
* For low core count parts, the core number is always small
@@ -683,8 +650,8 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
union cvmx_l2c_tag tag;
- tag.u64 = 0;
+ tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc()) {
cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
return tag;
@@ -767,10 +734,12 @@ uint32_t cvmx_l2c_address_to_index(uint64_t addr)
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
union cvmx_l2c_ctl l2c_ctl;
+
l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
indxalias = !l2c_ctl.s.disidxalias;
} else {
union cvmx_l2c_cfg l2c_cfg;
+
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
indxalias = l2c_cfg.s.idxalias;
}
@@ -778,6 +747,7 @@ uint32_t cvmx_l2c_address_to_index(uint64_t addr)
if (indxalias) {
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
+
idx ^= idx / cvmx_l2c_get_num_sets();
idx ^= a_14_12;
} else {
@@ -801,6 +771,7 @@ int cvmx_l2c_get_cache_size_bytes(void)
int cvmx_l2c_get_set_bits(void)
{
int l2_set_bits;
+
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
@@ -828,6 +799,7 @@ int cvmx_l2c_get_num_sets(void)
int cvmx_l2c_get_num_assoc(void)
{
int l2_assoc;
+
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX) ||
@@ -869,16 +841,17 @@ int cvmx_l2c_get_num_assoc(void)
else if (mio_fus_dat3.s.l2c_crip == 1)
l2_assoc = 12;
} else {
- union cvmx_l2d_fus3 val;
- val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ uint64_t l2d_fus3;
+
+ l2d_fus3 = cvmx_read_csr(CVMX_L2D_FUS3);
/*
* Using shifts here, as bit position names are
* different for each model but they all mean the
* same.
*/
- if ((val.u64 >> 35) & 0x1)
+ if ((l2d_fus3 >> 35) & 0x1)
l2_assoc = l2_assoc >> 2;
- else if ((val.u64 >> 34) & 0x1)
+ else if ((l2d_fus3 >> 34) & 0x1)
l2_assoc = l2_assoc >> 1;
}
return l2_assoc;
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index d08a2bce653c..341052387b49 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2010 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -63,16 +63,15 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
char pass[4];
int clock_mhz;
const char *suffix;
- union cvmx_l2d_fus3 fus3;
int num_cores;
union cvmx_mio_fus_dat2 fus_dat2;
union cvmx_mio_fus_dat3 fus_dat3;
char fuse_model[10];
uint32_t fuse_data = 0;
+ uint64_t l2d_fus3 = 0;
- fus3.u64 = 0;
if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
- fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ l2d_fus3 = (cvmx_read_csr(CVMX_L2D_FUS3) >> 34) & 0x3;
fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
num_cores = cvmx_octeon_num_cores();
@@ -192,7 +191,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
/* Now figure out the family, the first two digits */
switch ((chip_id >> 8) & 0xff) {
case 0: /* CN38XX, CN37XX or CN36XX */
- if (fus3.cn38xx.crip_512k) {
+ if (l2d_fus3) {
/*
* For some unknown reason, the 16 core one is
* called 37 instead of 36.
@@ -223,7 +222,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
}
break;
case 1: /* CN31XX or CN3020 */
- if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
+ if ((chip_id & 0x10) || l2d_fus3)
family = "30";
else
family = "31";
@@ -246,7 +245,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
case 2: /* CN3010 or CN3005 */
family = "30";
/* A chip with half cache is an 05 */
- if (fus3.cn30xx.crip_64k)
+ if (l2d_fus3)
core_model = "05";
/*
* This series of chips didn't follow the standard
@@ -267,7 +266,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
case 3: /* CN58XX */
family = "58";
/* Special case. 4 core, half cache (CP with half cache) */
- if ((num_cores == 4) && fus3.cn58xx.crip_1024k && !strncmp(suffix, "CP", 2))
+ if ((num_cores == 4) && l2d_fus3 && !strncmp(suffix, "CP", 2))
core_model = "29";
/* Pass 1 uses different encodings for pass numbers */
@@ -290,7 +289,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
break;
case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
if (fus_dat2.cn56xx.raid_en) {
- if (fus3.cn56xx.crip_1024k)
+ if (l2d_fus3)
family = "55";
else
family = "57";
@@ -309,7 +308,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
if (fus_dat3.cn56xx.bar2_en)
suffix = "NSPB2";
}
- if (fus3.cn56xx.crip_1024k)
+ if (l2d_fus3)
family = "54";
else
family = "56";
@@ -319,7 +318,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
family = "50";
break;
case 7: /* CN52XX */
- if (fus3.cn52xx.crip_256k)
+ if (l2d_fus3)
family = "51";
else
family = "52";
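Every per-chip test in this function (fus3.cn38xx.crip_512k, fus3.cn31xx.crip_128k, fus3.cn56xx.crip_1024k, ...) named the same cache-cripple fuses, so after masking them out once at the top each test reduces to a single truth check. A sketch, with the mask taken from the hunk above:

    /* 2-bit cache-cripple field; nonzero = part of the L2 is fused off */
    uint64_t l2d_fus3 = (cvmx_read_csr(CVMX_L2D_FUS3) >> 34) & 0x3;
    if (l2d_fus3)           /* replaces every fus3.cnXXxx.crip_* check */
            core_model = "05";  /* choose the reduced-cache part number */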
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 3375e61daa19..8505db478904 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,71 +3,27 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2016 Cavium Networks
+ * Copyright (C) 2004-2017 Cavium, Inc.
* Copyright (C) 2008 Wind River Systems
*/
-#include <linux/init.h>
-#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
-#include <linux/usb/ehci_def.h>
-#include <linux/usb/ehci_pdriver.h>
-#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-helper-board.h>
+
+#ifdef CONFIG_USB
+#include <linux/usb/ehci_def.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
-/* Octeon Random Number Generator. */
-static int __init octeon_rng_device_init(void)
-{
- struct platform_device *pd;
- int ret = 0;
-
- struct resource rng_resources[] = {
- {
- .flags = IORESOURCE_MEM,
- .start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
- .end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
- }, {
- .flags = IORESOURCE_MEM,
- .start = cvmx_build_io_address(8, 0),
- .end = cvmx_build_io_address(8, 0) + 0x7
- }
- };
-
- pd = platform_device_alloc("octeon_rng", -1);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = platform_device_add_resources(pd, rng_resources,
- ARRAY_SIZE(rng_resources));
- if (ret)
- goto fail;
-
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
-
- return ret;
-fail:
- platform_device_put(pd);
-
-out:
- return ret;
-}
-device_initcall(octeon_rng_device_init);
-
-#ifdef CONFIG_USB
-
static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
@@ -440,8 +396,49 @@ device_initcall(octeon_ohci_device_init);
#endif /* CONFIG_USB */
-static struct of_device_id __initdata octeon_ids[] = {
+/* Octeon Random Number Generator. */
+static int __init octeon_rng_device_init(void)
+{
+ struct platform_device *pd;
+ int ret = 0;
+ struct resource rng_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
+ .end = XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
+ }, {
+ .flags = IORESOURCE_MEM,
+ .start = cvmx_build_io_address(8, 0),
+ .end = cvmx_build_io_address(8, 0) + 0x7
+ }
+ };
+
+ pd = platform_device_alloc("octeon_rng", -1);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = platform_device_add_resources(pd, rng_resources,
+ ARRAY_SIZE(rng_resources));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+
+out:
+ return ret;
+}
+device_initcall(octeon_rng_device_init);
+
+static const struct of_device_id octeon_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "cavium,octeon-6335-uctl", },
{ .compatible = "cavium,octeon-5750-usbn", },
@@ -481,6 +478,7 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr)
alt_phy_handle = fdt_getprop(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
if (alt_phy_handle) {
u32 alt_phandle = be32_to_cpup(alt_phy_handle);
+
alt_phy = fdt_node_offset_by_phandle(initial_boot_params, alt_phandle);
} else {
alt_phy = -1;
@@ -579,6 +577,7 @@ static void __init octeon_fdt_rm_ethernet(int node)
if (phy_handle) {
u32 ph = be32_to_cpup(phy_handle);
int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
+
if (p >= 0)
fdt_nop_node(initial_boot_params, p);
}
@@ -728,6 +727,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int mgmt;
+
snprintf(name_buffer, sizeof(name_buffer),
"mix%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@@ -743,6 +743,7 @@ int __init octeon_prune_device_tree(void)
name_buffer);
} else {
int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
+
octeon_fdt_set_phy(mgmt, phy_addr);
}
}
@@ -751,6 +752,7 @@ int __init octeon_prune_device_tree(void)
pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
if (pip_path) {
int pip = fdt_path_offset(initial_boot_params, pip_path);
+
if (pip >= 0)
for (i = 0; i <= 4; i++)
octeon_fdt_pip_iface(pip, i);
@@ -767,6 +769,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int i2c;
+
snprintf(name_buffer, sizeof(name_buffer),
"twsi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@@ -797,11 +800,11 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 2; i++) {
int i2c;
+
snprintf(name_buffer, sizeof(name_buffer),
"smi%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
name_buffer, NULL);
-
if (alias_prop) {
i2c = fdt_path_offset(initial_boot_params, alias_prop);
if (i2c < 0)
@@ -824,6 +827,7 @@ int __init octeon_prune_device_tree(void)
for (i = 0; i < 3; i++) {
int uart;
+
snprintf(name_buffer, sizeof(name_buffer),
"uart%d", i);
alias_prop = fdt_getprop(initial_boot_params, aliases,
@@ -863,6 +867,7 @@ int __init octeon_prune_device_tree(void)
int len;
int cf = fdt_path_offset(initial_boot_params, alias_prop);
+
base_ptr = 0;
if (octeon_bootinfo->major_version == 1
&& octeon_bootinfo->minor_version >= 1) {
@@ -912,6 +917,7 @@ int __init octeon_prune_device_tree(void)
fdt_nop_property(initial_boot_params, cf, "cavium,dma-engine-handle");
if (!is_16bit) {
__be32 width = cpu_to_be32(8);
+
fdt_setprop_inplace(initial_boot_params, cf,
"cavium,bus-width", &width, sizeof(width));
}
@@ -1004,6 +1010,7 @@ end_led:
;
}
+#ifdef CONFIG_USB
/* OHCI/UHCI USB */
alias_prop = fdt_getprop(initial_boot_params, aliases,
"uctl", NULL);
@@ -1036,6 +1043,7 @@ end_led:
} else {
__be32 new_f[1];
enum cvmx_helper_board_usb_clock_types c;
+
c = __cvmx_helper_board_usb_get_clock_type();
switch (c) {
case USB_CLOCK_TYPE_REF_48:
@@ -1052,6 +1060,7 @@ end_led:
}
}
}
+#endif
return 0;
}
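The relocated octeon_ids match table is not consumed in this hunk; elsewhere in this file it is handed to the OF core so that device-tree nodes whose compatible string matches an entry are published as platform devices. Roughly as below (treat the exact initcall level as an assumption of this sketch):

    static int __init octeon_publish_devices(void)
    {
            return of_platform_bus_probe(NULL, octeon_ids, NULL);
    }
    device_initcall(octeon_publish_devices);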
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d9dbeb0b165b..a8034d0dcade 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -374,14 +374,8 @@ void octeon_write_lcd(const char *s)
*/
int octeon_get_boot_uart(void)
{
- int uart;
-#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
- uart = 1;
-#else
- uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
+ return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
1 : 0;
-#endif
- return uart;
}
/**
@@ -901,14 +895,10 @@ void __init prom_init(void)
}
if (strstr(arcs_cmdline, "console=") == NULL) {
-#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
- strcat(arcs_cmdline, " console=ttyS0,115200");
-#else
if (octeon_uart == 1)
strcat(arcs_cmdline, " console=ttyS1,115200");
else
strcat(arcs_cmdline, " console=ttyS0,115200");
-#endif
}
mips_hpt_frequency = octeon_get_clock_rate();
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index c95d94c7838b..91aacf2ef26d 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -36,6 +36,8 @@ CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
CONFIG_NETFILTER=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
@@ -80,6 +82,7 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
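IP autoconfiguration plus NFS root turn the generic defconfig into a netboot-capable kernel. An illustrative command line (server address and export path are placeholders):

    root=/dev/nfs nfsroot=192.168.1.1:/export/mips-rootfs ip=dhcp rw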
diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
index b4db69fbc40c..fc67947ed658 100644
--- a/arch/mips/include/asm/cache.h
+++ b/arch/mips/include/asm/cache.h
@@ -9,14 +9,9 @@
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H
-#include <kmalloc.h>
-
#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-#define SMP_CACHE_SHIFT L1_CACHE_SHIFT
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif /* _ASM_CACHE_H */
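SMP_CACHE_BYTES does not disappear with the deleted lines: include/linux/cache.h carries the generic fallback shown below, while SMP_CACHE_SHIFT is removed outright (it has no generic equivalent), and the <kmalloc.h> include goes with them.

    /* include/linux/cache.h */
    #ifndef SMP_CACHE_BYTES
    #define SMP_CACHE_BYTES L1_CACHE_BYTES
    #endif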
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index be3b4c25f335..cd6efb07c980 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -12,10 +12,9 @@
#ifndef __ASM_CPU_INFO_H
#define __ASM_CPU_INFO_H
+#include <linux/cache.h>
#include <linux/types.h>
-#include <asm/cache.h>
-
/*
* Descriptor for a cache
*/
diff --git a/arch/mips/include/asm/cpufeature.h b/arch/mips/include/asm/cpufeature.h
new file mode 100644
index 000000000000..c63ec05313c1
--- /dev/null
+++ b/arch/mips/include/asm/cpufeature.h
@@ -0,0 +1,26 @@
+/*
+ * CPU feature definitions for module loading, used by
+ * module_cpu_feature_match(), see uapi/asm/hwcap.h for MIPS CPU features.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <uapi/asm/hwcap.h>
+#include <asm/elf.h>
+
+#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
+
+#define cpu_feature(x) ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+ return elf_hwcap & (1UL << num);
+}
+
+#endif /* __ASM_CPUFEATURE_H */
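A sketch of the intended use, via the generic module_cpu_feature_match() from <linux/cpufeature.h>; the driver and its init function are placeholders, and HWCAP_MIPS_R6 is one of the bits defined in uapi/asm/hwcap.h:

    #include <linux/cpufeature.h>
    #include <linux/module.h>

    static int __init my_r6_driver_init(void)
    {
            /* the CPU feature is present: probe and register here */
            return 0;
    }
    /* expands to a module init that bails out with -ENODEV unless
     * cpu_have_feature(cpu_feature(MIPS_R6)) is true */
    module_cpu_feature_match(MIPS_R6, my_r6_driver_init);

    MODULE_LICENSE("GPL");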
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index d34536e7653f..279b6d14ffeb 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
#define LAST_PKMAP 1024
+#endif
+
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
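The halving follows from the comment above this hunk: the pkmap page table must sit in one physically contiguous chunk, and with CONFIG_PHYS_ADDR_T_64BIT each PTE widens to 8 bytes, so a single 4 KiB page holds 4096 / 8 = 512 entries rather than 1024. The kmap window shrinks accordingly, from 1024 * 4 KiB = 4 MiB to 512 * 4 KiB = 2 MiB (figures assume 4 KiB pages).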
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 291846d9ba83..ad1a99948f27 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;
#define flush_insn_slot(p) \
do { \
- flush_icache_range((unsigned long)p->addr, \
+ if (p->addr) \
+ flush_icache_range((unsigned long)p->addr, \
(unsigned long)p->addr + \
(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
} while (0)
diff --git a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
index 98cf40417c5d..d38be668e338 100644
--- a/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-rm/cpu-feature-overrides.h
@@ -10,8 +10,6 @@
#ifndef __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_RM200_CPU_FEATURE_OVERRIDES_H
-#include <cpu-feature-overrides.h>
-
#define cpu_has_tlb 1
#define cpu_has_4kex 1
#define cpu_has_4k_cache 1
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
index 10262cb6ff50..d045973ddb33 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,3140 +28,177 @@
#ifndef __CVMX_L2C_DEFS_H__
#define __CVMX_L2C_DEFS_H__
-#define CVMX_L2C_BIG_CTL (CVMX_ADD_IO_SEG(0x0001180080800030ull))
-#define CVMX_L2C_BST (CVMX_ADD_IO_SEG(0x00011800808007F8ull))
-#define CVMX_L2C_BST0 (CVMX_ADD_IO_SEG(0x00011800800007F8ull))
-#define CVMX_L2C_BST1 (CVMX_ADD_IO_SEG(0x00011800800007F0ull))
-#define CVMX_L2C_BST2 (CVMX_ADD_IO_SEG(0x00011800800007E8ull))
-#define CVMX_L2C_BST_MEMX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F8ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_BST_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F0ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_BST_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007F8ull) + ((block_id) & 3) * 0x40000ull)
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
#define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull))
-#define CVMX_L2C_COP0_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080940000ull) + ((offset) & 16383) * 8)
#define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull))
-#define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull))
-#define CVMX_L2C_DUT (CVMX_ADD_IO_SEG(0x0001180080000050ull))
-#define CVMX_L2C_DUT_MAPX(offset) (CVMX_ADD_IO_SEG(0x0001180080E00000ull) + ((offset) & 8191) * 8)
-#define CVMX_L2C_ERR_TDTX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_ERR_TTGX(block_id) (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_ERR_VBFX(block_id) (CVMX_ADD_IO_SEG(0x0001180080C007F0ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_ERR_XMC (CVMX_ADD_IO_SEG(0x00011800808007D8ull))
-#define CVMX_L2C_GRPWRR0 (CVMX_ADD_IO_SEG(0x00011800800000C8ull))
-#define CVMX_L2C_GRPWRR1 (CVMX_ADD_IO_SEG(0x00011800800000D0ull))
-#define CVMX_L2C_INT_EN (CVMX_ADD_IO_SEG(0x0001180080000100ull))
-#define CVMX_L2C_INT_ENA (CVMX_ADD_IO_SEG(0x0001180080800020ull))
-#define CVMX_L2C_INT_REG (CVMX_ADD_IO_SEG(0x0001180080800018ull))
-#define CVMX_L2C_INT_STAT (CVMX_ADD_IO_SEG(0x00011800800000F8ull))
-#define CVMX_L2C_IOCX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800420ull))
-#define CVMX_L2C_IORX_PFC(block_id) (CVMX_ADD_IO_SEG(0x0001180080800428ull))
#define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull))
#define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull))
-#define CVMX_L2C_LFB0 (CVMX_ADD_IO_SEG(0x0001180080000038ull))
-#define CVMX_L2C_LFB1 (CVMX_ADD_IO_SEG(0x0001180080000040ull))
-#define CVMX_L2C_LFB2 (CVMX_ADD_IO_SEG(0x0001180080000048ull))
-#define CVMX_L2C_LFB3 (CVMX_ADD_IO_SEG(0x00011800800000B8ull))
-#define CVMX_L2C_OOB (CVMX_ADD_IO_SEG(0x00011800800000D8ull))
-#define CVMX_L2C_OOB1 (CVMX_ADD_IO_SEG(0x00011800800000E0ull))
-#define CVMX_L2C_OOB2 (CVMX_ADD_IO_SEG(0x00011800800000E8ull))
-#define CVMX_L2C_OOB3 (CVMX_ADD_IO_SEG(0x00011800800000F0ull))
+#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
+#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + \
+ ((offset) & 3) * 8)
#define CVMX_L2C_PFC0 CVMX_L2C_PFCX(0)
#define CVMX_L2C_PFC1 CVMX_L2C_PFCX(1)
#define CVMX_L2C_PFC2 CVMX_L2C_PFCX(2)
#define CVMX_L2C_PFC3 CVMX_L2C_PFCX(3)
-#define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull))
-#define CVMX_L2C_PFCX(offset) (CVMX_ADD_IO_SEG(0x0001180080000098ull) + ((offset) & 3) * 8)
-#define CVMX_L2C_PPGRP (CVMX_ADD_IO_SEG(0x00011800800000C0ull))
-#define CVMX_L2C_QOS_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080880200ull) + ((offset) & 1) * 8)
-#define CVMX_L2C_QOS_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080880000ull) + ((offset) & 31) * 8)
-#define CVMX_L2C_QOS_WGT (CVMX_ADD_IO_SEG(0x0001180080800008ull))
-#define CVMX_L2C_RSCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800410ull) + ((offset) & 3) * 64)
-#define CVMX_L2C_RSDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800418ull) + ((offset) & 3) * 64)
#define CVMX_L2C_SPAR0 (CVMX_ADD_IO_SEG(0x0001180080000068ull))
#define CVMX_L2C_SPAR1 (CVMX_ADD_IO_SEG(0x0001180080000070ull))
#define CVMX_L2C_SPAR2 (CVMX_ADD_IO_SEG(0x0001180080000078ull))
#define CVMX_L2C_SPAR3 (CVMX_ADD_IO_SEG(0x0001180080000080ull))
#define CVMX_L2C_SPAR4 (CVMX_ADD_IO_SEG(0x0001180080000088ull))
-#define CVMX_L2C_TADX_ECC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00018ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_ECC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00020ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_IEN(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00000ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_INT(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00028ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_PRF(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00008ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull) + ((block_id) & 3) * 0x40000ull)
-#define CVMX_L2C_VER_ID (CVMX_ADD_IO_SEG(0x00011800808007E0ull))
-#define CVMX_L2C_VER_IOB (CVMX_ADD_IO_SEG(0x00011800808007F0ull))
-#define CVMX_L2C_VER_MSC (CVMX_ADD_IO_SEG(0x00011800808007D0ull))
-#define CVMX_L2C_VER_PP (CVMX_ADD_IO_SEG(0x00011800808007E8ull))
-#define CVMX_L2C_VIRTID_IOBX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0200ull) + ((offset) & 1) * 8)
-#define CVMX_L2C_VIRTID_PPX(offset) (CVMX_ADD_IO_SEG(0x00011800808C0000ull) + ((offset) & 31) * 8)
-#define CVMX_L2C_VRT_CTL (CVMX_ADD_IO_SEG(0x0001180080800010ull))
-#define CVMX_L2C_VRT_MEMX(offset) (CVMX_ADD_IO_SEG(0x0001180080900000ull) + ((offset) & 1023) * 8)
-#define CVMX_L2C_WPAR_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080840200ull) + ((offset) & 1) * 8)
-#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + ((offset) & 31) * 8)
-#define CVMX_L2C_XMCX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800400ull) + ((offset) & 3) * 64)
-#define CVMX_L2C_XMC_CMD (CVMX_ADD_IO_SEG(0x0001180080800028ull))
-#define CVMX_L2C_XMDX_PFC(offset) (CVMX_ADD_IO_SEG(0x0001180080800408ull) + ((offset) & 3) * 64)
-
-union cvmx_l2c_big_ctl {
- uint64_t u64;
- struct cvmx_l2c_big_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t maxdram:4;
- uint64_t reserved_1_3:3;
- uint64_t disable:1;
-#else
- uint64_t disable:1;
- uint64_t reserved_1_3:3;
- uint64_t maxdram:4;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_l2c_big_ctl_s cn61xx;
- struct cvmx_l2c_big_ctl_s cn63xx;
- struct cvmx_l2c_big_ctl_s cn66xx;
- struct cvmx_l2c_big_ctl_s cn68xx;
- struct cvmx_l2c_big_ctl_s cn68xxp1;
- struct cvmx_l2c_big_ctl_s cnf71xx;
-};
-
-union cvmx_l2c_bst {
- uint64_t u64;
- struct cvmx_l2c_bst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dutfl:32;
- uint64_t rbffl:4;
- uint64_t xbffl:4;
- uint64_t tdpfl:4;
- uint64_t ioccmdfl:4;
- uint64_t iocdatfl:4;
- uint64_t dutresfl:4;
- uint64_t vrtfl:4;
- uint64_t tdffl:4;
-#else
- uint64_t tdffl:4;
- uint64_t vrtfl:4;
- uint64_t dutresfl:4;
- uint64_t iocdatfl:4;
- uint64_t ioccmdfl:4;
- uint64_t tdpfl:4;
- uint64_t xbffl:4;
- uint64_t rbffl:4;
- uint64_t dutfl:32;
-#endif
- } s;
- struct cvmx_l2c_bst_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t dutfl:4;
- uint64_t reserved_17_31:15;
- uint64_t ioccmdfl:1;
- uint64_t reserved_13_15:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_9_11:3;
- uint64_t dutresfl:1;
- uint64_t reserved_5_7:3;
- uint64_t vrtfl:1;
- uint64_t reserved_1_3:3;
- uint64_t tdffl:1;
-#else
- uint64_t tdffl:1;
- uint64_t reserved_1_3:3;
- uint64_t vrtfl:1;
- uint64_t reserved_5_7:3;
- uint64_t dutresfl:1;
- uint64_t reserved_9_11:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_13_15:3;
- uint64_t ioccmdfl:1;
- uint64_t reserved_17_31:15;
- uint64_t dutfl:4;
- uint64_t reserved_36_63:28;
-#endif
- } cn61xx;
- struct cvmx_l2c_bst_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_38_63:26;
- uint64_t dutfl:6;
- uint64_t reserved_17_31:15;
- uint64_t ioccmdfl:1;
- uint64_t reserved_13_15:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_9_11:3;
- uint64_t dutresfl:1;
- uint64_t reserved_5_7:3;
- uint64_t vrtfl:1;
- uint64_t reserved_1_3:3;
- uint64_t tdffl:1;
-#else
- uint64_t tdffl:1;
- uint64_t reserved_1_3:3;
- uint64_t vrtfl:1;
- uint64_t reserved_5_7:3;
- uint64_t dutresfl:1;
- uint64_t reserved_9_11:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_13_15:3;
- uint64_t ioccmdfl:1;
- uint64_t reserved_17_31:15;
- uint64_t dutfl:6;
- uint64_t reserved_38_63:26;
-#endif
- } cn63xx;
- struct cvmx_l2c_bst_cn63xx cn63xxp1;
- struct cvmx_l2c_bst_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_42_63:22;
- uint64_t dutfl:10;
- uint64_t reserved_17_31:15;
- uint64_t ioccmdfl:1;
- uint64_t reserved_13_15:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_9_11:3;
- uint64_t dutresfl:1;
- uint64_t reserved_5_7:3;
- uint64_t vrtfl:1;
- uint64_t reserved_1_3:3;
- uint64_t tdffl:1;
-#else
- uint64_t tdffl:1;
- uint64_t reserved_1_3:3;
- uint64_t vrtfl:1;
- uint64_t reserved_5_7:3;
- uint64_t dutresfl:1;
- uint64_t reserved_9_11:3;
- uint64_t iocdatfl:1;
- uint64_t reserved_13_15:3;
- uint64_t ioccmdfl:1;
- uint64_t reserved_17_31:15;
- uint64_t dutfl:10;
- uint64_t reserved_42_63:22;
-#endif
- } cn66xx;
- struct cvmx_l2c_bst_s cn68xx;
- struct cvmx_l2c_bst_s cn68xxp1;
- struct cvmx_l2c_bst_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_bst0 {
- uint64_t u64;
- struct cvmx_l2c_bst0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t dtbnk:1;
- uint64_t wlb_msk:4;
- uint64_t dtcnt:13;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:13;
- uint64_t wlb_msk:4;
- uint64_t dtbnk:1;
- uint64_t reserved_24_63:40;
-#endif
- } s;
- struct cvmx_l2c_bst0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t wlb_msk:4;
- uint64_t reserved_15_18:4;
- uint64_t dtcnt:9;
- uint64_t dt:1;
- uint64_t reserved_4_4:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t reserved_4_4:1;
- uint64_t dt:1;
- uint64_t dtcnt:9;
- uint64_t reserved_15_18:4;
- uint64_t wlb_msk:4;
- uint64_t reserved_23_63:41;
-#endif
- } cn30xx;
- struct cvmx_l2c_bst0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t wlb_msk:4;
- uint64_t reserved_16_18:3;
- uint64_t dtcnt:10;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:10;
- uint64_t reserved_16_18:3;
- uint64_t wlb_msk:4;
- uint64_t reserved_23_63:41;
-#endif
- } cn31xx;
- struct cvmx_l2c_bst0_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t dtcnt:13;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:13;
- uint64_t reserved_19_63:45;
-#endif
- } cn38xx;
- struct cvmx_l2c_bst0_cn38xx cn38xxp2;
- struct cvmx_l2c_bst0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t dtbnk:1;
- uint64_t wlb_msk:4;
- uint64_t reserved_16_18:3;
- uint64_t dtcnt:10;
- uint64_t dt:1;
- uint64_t stin_msk:1;
- uint64_t wlb_dat:4;
-#else
- uint64_t wlb_dat:4;
- uint64_t stin_msk:1;
- uint64_t dt:1;
- uint64_t dtcnt:10;
- uint64_t reserved_16_18:3;
- uint64_t wlb_msk:4;
- uint64_t dtbnk:1;
- uint64_t reserved_24_63:40;
-#endif
- } cn50xx;
- struct cvmx_l2c_bst0_cn50xx cn52xx;
- struct cvmx_l2c_bst0_cn50xx cn52xxp1;
- struct cvmx_l2c_bst0_s cn56xx;
- struct cvmx_l2c_bst0_s cn56xxp1;
- struct cvmx_l2c_bst0_s cn58xx;
- struct cvmx_l2c_bst0_s cn58xxp1;
-};
-
-union cvmx_l2c_bst1 {
- uint64_t u64;
- struct cvmx_l2c_bst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_bst1_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t vwdf:4;
- uint64_t lrf:2;
- uint64_t vab_vwcf:1;
- uint64_t reserved_5_8:4;
- uint64_t l2t:5;
-#else
- uint64_t l2t:5;
- uint64_t reserved_5_8:4;
- uint64_t vab_vwcf:1;
- uint64_t lrf:2;
- uint64_t vwdf:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn30xx;
- struct cvmx_l2c_bst1_cn30xx cn31xx;
- struct cvmx_l2c_bst1_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t vwdf:4;
- uint64_t lrf:2;
- uint64_t vab_vwcf:1;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t vab_vwcf:1;
- uint64_t lrf:2;
- uint64_t vwdf:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_l2c_bst1_cn38xx cn38xxp2;
- struct cvmx_l2c_bst1_cn38xx cn50xx;
- struct cvmx_l2c_bst1_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t plc2:1;
- uint64_t plc1:1;
- uint64_t plc0:1;
- uint64_t vwdf:4;
- uint64_t reserved_11_11:1;
- uint64_t ilc:1;
- uint64_t vab_vwcf:1;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t vab_vwcf:1;
- uint64_t ilc:1;
- uint64_t reserved_11_11:1;
- uint64_t vwdf:4;
- uint64_t plc0:1;
- uint64_t plc1:1;
- uint64_t plc2:1;
- uint64_t reserved_19_63:45;
-#endif
- } cn52xx;
- struct cvmx_l2c_bst1_cn52xx cn52xxp1;
- struct cvmx_l2c_bst1_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t plc2:1;
- uint64_t plc1:1;
- uint64_t plc0:1;
- uint64_t ilc:1;
- uint64_t vwdf1:4;
- uint64_t vwdf0:4;
- uint64_t vab_vwcf1:1;
- uint64_t reserved_10_10:1;
- uint64_t vab_vwcf0:1;
- uint64_t l2t:9;
-#else
- uint64_t l2t:9;
- uint64_t vab_vwcf0:1;
- uint64_t reserved_10_10:1;
- uint64_t vab_vwcf1:1;
- uint64_t vwdf0:4;
- uint64_t vwdf1:4;
- uint64_t ilc:1;
- uint64_t plc0:1;
- uint64_t plc1:1;
- uint64_t plc2:1;
- uint64_t reserved_24_63:40;
-#endif
- } cn56xx;
- struct cvmx_l2c_bst1_cn56xx cn56xxp1;
- struct cvmx_l2c_bst1_cn38xx cn58xx;
- struct cvmx_l2c_bst1_cn38xx cn58xxp1;
-};
-
-union cvmx_l2c_bst2 {
- uint64_t u64;
- struct cvmx_l2c_bst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t reserved_4_11:8;
- uint64_t ipcbst:1;
- uint64_t picbst:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t picbst:1;
- uint64_t ipcbst:1;
- uint64_t reserved_4_11:8;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_l2c_bst2_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t rmdf:4;
- uint64_t reserved_4_7:4;
- uint64_t ipcbst:1;
- uint64_t reserved_2_2:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t reserved_2_2:1;
- uint64_t ipcbst:1;
- uint64_t reserved_4_7:4;
- uint64_t rmdf:4;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn30xx;
- struct cvmx_l2c_bst2_cn30xx cn31xx;
- struct cvmx_l2c_bst2_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t rmdf:4;
- uint64_t rhdf:4;
- uint64_t ipcbst:1;
- uint64_t picbst:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t picbst:1;
- uint64_t ipcbst:1;
- uint64_t rhdf:4;
- uint64_t rmdf:4;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn38xx;
- struct cvmx_l2c_bst2_cn38xx cn38xxp2;
- struct cvmx_l2c_bst2_cn30xx cn50xx;
- struct cvmx_l2c_bst2_cn30xx cn52xx;
- struct cvmx_l2c_bst2_cn30xx cn52xxp1;
- struct cvmx_l2c_bst2_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mrb:4;
- uint64_t rmdb:4;
- uint64_t rhdb:4;
- uint64_t ipcbst:1;
- uint64_t picbst:1;
- uint64_t xrdmsk:1;
- uint64_t xrddat:1;
-#else
- uint64_t xrddat:1;
- uint64_t xrdmsk:1;
- uint64_t picbst:1;
- uint64_t ipcbst:1;
- uint64_t rhdb:4;
- uint64_t rmdb:4;
- uint64_t mrb:4;
- uint64_t reserved_16_63:48;
-#endif
- } cn56xx;
- struct cvmx_l2c_bst2_cn56xx cn56xxp1;
- struct cvmx_l2c_bst2_cn56xx cn58xx;
- struct cvmx_l2c_bst2_cn56xx cn58xxp1;
-};
-
-union cvmx_l2c_bst_memx {
- uint64_t u64;
- struct cvmx_l2c_bst_memx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t start_bist:1;
- uint64_t clear_bist:1;
- uint64_t reserved_5_61:57;
- uint64_t rdffl:1;
- uint64_t vbffl:4;
-#else
- uint64_t vbffl:4;
- uint64_t rdffl:1;
- uint64_t reserved_5_61:57;
- uint64_t clear_bist:1;
- uint64_t start_bist:1;
-#endif
- } s;
- struct cvmx_l2c_bst_memx_s cn61xx;
- struct cvmx_l2c_bst_memx_s cn63xx;
- struct cvmx_l2c_bst_memx_s cn63xxp1;
- struct cvmx_l2c_bst_memx_s cn66xx;
- struct cvmx_l2c_bst_memx_s cn68xx;
- struct cvmx_l2c_bst_memx_s cn68xxp1;
- struct cvmx_l2c_bst_memx_s cnf71xx;
-};
-
-union cvmx_l2c_bst_tdtx {
- uint64_t u64;
- struct cvmx_l2c_bst_tdtx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t fbfrspfl:8;
- uint64_t sbffl:8;
- uint64_t fbffl:8;
- uint64_t l2dfl:8;
-#else
- uint64_t l2dfl:8;
- uint64_t fbffl:8;
- uint64_t sbffl:8;
- uint64_t fbfrspfl:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_bst_tdtx_s cn61xx;
- struct cvmx_l2c_bst_tdtx_s cn63xx;
- struct cvmx_l2c_bst_tdtx_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t sbffl:8;
- uint64_t fbffl:8;
- uint64_t l2dfl:8;
-#else
- uint64_t l2dfl:8;
- uint64_t fbffl:8;
- uint64_t sbffl:8;
- uint64_t reserved_24_63:40;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_bst_tdtx_s cn66xx;
- struct cvmx_l2c_bst_tdtx_s cn68xx;
- struct cvmx_l2c_bst_tdtx_s cn68xxp1;
- struct cvmx_l2c_bst_tdtx_s cnf71xx;
-};
+#define CVMX_L2C_TADX_PFCX(offset, block_id) \
+ (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + (((offset) & 3) + \
+ ((block_id) & 7) * 0x8000ull) * 8)
+#define CVMX_L2C_TADX_PFC0(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00400ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC1(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00408ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC2(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00410ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PFC3(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00418ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_TADX_PRF(offset) (CVMX_ADD_IO_SEG(0x0001180080A00008ull) + \
+ ((offset) & 7) * 0x40000ull)
+#define CVMX_L2C_TADX_TAG(block_id) (CVMX_ADD_IO_SEG(0x0001180080A00010ull) + \
+ ((block_id) & 3) * 0x40000ull)
+#define CVMX_L2C_WPAR_IOBX(offset) (CVMX_ADD_IO_SEG(0x0001180080840200ull) + \
+ ((offset) & 1) * 8)
+#define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \
+ ((offset) & 31) * 8)
+#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
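The surviving indexed macros all follow one pattern: a fixed I/O-segment base plus a masked index times the register stride. Worked example with the per-counter PFC registers defined above:

    CVMX_L2C_PFCX(2) = CVMX_ADD_IO_SEG(0x0001180080000098ull) + (2 & 3) * 8
                     = CVMX_ADD_IO_SEG(0x00011800800000A8ull)  /* CVMX_L2C_PFC2 */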
-union cvmx_l2c_bst_ttgx {
- uint64_t u64;
- struct cvmx_l2c_bst_ttgx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t lrufl:1;
- uint64_t tagfl:16;
-#else
- uint64_t tagfl:16;
- uint64_t lrufl:1;
- uint64_t reserved_17_63:47;
-#endif
- } s;
- struct cvmx_l2c_bst_ttgx_s cn61xx;
- struct cvmx_l2c_bst_ttgx_s cn63xx;
- struct cvmx_l2c_bst_ttgx_s cn63xxp1;
- struct cvmx_l2c_bst_ttgx_s cn66xx;
- struct cvmx_l2c_bst_ttgx_s cn68xx;
- struct cvmx_l2c_bst_ttgx_s cn68xxp1;
- struct cvmx_l2c_bst_ttgx_s cnf71xx;
-};
union cvmx_l2c_cfg {
uint64_t u64;
struct cvmx_l2c_cfg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t bstrun:1;
- uint64_t lbist:1;
- uint64_t xor_bank:1;
- uint64_t dpres1:1;
- uint64_t dpres0:1;
- uint64_t dfill_dis:1;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t dfill_dis:1;
- uint64_t dpres0:1;
- uint64_t dpres1:1;
- uint64_t xor_bank:1;
- uint64_t lbist:1;
- uint64_t bstrun:1;
- uint64_t reserved_20_63:44;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_20_63:44,
+ __BITFIELD_FIELD(uint64_t bstrun:1,
+ __BITFIELD_FIELD(uint64_t lbist:1,
+ __BITFIELD_FIELD(uint64_t xor_bank:1,
+ __BITFIELD_FIELD(uint64_t dpres1:1,
+ __BITFIELD_FIELD(uint64_t dpres0:1,
+ __BITFIELD_FIELD(uint64_t dfill_dis:1,
+ __BITFIELD_FIELD(uint64_t fpexp:4,
+ __BITFIELD_FIELD(uint64_t fpempty:1,
+ __BITFIELD_FIELD(uint64_t fpen:1,
+ __BITFIELD_FIELD(uint64_t idxalias:1,
+ __BITFIELD_FIELD(uint64_t mwf_crd:4,
+ __BITFIELD_FIELD(uint64_t rsp_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t rfb_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t lrf_arb_mode:1,
+ ;)))))))))))))))
} s;
- struct cvmx_l2c_cfg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t reserved_14_63:50;
-#endif
- } cn30xx;
- struct cvmx_l2c_cfg_cn30xx cn31xx;
- struct cvmx_l2c_cfg_cn30xx cn38xx;
- struct cvmx_l2c_cfg_cn30xx cn38xxp2;
- struct cvmx_l2c_cfg_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t bstrun:1;
- uint64_t lbist:1;
- uint64_t reserved_14_17:4;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t reserved_14_17:4;
- uint64_t lbist:1;
- uint64_t bstrun:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn50xx;
- struct cvmx_l2c_cfg_cn50xx cn52xx;
- struct cvmx_l2c_cfg_cn50xx cn52xxp1;
- struct cvmx_l2c_cfg_s cn56xx;
- struct cvmx_l2c_cfg_s cn56xxp1;
- struct cvmx_l2c_cfg_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t bstrun:1;
- uint64_t lbist:1;
- uint64_t reserved_15_17:3;
- uint64_t dfill_dis:1;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t dfill_dis:1;
- uint64_t reserved_15_17:3;
- uint64_t lbist:1;
- uint64_t bstrun:1;
- uint64_t reserved_20_63:44;
-#endif
- } cn58xx;
- struct cvmx_l2c_cfg_cn58xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t dfill_dis:1;
- uint64_t fpexp:4;
- uint64_t fpempty:1;
- uint64_t fpen:1;
- uint64_t idxalias:1;
- uint64_t mwf_crd:4;
- uint64_t rsp_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t lrf_arb_mode:1;
-#else
- uint64_t lrf_arb_mode:1;
- uint64_t rfb_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t mwf_crd:4;
- uint64_t idxalias:1;
- uint64_t fpen:1;
- uint64_t fpempty:1;
- uint64_t fpexp:4;
- uint64_t dfill_dis:1;
- uint64_t reserved_15_63:49;
-#endif
- } cn58xxp1;
-};
-
-union cvmx_l2c_cop0_mapx {
- uint64_t u64;
- struct cvmx_l2c_cop0_mapx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_l2c_cop0_mapx_s cn61xx;
- struct cvmx_l2c_cop0_mapx_s cn63xx;
- struct cvmx_l2c_cop0_mapx_s cn63xxp1;
- struct cvmx_l2c_cop0_mapx_s cn66xx;
- struct cvmx_l2c_cop0_mapx_s cn68xx;
- struct cvmx_l2c_cop0_mapx_s cn68xxp1;
- struct cvmx_l2c_cop0_mapx_s cnf71xx;
};
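The conversion above (and the ones that follow) replaces each pair of #ifdef __BIG_ENDIAN_BITFIELD field lists with a single nested list; the run of closing parentheses after the last field simply closes the nesting. The macro is the one pulled in from uapi/asm/bitfield.h at the top of this header:

    #ifdef __MIPSEB__
    #define __BITFIELD_FIELD(field, more)	\
            field;				\
            more
    #elif defined(__MIPSEL__)
    #define __BITFIELD_FIELD(field, more)	\
            more				\
            field;
    #else
    #error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
    #endif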
union cvmx_l2c_ctl {
uint64_t u64;
struct cvmx_l2c_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_30_63:34;
- uint64_t sepcmt:1;
- uint64_t rdf_fast:1;
- uint64_t disstgl2i:1;
- uint64_t l2dfsbe:1;
- uint64_t l2dfdbe:1;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t l2dfdbe:1;
- uint64_t l2dfsbe:1;
- uint64_t disstgl2i:1;
- uint64_t rdf_fast:1;
- uint64_t sepcmt:1;
- uint64_t reserved_30_63:34;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_30_63:34,
+ __BITFIELD_FIELD(uint64_t sepcmt:1,
+ __BITFIELD_FIELD(uint64_t rdf_fast:1,
+ __BITFIELD_FIELD(uint64_t disstgl2i:1,
+ __BITFIELD_FIELD(uint64_t l2dfsbe:1,
+ __BITFIELD_FIELD(uint64_t l2dfdbe:1,
+ __BITFIELD_FIELD(uint64_t discclk:1,
+ __BITFIELD_FIELD(uint64_t maxvab:4,
+ __BITFIELD_FIELD(uint64_t maxlfb:4,
+ __BITFIELD_FIELD(uint64_t rsp_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t xmc_arb_mode:1,
+ __BITFIELD_FIELD(uint64_t ef_ena:1,
+ __BITFIELD_FIELD(uint64_t ef_cnt:7,
+ __BITFIELD_FIELD(uint64_t vab_thresh:4,
+ __BITFIELD_FIELD(uint64_t disecc:1,
+ __BITFIELD_FIELD(uint64_t disidxalias:1,
+ ;))))))))))))))))
} s;
- struct cvmx_l2c_ctl_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t rdf_fast:1;
- uint64_t disstgl2i:1;
- uint64_t l2dfsbe:1;
- uint64_t l2dfdbe:1;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t l2dfdbe:1;
- uint64_t l2dfsbe:1;
- uint64_t disstgl2i:1;
- uint64_t rdf_fast:1;
- uint64_t reserved_29_63:35;
-#endif
- } cn61xx;
- struct cvmx_l2c_ctl_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t disstgl2i:1;
- uint64_t l2dfsbe:1;
- uint64_t l2dfdbe:1;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t l2dfdbe:1;
- uint64_t l2dfsbe:1;
- uint64_t disstgl2i:1;
- uint64_t reserved_28_63:36;
-#endif
- } cn63xx;
- struct cvmx_l2c_ctl_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_25_63:39;
- uint64_t discclk:1;
- uint64_t maxvab:4;
- uint64_t maxlfb:4;
- uint64_t rsp_arb_mode:1;
- uint64_t xmc_arb_mode:1;
- uint64_t ef_ena:1;
- uint64_t ef_cnt:7;
- uint64_t vab_thresh:4;
- uint64_t disecc:1;
- uint64_t disidxalias:1;
-#else
- uint64_t disidxalias:1;
- uint64_t disecc:1;
- uint64_t vab_thresh:4;
- uint64_t ef_cnt:7;
- uint64_t ef_ena:1;
- uint64_t xmc_arb_mode:1;
- uint64_t rsp_arb_mode:1;
- uint64_t maxlfb:4;
- uint64_t maxvab:4;
- uint64_t discclk:1;
- uint64_t reserved_25_63:39;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_ctl_cn61xx cn66xx;
- struct cvmx_l2c_ctl_s cn68xx;
- struct cvmx_l2c_ctl_cn63xx cn68xxp1;
- struct cvmx_l2c_ctl_cn61xx cnf71xx;
};
union cvmx_l2c_dbg {
uint64_t u64;
struct cvmx_l2c_dbg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_15_63:49;
- uint64_t lfb_enum:4;
- uint64_t lfb_dmp:1;
- uint64_t ppnum:4;
- uint64_t set:3;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:3;
- uint64_t ppnum:4;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:4;
- uint64_t reserved_15_63:49;
-#endif
- } s;
- struct cvmx_l2c_dbg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_13_63:51;
- uint64_t lfb_enum:2;
- uint64_t lfb_dmp:1;
- uint64_t reserved_7_9:3;
- uint64_t ppnum:1;
- uint64_t reserved_5_5:1;
- uint64_t set:2;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:2;
- uint64_t reserved_5_5:1;
- uint64_t ppnum:1;
- uint64_t reserved_7_9:3;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:2;
- uint64_t reserved_13_63:51;
-#endif
- } cn30xx;
- struct cvmx_l2c_dbg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t lfb_enum:3;
- uint64_t lfb_dmp:1;
- uint64_t reserved_7_9:3;
- uint64_t ppnum:1;
- uint64_t reserved_5_5:1;
- uint64_t set:2;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:2;
- uint64_t reserved_5_5:1;
- uint64_t ppnum:1;
- uint64_t reserved_7_9:3;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:3;
- uint64_t reserved_14_63:50;
-#endif
- } cn31xx;
- struct cvmx_l2c_dbg_s cn38xx;
- struct cvmx_l2c_dbg_s cn38xxp2;
- struct cvmx_l2c_dbg_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t lfb_enum:3;
- uint64_t lfb_dmp:1;
- uint64_t reserved_7_9:3;
- uint64_t ppnum:1;
- uint64_t set:3;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:3;
- uint64_t ppnum:1;
- uint64_t reserved_7_9:3;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:3;
- uint64_t reserved_14_63:50;
-#endif
- } cn50xx;
- struct cvmx_l2c_dbg_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t lfb_enum:3;
- uint64_t lfb_dmp:1;
- uint64_t reserved_8_9:2;
- uint64_t ppnum:2;
- uint64_t set:3;
- uint64_t finv:1;
- uint64_t l2d:1;
- uint64_t l2t:1;
-#else
- uint64_t l2t:1;
- uint64_t l2d:1;
- uint64_t finv:1;
- uint64_t set:3;
- uint64_t ppnum:2;
- uint64_t reserved_8_9:2;
- uint64_t lfb_dmp:1;
- uint64_t lfb_enum:3;
- uint64_t reserved_14_63:50;
-#endif
- } cn52xx;
- struct cvmx_l2c_dbg_cn52xx cn52xxp1;
- struct cvmx_l2c_dbg_s cn56xx;
- struct cvmx_l2c_dbg_s cn56xxp1;
- struct cvmx_l2c_dbg_s cn58xx;
- struct cvmx_l2c_dbg_s cn58xxp1;
-};
-
-union cvmx_l2c_dut {
- uint64_t u64;
- struct cvmx_l2c_dut_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t dtena:1;
- uint64_t reserved_30_30:1;
- uint64_t dt_vld:1;
- uint64_t dt_tag:29;
-#else
- uint64_t dt_tag:29;
- uint64_t dt_vld:1;
- uint64_t reserved_30_30:1;
- uint64_t dtena:1;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_dut_s cn30xx;
- struct cvmx_l2c_dut_s cn31xx;
- struct cvmx_l2c_dut_s cn38xx;
- struct cvmx_l2c_dut_s cn38xxp2;
- struct cvmx_l2c_dut_s cn50xx;
- struct cvmx_l2c_dut_s cn52xx;
- struct cvmx_l2c_dut_s cn52xxp1;
- struct cvmx_l2c_dut_s cn56xx;
- struct cvmx_l2c_dut_s cn56xxp1;
- struct cvmx_l2c_dut_s cn58xx;
- struct cvmx_l2c_dut_s cn58xxp1;
-};
-
-union cvmx_l2c_dut_mapx {
- uint64_t u64;
- struct cvmx_l2c_dut_mapx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_38_63:26;
- uint64_t tag:28;
- uint64_t reserved_1_9:9;
- uint64_t valid:1;
-#else
- uint64_t valid:1;
- uint64_t reserved_1_9:9;
- uint64_t tag:28;
- uint64_t reserved_38_63:26;
-#endif
- } s;
- struct cvmx_l2c_dut_mapx_s cn61xx;
- struct cvmx_l2c_dut_mapx_s cn63xx;
- struct cvmx_l2c_dut_mapx_s cn63xxp1;
- struct cvmx_l2c_dut_mapx_s cn66xx;
- struct cvmx_l2c_dut_mapx_s cn68xx;
- struct cvmx_l2c_dut_mapx_s cn68xxp1;
- struct cvmx_l2c_dut_mapx_s cnf71xx;
-};
-
-union cvmx_l2c_err_tdtx {
- uint64_t u64;
- struct cvmx_l2c_err_tdtx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t syn:10;
- uint64_t reserved_22_49:28;
- uint64_t wayidx:18;
- uint64_t reserved_2_3:2;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_3:2;
- uint64_t wayidx:18;
- uint64_t reserved_22_49:28;
- uint64_t syn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } s;
- struct cvmx_l2c_err_tdtx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t syn:10;
- uint64_t reserved_20_49:30;
- uint64_t wayidx:16;
- uint64_t reserved_2_3:2;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_3:2;
- uint64_t wayidx:16;
- uint64_t reserved_20_49:30;
- uint64_t syn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn61xx;
- struct cvmx_l2c_err_tdtx_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t syn:10;
- uint64_t reserved_21_49:29;
- uint64_t wayidx:17;
- uint64_t reserved_2_3:2;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_3:2;
- uint64_t wayidx:17;
- uint64_t reserved_21_49:29;
- uint64_t syn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn63xx;
- struct cvmx_l2c_err_tdtx_cn63xx cn63xxp1;
- struct cvmx_l2c_err_tdtx_cn63xx cn66xx;
- struct cvmx_l2c_err_tdtx_s cn68xx;
- struct cvmx_l2c_err_tdtx_s cn68xxp1;
- struct cvmx_l2c_err_tdtx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_err_ttgx {
- uint64_t u64;
- struct cvmx_l2c_err_ttgx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t noway:1;
- uint64_t reserved_56_60:5;
- uint64_t syn:6;
- uint64_t reserved_22_49:28;
- uint64_t wayidx:15;
- uint64_t reserved_2_6:5;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_6:5;
- uint64_t wayidx:15;
- uint64_t reserved_22_49:28;
- uint64_t syn:6;
- uint64_t reserved_56_60:5;
- uint64_t noway:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } s;
- struct cvmx_l2c_err_ttgx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t noway:1;
- uint64_t reserved_56_60:5;
- uint64_t syn:6;
- uint64_t reserved_20_49:30;
- uint64_t wayidx:13;
- uint64_t reserved_2_6:5;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_6:5;
- uint64_t wayidx:13;
- uint64_t reserved_20_49:30;
- uint64_t syn:6;
- uint64_t reserved_56_60:5;
- uint64_t noway:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn61xx;
- struct cvmx_l2c_err_ttgx_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t dbe:1;
- uint64_t sbe:1;
- uint64_t noway:1;
- uint64_t reserved_56_60:5;
- uint64_t syn:6;
- uint64_t reserved_21_49:29;
- uint64_t wayidx:14;
- uint64_t reserved_2_6:5;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_6:5;
- uint64_t wayidx:14;
- uint64_t reserved_21_49:29;
- uint64_t syn:6;
- uint64_t reserved_56_60:5;
- uint64_t noway:1;
- uint64_t sbe:1;
- uint64_t dbe:1;
-#endif
- } cn63xx;
- struct cvmx_l2c_err_ttgx_cn63xx cn63xxp1;
- struct cvmx_l2c_err_ttgx_cn63xx cn66xx;
- struct cvmx_l2c_err_ttgx_s cn68xx;
- struct cvmx_l2c_err_ttgx_s cn68xxp1;
- struct cvmx_l2c_err_ttgx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_err_vbfx {
- uint64_t u64;
- struct cvmx_l2c_err_vbfx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t vdbe:1;
- uint64_t vsbe:1;
- uint64_t vsyn:10;
- uint64_t reserved_2_49:48;
- uint64_t type:2;
-#else
- uint64_t type:2;
- uint64_t reserved_2_49:48;
- uint64_t vsyn:10;
- uint64_t vsbe:1;
- uint64_t vdbe:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_l2c_err_vbfx_s cn61xx;
- struct cvmx_l2c_err_vbfx_s cn63xx;
- struct cvmx_l2c_err_vbfx_s cn63xxp1;
- struct cvmx_l2c_err_vbfx_s cn66xx;
- struct cvmx_l2c_err_vbfx_s cn68xx;
- struct cvmx_l2c_err_vbfx_s cn68xxp1;
- struct cvmx_l2c_err_vbfx_s cnf71xx;
-};
-
-union cvmx_l2c_err_xmc {
- uint64_t u64;
- struct cvmx_l2c_err_xmc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cmd:6;
- uint64_t reserved_54_57:4;
- uint64_t sid:6;
- uint64_t reserved_38_47:10;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_47:10;
- uint64_t sid:6;
- uint64_t reserved_54_57:4;
- uint64_t cmd:6;
-#endif
- } s;
- struct cvmx_l2c_err_xmc_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cmd:6;
- uint64_t reserved_52_57:6;
- uint64_t sid:4;
- uint64_t reserved_38_47:10;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_47:10;
- uint64_t sid:4;
- uint64_t reserved_52_57:6;
- uint64_t cmd:6;
-#endif
- } cn61xx;
- struct cvmx_l2c_err_xmc_cn61xx cn63xx;
- struct cvmx_l2c_err_xmc_cn61xx cn63xxp1;
- struct cvmx_l2c_err_xmc_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cmd:6;
- uint64_t reserved_53_57:5;
- uint64_t sid:5;
- uint64_t reserved_38_47:10;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_47:10;
- uint64_t sid:5;
- uint64_t reserved_53_57:5;
- uint64_t cmd:6;
-#endif
- } cn66xx;
- struct cvmx_l2c_err_xmc_s cn68xx;
- struct cvmx_l2c_err_xmc_s cn68xxp1;
- struct cvmx_l2c_err_xmc_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_grpwrr0 {
- uint64_t u64;
- struct cvmx_l2c_grpwrr0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t plc1rmsk:32;
- uint64_t plc0rmsk:32;
-#else
- uint64_t plc0rmsk:32;
- uint64_t plc1rmsk:32;
-#endif
- } s;
- struct cvmx_l2c_grpwrr0_s cn52xx;
- struct cvmx_l2c_grpwrr0_s cn52xxp1;
- struct cvmx_l2c_grpwrr0_s cn56xx;
- struct cvmx_l2c_grpwrr0_s cn56xxp1;
-};
-
-union cvmx_l2c_grpwrr1 {
- uint64_t u64;
- struct cvmx_l2c_grpwrr1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t ilcrmsk:32;
- uint64_t plc2rmsk:32;
-#else
- uint64_t plc2rmsk:32;
- uint64_t ilcrmsk:32;
-#endif
- } s;
- struct cvmx_l2c_grpwrr1_s cn52xx;
- struct cvmx_l2c_grpwrr1_s cn52xxp1;
- struct cvmx_l2c_grpwrr1_s cn56xx;
- struct cvmx_l2c_grpwrr1_s cn56xxp1;
-};
-
-union cvmx_l2c_int_en {
- uint64_t u64;
- struct cvmx_l2c_int_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t lck2ena:1;
- uint64_t lckena:1;
- uint64_t l2ddeden:1;
- uint64_t l2dsecen:1;
- uint64_t l2tdeden:1;
- uint64_t l2tsecen:1;
- uint64_t oob3en:1;
- uint64_t oob2en:1;
- uint64_t oob1en:1;
-#else
- uint64_t oob1en:1;
- uint64_t oob2en:1;
- uint64_t oob3en:1;
- uint64_t l2tsecen:1;
- uint64_t l2tdeden:1;
- uint64_t l2dsecen:1;
- uint64_t l2ddeden:1;
- uint64_t lckena:1;
- uint64_t lck2ena:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_int_en_s cn52xx;
- struct cvmx_l2c_int_en_s cn52xxp1;
- struct cvmx_l2c_int_en_s cn56xx;
- struct cvmx_l2c_int_en_s cn56xxp1;
-};
-
-union cvmx_l2c_int_ena {
- uint64_t u64;
- struct cvmx_l2c_int_ena_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t bigrd:1;
- uint64_t bigwr:1;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t bigwr:1;
- uint64_t bigrd:1;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_l2c_int_ena_s cn61xx;
- struct cvmx_l2c_int_ena_s cn63xx;
- struct cvmx_l2c_int_ena_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_int_ena_s cn66xx;
- struct cvmx_l2c_int_ena_s cn68xx;
- struct cvmx_l2c_int_ena_s cn68xxp1;
- struct cvmx_l2c_int_ena_s cnf71xx;
-};
-
-union cvmx_l2c_int_reg {
- uint64_t u64;
- struct cvmx_l2c_int_reg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t tad3:1;
- uint64_t tad2:1;
- uint64_t tad1:1;
- uint64_t tad0:1;
- uint64_t reserved_8_15:8;
- uint64_t bigrd:1;
- uint64_t bigwr:1;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t bigwr:1;
- uint64_t bigrd:1;
- uint64_t reserved_8_15:8;
- uint64_t tad0:1;
- uint64_t tad1:1;
- uint64_t tad2:1;
- uint64_t tad3:1;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_l2c_int_reg_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t tad0:1;
- uint64_t reserved_8_15:8;
- uint64_t bigrd:1;
- uint64_t bigwr:1;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t bigwr:1;
- uint64_t bigrd:1;
- uint64_t reserved_8_15:8;
- uint64_t tad0:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn61xx;
- struct cvmx_l2c_int_reg_cn61xx cn63xx;
- struct cvmx_l2c_int_reg_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t tad0:1;
- uint64_t reserved_6_15:10;
- uint64_t vrtpe:1;
- uint64_t vrtadrng:1;
- uint64_t vrtidrng:1;
- uint64_t vrtwr:1;
- uint64_t holewr:1;
- uint64_t holerd:1;
-#else
- uint64_t holerd:1;
- uint64_t holewr:1;
- uint64_t vrtwr:1;
- uint64_t vrtidrng:1;
- uint64_t vrtadrng:1;
- uint64_t vrtpe:1;
- uint64_t reserved_6_15:10;
- uint64_t tad0:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_int_reg_cn61xx cn66xx;
- struct cvmx_l2c_int_reg_s cn68xx;
- struct cvmx_l2c_int_reg_s cn68xxp1;
- struct cvmx_l2c_int_reg_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_int_stat {
- uint64_t u64;
- struct cvmx_l2c_int_stat_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t lck2:1;
- uint64_t lck:1;
- uint64_t l2dded:1;
- uint64_t l2dsec:1;
- uint64_t l2tded:1;
- uint64_t l2tsec:1;
- uint64_t oob3:1;
- uint64_t oob2:1;
- uint64_t oob1:1;
-#else
- uint64_t oob1:1;
- uint64_t oob2:1;
- uint64_t oob3:1;
- uint64_t l2tsec:1;
- uint64_t l2tded:1;
- uint64_t l2dsec:1;
- uint64_t l2dded:1;
- uint64_t lck:1;
- uint64_t lck2:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_int_stat_s cn52xx;
- struct cvmx_l2c_int_stat_s cn52xxp1;
- struct cvmx_l2c_int_stat_s cn56xx;
- struct cvmx_l2c_int_stat_s cn56xxp1;
-};
-
-union cvmx_l2c_iocx_pfc {
- uint64_t u64;
- struct cvmx_l2c_iocx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_iocx_pfc_s cn61xx;
- struct cvmx_l2c_iocx_pfc_s cn63xx;
- struct cvmx_l2c_iocx_pfc_s cn63xxp1;
- struct cvmx_l2c_iocx_pfc_s cn66xx;
- struct cvmx_l2c_iocx_pfc_s cn68xx;
- struct cvmx_l2c_iocx_pfc_s cn68xxp1;
- struct cvmx_l2c_iocx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_iorx_pfc {
- uint64_t u64;
- struct cvmx_l2c_iorx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_iorx_pfc_s cn61xx;
- struct cvmx_l2c_iorx_pfc_s cn63xx;
- struct cvmx_l2c_iorx_pfc_s cn63xxp1;
- struct cvmx_l2c_iorx_pfc_s cn66xx;
- struct cvmx_l2c_iorx_pfc_s cn68xx;
- struct cvmx_l2c_iorx_pfc_s cn68xxp1;
- struct cvmx_l2c_iorx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_lckbase {
- uint64_t u64;
- struct cvmx_l2c_lckbase_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t lck_base:27;
- uint64_t reserved_1_3:3;
- uint64_t lck_ena:1;
-#else
- uint64_t lck_ena:1;
- uint64_t reserved_1_3:3;
- uint64_t lck_base:27;
- uint64_t reserved_31_63:33;
-#endif
- } s;
- struct cvmx_l2c_lckbase_s cn30xx;
- struct cvmx_l2c_lckbase_s cn31xx;
- struct cvmx_l2c_lckbase_s cn38xx;
- struct cvmx_l2c_lckbase_s cn38xxp2;
- struct cvmx_l2c_lckbase_s cn50xx;
- struct cvmx_l2c_lckbase_s cn52xx;
- struct cvmx_l2c_lckbase_s cn52xxp1;
- struct cvmx_l2c_lckbase_s cn56xx;
- struct cvmx_l2c_lckbase_s cn56xxp1;
- struct cvmx_l2c_lckbase_s cn58xx;
- struct cvmx_l2c_lckbase_s cn58xxp1;
-};
-
-union cvmx_l2c_lckoff {
- uint64_t u64;
- struct cvmx_l2c_lckoff_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t lck_offset:10;
-#else
- uint64_t lck_offset:10;
- uint64_t reserved_10_63:54;
-#endif
- } s;
- struct cvmx_l2c_lckoff_s cn30xx;
- struct cvmx_l2c_lckoff_s cn31xx;
- struct cvmx_l2c_lckoff_s cn38xx;
- struct cvmx_l2c_lckoff_s cn38xxp2;
- struct cvmx_l2c_lckoff_s cn50xx;
- struct cvmx_l2c_lckoff_s cn52xx;
- struct cvmx_l2c_lckoff_s cn52xxp1;
- struct cvmx_l2c_lckoff_s cn56xx;
- struct cvmx_l2c_lckoff_s cn56xxp1;
- struct cvmx_l2c_lckoff_s cn58xx;
- struct cvmx_l2c_lckoff_s cn58xxp1;
-};
-
-union cvmx_l2c_lfb0 {
- uint64_t u64;
- struct cvmx_l2c_lfb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t inxt:4;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t set:3;
- uint64_t vabnum:4;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:4;
- uint64_t set:3;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:4;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_lfb0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t reserved_25_26:2;
- uint64_t inxt:2;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t reserved_20_20:1;
- uint64_t set:2;
- uint64_t reserved_16_17:2;
- uint64_t vabnum:2;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:2;
- uint64_t reserved_16_17:2;
- uint64_t set:2;
- uint64_t reserved_20_20:1;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:2;
- uint64_t reserved_25_26:2;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn30xx;
- struct cvmx_l2c_lfb0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t reserved_26_26:1;
- uint64_t inxt:3;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t reserved_20_20:1;
- uint64_t set:2;
- uint64_t reserved_17_17:1;
- uint64_t vabnum:3;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:3;
- uint64_t reserved_17_17:1;
- uint64_t set:2;
- uint64_t reserved_20_20:1;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:3;
- uint64_t reserved_26_26:1;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn31xx;
- struct cvmx_l2c_lfb0_s cn38xx;
- struct cvmx_l2c_lfb0_s cn38xxp2;
- struct cvmx_l2c_lfb0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t stcpnd:1;
- uint64_t stpnd:1;
- uint64_t stinv:1;
- uint64_t stcfl:1;
- uint64_t vam:1;
- uint64_t reserved_26_26:1;
- uint64_t inxt:3;
- uint64_t itl:1;
- uint64_t ihd:1;
- uint64_t set:3;
- uint64_t reserved_17_17:1;
- uint64_t vabnum:3;
- uint64_t sid:9;
- uint64_t cmd:4;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t cmd:4;
- uint64_t sid:9;
- uint64_t vabnum:3;
- uint64_t reserved_17_17:1;
- uint64_t set:3;
- uint64_t ihd:1;
- uint64_t itl:1;
- uint64_t inxt:3;
- uint64_t reserved_26_26:1;
- uint64_t vam:1;
- uint64_t stcfl:1;
- uint64_t stinv:1;
- uint64_t stpnd:1;
- uint64_t stcpnd:1;
- uint64_t reserved_32_63:32;
-#endif
- } cn50xx;
- struct cvmx_l2c_lfb0_cn50xx cn52xx;
- struct cvmx_l2c_lfb0_cn50xx cn52xxp1;
- struct cvmx_l2c_lfb0_s cn56xx;
- struct cvmx_l2c_lfb0_s cn56xxp1;
- struct cvmx_l2c_lfb0_s cn58xx;
- struct cvmx_l2c_lfb0_s cn58xxp1;
-};
-
-union cvmx_l2c_lfb1 {
- uint64_t u64;
- struct cvmx_l2c_lfb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t dsgoing:1;
- uint64_t bid:2;
- uint64_t wtrsp:1;
- uint64_t wtdw:1;
- uint64_t wtdq:1;
- uint64_t wtwhp:1;
- uint64_t wtwhf:1;
- uint64_t wtwrm:1;
- uint64_t wtstm:1;
- uint64_t wtrda:1;
- uint64_t wtstdt:1;
- uint64_t wtstrsp:1;
- uint64_t wtstrsc:1;
- uint64_t wtvtm:1;
- uint64_t wtmfl:1;
- uint64_t prbrty:1;
- uint64_t wtprb:1;
- uint64_t vld:1;
-#else
- uint64_t vld:1;
- uint64_t wtprb:1;
- uint64_t prbrty:1;
- uint64_t wtmfl:1;
- uint64_t wtvtm:1;
- uint64_t wtstrsc:1;
- uint64_t wtstrsp:1;
- uint64_t wtstdt:1;
- uint64_t wtrda:1;
- uint64_t wtstm:1;
- uint64_t wtwrm:1;
- uint64_t wtwhf:1;
- uint64_t wtwhp:1;
- uint64_t wtdq:1;
- uint64_t wtdw:1;
- uint64_t wtrsp:1;
- uint64_t bid:2;
- uint64_t dsgoing:1;
- uint64_t reserved_19_63:45;
-#endif
- } s;
- struct cvmx_l2c_lfb1_s cn30xx;
- struct cvmx_l2c_lfb1_s cn31xx;
- struct cvmx_l2c_lfb1_s cn38xx;
- struct cvmx_l2c_lfb1_s cn38xxp2;
- struct cvmx_l2c_lfb1_s cn50xx;
- struct cvmx_l2c_lfb1_s cn52xx;
- struct cvmx_l2c_lfb1_s cn52xxp1;
- struct cvmx_l2c_lfb1_s cn56xx;
- struct cvmx_l2c_lfb1_s cn56xxp1;
- struct cvmx_l2c_lfb1_s cn58xx;
- struct cvmx_l2c_lfb1_s cn58xxp1;
-};
-
-union cvmx_l2c_lfb2 {
- uint64_t u64;
- struct cvmx_l2c_lfb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_0_63:64;
-#else
- uint64_t reserved_0_63:64;
-#endif
- } s;
- struct cvmx_l2c_lfb2_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:19;
- uint64_t lfb_idx:8;
-#else
- uint64_t lfb_idx:8;
- uint64_t lfb_tag:19;
- uint64_t reserved_27_63:37;
-#endif
- } cn30xx;
- struct cvmx_l2c_lfb2_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:17;
- uint64_t lfb_idx:10;
-#else
- uint64_t lfb_idx:10;
- uint64_t lfb_tag:17;
- uint64_t reserved_27_63:37;
-#endif
- } cn31xx;
- struct cvmx_l2c_lfb2_cn31xx cn38xx;
- struct cvmx_l2c_lfb2_cn31xx cn38xxp2;
- struct cvmx_l2c_lfb2_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:20;
- uint64_t lfb_idx:7;
-#else
- uint64_t lfb_idx:7;
- uint64_t lfb_tag:20;
- uint64_t reserved_27_63:37;
-#endif
- } cn50xx;
- struct cvmx_l2c_lfb2_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:18;
- uint64_t lfb_idx:9;
-#else
- uint64_t lfb_idx:9;
- uint64_t lfb_tag:18;
- uint64_t reserved_27_63:37;
-#endif
- } cn52xx;
- struct cvmx_l2c_lfb2_cn52xx cn52xxp1;
- struct cvmx_l2c_lfb2_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_27_63:37;
- uint64_t lfb_tag:16;
- uint64_t lfb_idx:11;
-#else
- uint64_t lfb_idx:11;
- uint64_t lfb_tag:16;
- uint64_t reserved_27_63:37;
-#endif
- } cn56xx;
- struct cvmx_l2c_lfb2_cn56xx cn56xxp1;
- struct cvmx_l2c_lfb2_cn56xx cn58xx;
- struct cvmx_l2c_lfb2_cn56xx cn58xxp1;
-};
-
-union cvmx_l2c_lfb3 {
- uint64_t u64;
- struct cvmx_l2c_lfb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t stpartdis:1;
- uint64_t lfb_hwm:4;
-#else
- uint64_t lfb_hwm:4;
- uint64_t stpartdis:1;
- uint64_t reserved_5_63:59;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_15_63:49,
+ __BITFIELD_FIELD(uint64_t lfb_enum:4,
+ __BITFIELD_FIELD(uint64_t lfb_dmp:1,
+ __BITFIELD_FIELD(uint64_t ppnum:4,
+ __BITFIELD_FIELD(uint64_t set:3,
+ __BITFIELD_FIELD(uint64_t finv:1,
+ __BITFIELD_FIELD(uint64_t l2d:1,
+ __BITFIELD_FIELD(uint64_t l2t:1,
+ ;))))))))
} s;
- struct cvmx_l2c_lfb3_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t stpartdis:1;
- uint64_t reserved_2_3:2;
- uint64_t lfb_hwm:2;
-#else
- uint64_t lfb_hwm:2;
- uint64_t reserved_2_3:2;
- uint64_t stpartdis:1;
- uint64_t reserved_5_63:59;
-#endif
- } cn30xx;
- struct cvmx_l2c_lfb3_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t stpartdis:1;
- uint64_t reserved_3_3:1;
- uint64_t lfb_hwm:3;
-#else
- uint64_t lfb_hwm:3;
- uint64_t reserved_3_3:1;
- uint64_t stpartdis:1;
- uint64_t reserved_5_63:59;
-#endif
- } cn31xx;
- struct cvmx_l2c_lfb3_s cn38xx;
- struct cvmx_l2c_lfb3_s cn38xxp2;
- struct cvmx_l2c_lfb3_cn31xx cn50xx;
- struct cvmx_l2c_lfb3_cn31xx cn52xx;
- struct cvmx_l2c_lfb3_cn31xx cn52xxp1;
- struct cvmx_l2c_lfb3_s cn56xx;
- struct cvmx_l2c_lfb3_s cn56xxp1;
- struct cvmx_l2c_lfb3_s cn58xx;
- struct cvmx_l2c_lfb3_s cn58xxp1;
-};
-
-union cvmx_l2c_oob {
- uint64_t u64;
- struct cvmx_l2c_oob_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t dwbena:1;
- uint64_t stena:1;
-#else
- uint64_t stena:1;
- uint64_t dwbena:1;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_l2c_oob_s cn52xx;
- struct cvmx_l2c_oob_s cn52xxp1;
- struct cvmx_l2c_oob_s cn56xx;
- struct cvmx_l2c_oob_s cn56xxp1;
-};
-
-union cvmx_l2c_oob1 {
- uint64_t u64;
- struct cvmx_l2c_oob1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t fadr:27;
- uint64_t fsrc:1;
- uint64_t reserved_34_35:2;
- uint64_t sadr:14;
- uint64_t reserved_14_19:6;
- uint64_t size:14;
-#else
- uint64_t size:14;
- uint64_t reserved_14_19:6;
- uint64_t sadr:14;
- uint64_t reserved_34_35:2;
- uint64_t fsrc:1;
- uint64_t fadr:27;
-#endif
- } s;
- struct cvmx_l2c_oob1_s cn52xx;
- struct cvmx_l2c_oob1_s cn52xxp1;
- struct cvmx_l2c_oob1_s cn56xx;
- struct cvmx_l2c_oob1_s cn56xxp1;
-};
-
-union cvmx_l2c_oob2 {
- uint64_t u64;
- struct cvmx_l2c_oob2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t fadr:27;
- uint64_t fsrc:1;
- uint64_t reserved_34_35:2;
- uint64_t sadr:14;
- uint64_t reserved_14_19:6;
- uint64_t size:14;
-#else
- uint64_t size:14;
- uint64_t reserved_14_19:6;
- uint64_t sadr:14;
- uint64_t reserved_34_35:2;
- uint64_t fsrc:1;
- uint64_t fadr:27;
-#endif
- } s;
- struct cvmx_l2c_oob2_s cn52xx;
- struct cvmx_l2c_oob2_s cn52xxp1;
- struct cvmx_l2c_oob2_s cn56xx;
- struct cvmx_l2c_oob2_s cn56xxp1;
-};
-
-union cvmx_l2c_oob3 {
- uint64_t u64;
- struct cvmx_l2c_oob3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t fadr:27;
- uint64_t fsrc:1;
- uint64_t reserved_34_35:2;
- uint64_t sadr:14;
- uint64_t reserved_14_19:6;
- uint64_t size:14;
-#else
- uint64_t size:14;
- uint64_t reserved_14_19:6;
- uint64_t sadr:14;
- uint64_t reserved_34_35:2;
- uint64_t fsrc:1;
- uint64_t fadr:27;
-#endif
- } s;
- struct cvmx_l2c_oob3_s cn52xx;
- struct cvmx_l2c_oob3_s cn52xxp1;
- struct cvmx_l2c_oob3_s cn56xx;
- struct cvmx_l2c_oob3_s cn56xxp1;
-};
-
-union cvmx_l2c_pfcx {
- uint64_t u64;
- struct cvmx_l2c_pfcx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t pfcnt0:36;
-#else
- uint64_t pfcnt0:36;
- uint64_t reserved_36_63:28;
-#endif
- } s;
- struct cvmx_l2c_pfcx_s cn30xx;
- struct cvmx_l2c_pfcx_s cn31xx;
- struct cvmx_l2c_pfcx_s cn38xx;
- struct cvmx_l2c_pfcx_s cn38xxp2;
- struct cvmx_l2c_pfcx_s cn50xx;
- struct cvmx_l2c_pfcx_s cn52xx;
- struct cvmx_l2c_pfcx_s cn52xxp1;
- struct cvmx_l2c_pfcx_s cn56xx;
- struct cvmx_l2c_pfcx_s cn56xxp1;
- struct cvmx_l2c_pfcx_s cn58xx;
- struct cvmx_l2c_pfcx_s cn58xxp1;
};
union cvmx_l2c_pfctl {
uint64_t u64;
struct cvmx_l2c_pfctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t cnt3rdclr:1;
- uint64_t cnt2rdclr:1;
- uint64_t cnt1rdclr:1;
- uint64_t cnt0rdclr:1;
- uint64_t cnt3ena:1;
- uint64_t cnt3clr:1;
- uint64_t cnt3sel:6;
- uint64_t cnt2ena:1;
- uint64_t cnt2clr:1;
- uint64_t cnt2sel:6;
- uint64_t cnt1ena:1;
- uint64_t cnt1clr:1;
- uint64_t cnt1sel:6;
- uint64_t cnt0ena:1;
- uint64_t cnt0clr:1;
- uint64_t cnt0sel:6;
-#else
- uint64_t cnt0sel:6;
- uint64_t cnt0clr:1;
- uint64_t cnt0ena:1;
- uint64_t cnt1sel:6;
- uint64_t cnt1clr:1;
- uint64_t cnt1ena:1;
- uint64_t cnt2sel:6;
- uint64_t cnt2clr:1;
- uint64_t cnt2ena:1;
- uint64_t cnt3sel:6;
- uint64_t cnt3clr:1;
- uint64_t cnt3ena:1;
- uint64_t cnt0rdclr:1;
- uint64_t cnt1rdclr:1;
- uint64_t cnt2rdclr:1;
- uint64_t cnt3rdclr:1;
- uint64_t reserved_36_63:28;
-#endif
- } s;
- struct cvmx_l2c_pfctl_s cn30xx;
- struct cvmx_l2c_pfctl_s cn31xx;
- struct cvmx_l2c_pfctl_s cn38xx;
- struct cvmx_l2c_pfctl_s cn38xxp2;
- struct cvmx_l2c_pfctl_s cn50xx;
- struct cvmx_l2c_pfctl_s cn52xx;
- struct cvmx_l2c_pfctl_s cn52xxp1;
- struct cvmx_l2c_pfctl_s cn56xx;
- struct cvmx_l2c_pfctl_s cn56xxp1;
- struct cvmx_l2c_pfctl_s cn58xx;
- struct cvmx_l2c_pfctl_s cn58xxp1;
-};
-
-union cvmx_l2c_ppgrp {
- uint64_t u64;
- struct cvmx_l2c_ppgrp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t pp11grp:2;
- uint64_t pp10grp:2;
- uint64_t pp9grp:2;
- uint64_t pp8grp:2;
- uint64_t pp7grp:2;
- uint64_t pp6grp:2;
- uint64_t pp5grp:2;
- uint64_t pp4grp:2;
- uint64_t pp3grp:2;
- uint64_t pp2grp:2;
- uint64_t pp1grp:2;
- uint64_t pp0grp:2;
-#else
- uint64_t pp0grp:2;
- uint64_t pp1grp:2;
- uint64_t pp2grp:2;
- uint64_t pp3grp:2;
- uint64_t pp4grp:2;
- uint64_t pp5grp:2;
- uint64_t pp6grp:2;
- uint64_t pp7grp:2;
- uint64_t pp8grp:2;
- uint64_t pp9grp:2;
- uint64_t pp10grp:2;
- uint64_t pp11grp:2;
- uint64_t reserved_24_63:40;
-#endif
- } s;
- struct cvmx_l2c_ppgrp_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t pp3grp:2;
- uint64_t pp2grp:2;
- uint64_t pp1grp:2;
- uint64_t pp0grp:2;
-#else
- uint64_t pp0grp:2;
- uint64_t pp1grp:2;
- uint64_t pp2grp:2;
- uint64_t pp3grp:2;
- uint64_t reserved_8_63:56;
-#endif
- } cn52xx;
- struct cvmx_l2c_ppgrp_cn52xx cn52xxp1;
- struct cvmx_l2c_ppgrp_s cn56xx;
- struct cvmx_l2c_ppgrp_s cn56xxp1;
-};
-
-union cvmx_l2c_qos_iobx {
- uint64_t u64;
- struct cvmx_l2c_qos_iobx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t dwblvl:3;
- uint64_t reserved_3_3:1;
- uint64_t lvl:3;
-#else
- uint64_t lvl:3;
- uint64_t reserved_3_3:1;
- uint64_t dwblvl:3;
- uint64_t reserved_7_63:57;
-#endif
- } s;
- struct cvmx_l2c_qos_iobx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t dwblvl:2;
- uint64_t reserved_2_3:2;
- uint64_t lvl:2;
-#else
- uint64_t lvl:2;
- uint64_t reserved_2_3:2;
- uint64_t dwblvl:2;
- uint64_t reserved_6_63:58;
-#endif
- } cn61xx;
- struct cvmx_l2c_qos_iobx_cn61xx cn63xx;
- struct cvmx_l2c_qos_iobx_cn61xx cn63xxp1;
- struct cvmx_l2c_qos_iobx_cn61xx cn66xx;
- struct cvmx_l2c_qos_iobx_s cn68xx;
- struct cvmx_l2c_qos_iobx_s cn68xxp1;
- struct cvmx_l2c_qos_iobx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_qos_ppx {
- uint64_t u64;
- struct cvmx_l2c_qos_ppx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t lvl:3;
-#else
- uint64_t lvl:3;
- uint64_t reserved_3_63:61;
-#endif
- } s;
- struct cvmx_l2c_qos_ppx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t lvl:2;
-#else
- uint64_t lvl:2;
- uint64_t reserved_2_63:62;
-#endif
- } cn61xx;
- struct cvmx_l2c_qos_ppx_cn61xx cn63xx;
- struct cvmx_l2c_qos_ppx_cn61xx cn63xxp1;
- struct cvmx_l2c_qos_ppx_cn61xx cn66xx;
- struct cvmx_l2c_qos_ppx_s cn68xx;
- struct cvmx_l2c_qos_ppx_s cn68xxp1;
- struct cvmx_l2c_qos_ppx_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_qos_wgt {
- uint64_t u64;
- struct cvmx_l2c_qos_wgt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wgt7:8;
- uint64_t wgt6:8;
- uint64_t wgt5:8;
- uint64_t wgt4:8;
- uint64_t wgt3:8;
- uint64_t wgt2:8;
- uint64_t wgt1:8;
- uint64_t wgt0:8;
-#else
- uint64_t wgt0:8;
- uint64_t wgt1:8;
- uint64_t wgt2:8;
- uint64_t wgt3:8;
- uint64_t wgt4:8;
- uint64_t wgt5:8;
- uint64_t wgt6:8;
- uint64_t wgt7:8;
-#endif
- } s;
- struct cvmx_l2c_qos_wgt_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wgt3:8;
- uint64_t wgt2:8;
- uint64_t wgt1:8;
- uint64_t wgt0:8;
-#else
- uint64_t wgt0:8;
- uint64_t wgt1:8;
- uint64_t wgt2:8;
- uint64_t wgt3:8;
- uint64_t reserved_32_63:32;
-#endif
- } cn61xx;
- struct cvmx_l2c_qos_wgt_cn61xx cn63xx;
- struct cvmx_l2c_qos_wgt_cn61xx cn63xxp1;
- struct cvmx_l2c_qos_wgt_cn61xx cn66xx;
- struct cvmx_l2c_qos_wgt_s cn68xx;
- struct cvmx_l2c_qos_wgt_s cn68xxp1;
- struct cvmx_l2c_qos_wgt_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_rscx_pfc {
- uint64_t u64;
- struct cvmx_l2c_rscx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_rscx_pfc_s cn61xx;
- struct cvmx_l2c_rscx_pfc_s cn63xx;
- struct cvmx_l2c_rscx_pfc_s cn63xxp1;
- struct cvmx_l2c_rscx_pfc_s cn66xx;
- struct cvmx_l2c_rscx_pfc_s cn68xx;
- struct cvmx_l2c_rscx_pfc_s cn68xxp1;
- struct cvmx_l2c_rscx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_rsdx_pfc {
- uint64_t u64;
- struct cvmx_l2c_rsdx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_rsdx_pfc_s cn61xx;
- struct cvmx_l2c_rsdx_pfc_s cn63xx;
- struct cvmx_l2c_rsdx_pfc_s cn63xxp1;
- struct cvmx_l2c_rsdx_pfc_s cn66xx;
- struct cvmx_l2c_rsdx_pfc_s cn68xx;
- struct cvmx_l2c_rsdx_pfc_s cn68xxp1;
- struct cvmx_l2c_rsdx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_spar0 {
- uint64_t u64;
- struct cvmx_l2c_spar0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk3:8;
- uint64_t umsk2:8;
- uint64_t umsk1:8;
- uint64_t umsk0:8;
-#else
- uint64_t umsk0:8;
- uint64_t umsk1:8;
- uint64_t umsk2:8;
- uint64_t umsk3:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_spar0_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t umsk0:4;
-#else
- uint64_t umsk0:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn30xx;
- struct cvmx_l2c_spar0_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t umsk1:4;
- uint64_t reserved_4_7:4;
- uint64_t umsk0:4;
-#else
- uint64_t umsk0:4;
- uint64_t reserved_4_7:4;
- uint64_t umsk1:4;
- uint64_t reserved_12_63:52;
-#endif
- } cn31xx;
- struct cvmx_l2c_spar0_s cn38xx;
- struct cvmx_l2c_spar0_s cn38xxp2;
- struct cvmx_l2c_spar0_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t umsk1:8;
- uint64_t umsk0:8;
-#else
- uint64_t umsk0:8;
- uint64_t umsk1:8;
- uint64_t reserved_16_63:48;
-#endif
- } cn50xx;
- struct cvmx_l2c_spar0_s cn52xx;
- struct cvmx_l2c_spar0_s cn52xxp1;
- struct cvmx_l2c_spar0_s cn56xx;
- struct cvmx_l2c_spar0_s cn56xxp1;
- struct cvmx_l2c_spar0_s cn58xx;
- struct cvmx_l2c_spar0_s cn58xxp1;
-};
-
-union cvmx_l2c_spar1 {
- uint64_t u64;
- struct cvmx_l2c_spar1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk7:8;
- uint64_t umsk6:8;
- uint64_t umsk5:8;
- uint64_t umsk4:8;
-#else
- uint64_t umsk4:8;
- uint64_t umsk5:8;
- uint64_t umsk6:8;
- uint64_t umsk7:8;
- uint64_t reserved_32_63:32;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_36_63:28,
+ __BITFIELD_FIELD(uint64_t cnt3rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt2rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt1rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt0rdclr:1,
+ __BITFIELD_FIELD(uint64_t cnt3ena:1,
+ __BITFIELD_FIELD(uint64_t cnt3clr:1,
+ __BITFIELD_FIELD(uint64_t cnt3sel:6,
+ __BITFIELD_FIELD(uint64_t cnt2ena:1,
+ __BITFIELD_FIELD(uint64_t cnt2clr:1,
+ __BITFIELD_FIELD(uint64_t cnt2sel:6,
+ __BITFIELD_FIELD(uint64_t cnt1ena:1,
+ __BITFIELD_FIELD(uint64_t cnt1clr:1,
+ __BITFIELD_FIELD(uint64_t cnt1sel:6,
+ __BITFIELD_FIELD(uint64_t cnt0ena:1,
+ __BITFIELD_FIELD(uint64_t cnt0clr:1,
+ __BITFIELD_FIELD(uint64_t cnt0sel:6,
+ ;)))))))))))))))))
} s;
- struct cvmx_l2c_spar1_s cn38xx;
- struct cvmx_l2c_spar1_s cn38xxp2;
- struct cvmx_l2c_spar1_s cn56xx;
- struct cvmx_l2c_spar1_s cn56xxp1;
- struct cvmx_l2c_spar1_s cn58xx;
- struct cvmx_l2c_spar1_s cn58xxp1;
-};
-
-union cvmx_l2c_spar2 {
- uint64_t u64;
- struct cvmx_l2c_spar2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk11:8;
- uint64_t umsk10:8;
- uint64_t umsk9:8;
- uint64_t umsk8:8;
-#else
- uint64_t umsk8:8;
- uint64_t umsk9:8;
- uint64_t umsk10:8;
- uint64_t umsk11:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_spar2_s cn38xx;
- struct cvmx_l2c_spar2_s cn38xxp2;
- struct cvmx_l2c_spar2_s cn56xx;
- struct cvmx_l2c_spar2_s cn56xxp1;
- struct cvmx_l2c_spar2_s cn58xx;
- struct cvmx_l2c_spar2_s cn58xxp1;
-};
-
-union cvmx_l2c_spar3 {
- uint64_t u64;
- struct cvmx_l2c_spar3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t umsk15:8;
- uint64_t umsk14:8;
- uint64_t umsk13:8;
- uint64_t umsk12:8;
-#else
- uint64_t umsk12:8;
- uint64_t umsk13:8;
- uint64_t umsk14:8;
- uint64_t umsk15:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_spar3_s cn38xx;
- struct cvmx_l2c_spar3_s cn38xxp2;
- struct cvmx_l2c_spar3_s cn58xx;
- struct cvmx_l2c_spar3_s cn58xxp1;
-};
-
-union cvmx_l2c_spar4 {
- uint64_t u64;
- struct cvmx_l2c_spar4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t umskiob:8;
-#else
- uint64_t umskiob:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_l2c_spar4_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t umskiob:4;
-#else
- uint64_t umskiob:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn30xx;
- struct cvmx_l2c_spar4_cn30xx cn31xx;
- struct cvmx_l2c_spar4_s cn38xx;
- struct cvmx_l2c_spar4_s cn38xxp2;
- struct cvmx_l2c_spar4_s cn50xx;
- struct cvmx_l2c_spar4_s cn52xx;
- struct cvmx_l2c_spar4_s cn52xxp1;
- struct cvmx_l2c_spar4_s cn56xx;
- struct cvmx_l2c_spar4_s cn56xxp1;
- struct cvmx_l2c_spar4_s cn58xx;
- struct cvmx_l2c_spar4_s cn58xxp1;
-};
-
-union cvmx_l2c_tadx_ecc0 {
- uint64_t u64;
- struct cvmx_l2c_tadx_ecc0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_58_63:6;
- uint64_t ow3ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow2ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow1ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow0ecc:10;
-#else
- uint64_t ow0ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow1ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow2ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow3ecc:10;
- uint64_t reserved_58_63:6;
-#endif
- } s;
- struct cvmx_l2c_tadx_ecc0_s cn61xx;
- struct cvmx_l2c_tadx_ecc0_s cn63xx;
- struct cvmx_l2c_tadx_ecc0_s cn63xxp1;
- struct cvmx_l2c_tadx_ecc0_s cn66xx;
- struct cvmx_l2c_tadx_ecc0_s cn68xx;
- struct cvmx_l2c_tadx_ecc0_s cn68xxp1;
- struct cvmx_l2c_tadx_ecc0_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_ecc1 {
- uint64_t u64;
- struct cvmx_l2c_tadx_ecc1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_58_63:6;
- uint64_t ow7ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow6ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow5ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow4ecc:10;
-#else
- uint64_t ow4ecc:10;
- uint64_t reserved_10_15:6;
- uint64_t ow5ecc:10;
- uint64_t reserved_26_31:6;
- uint64_t ow6ecc:10;
- uint64_t reserved_42_47:6;
- uint64_t ow7ecc:10;
- uint64_t reserved_58_63:6;
-#endif
- } s;
- struct cvmx_l2c_tadx_ecc1_s cn61xx;
- struct cvmx_l2c_tadx_ecc1_s cn63xx;
- struct cvmx_l2c_tadx_ecc1_s cn63xxp1;
- struct cvmx_l2c_tadx_ecc1_s cn66xx;
- struct cvmx_l2c_tadx_ecc1_s cn68xx;
- struct cvmx_l2c_tadx_ecc1_s cn68xxp1;
- struct cvmx_l2c_tadx_ecc1_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_ien {
- uint64_t u64;
- struct cvmx_l2c_tadx_ien_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t wrdislmc:1;
- uint64_t rddislmc:1;
- uint64_t noway:1;
- uint64_t vbfdbe:1;
- uint64_t vbfsbe:1;
- uint64_t tagdbe:1;
- uint64_t tagsbe:1;
- uint64_t l2ddbe:1;
- uint64_t l2dsbe:1;
-#else
- uint64_t l2dsbe:1;
- uint64_t l2ddbe:1;
- uint64_t tagsbe:1;
- uint64_t tagdbe:1;
- uint64_t vbfsbe:1;
- uint64_t vbfdbe:1;
- uint64_t noway:1;
- uint64_t rddislmc:1;
- uint64_t wrdislmc:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_tadx_ien_s cn61xx;
- struct cvmx_l2c_tadx_ien_s cn63xx;
- struct cvmx_l2c_tadx_ien_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t noway:1;
- uint64_t vbfdbe:1;
- uint64_t vbfsbe:1;
- uint64_t tagdbe:1;
- uint64_t tagsbe:1;
- uint64_t l2ddbe:1;
- uint64_t l2dsbe:1;
-#else
- uint64_t l2dsbe:1;
- uint64_t l2ddbe:1;
- uint64_t tagsbe:1;
- uint64_t tagdbe:1;
- uint64_t vbfsbe:1;
- uint64_t vbfdbe:1;
- uint64_t noway:1;
- uint64_t reserved_7_63:57;
-#endif
- } cn63xxp1;
- struct cvmx_l2c_tadx_ien_s cn66xx;
- struct cvmx_l2c_tadx_ien_s cn68xx;
- struct cvmx_l2c_tadx_ien_s cn68xxp1;
- struct cvmx_l2c_tadx_ien_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_int {
- uint64_t u64;
- struct cvmx_l2c_tadx_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t wrdislmc:1;
- uint64_t rddislmc:1;
- uint64_t noway:1;
- uint64_t vbfdbe:1;
- uint64_t vbfsbe:1;
- uint64_t tagdbe:1;
- uint64_t tagsbe:1;
- uint64_t l2ddbe:1;
- uint64_t l2dsbe:1;
-#else
- uint64_t l2dsbe:1;
- uint64_t l2ddbe:1;
- uint64_t tagsbe:1;
- uint64_t tagdbe:1;
- uint64_t vbfsbe:1;
- uint64_t vbfdbe:1;
- uint64_t noway:1;
- uint64_t rddislmc:1;
- uint64_t wrdislmc:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_tadx_int_s cn61xx;
- struct cvmx_l2c_tadx_int_s cn63xx;
- struct cvmx_l2c_tadx_int_s cn66xx;
- struct cvmx_l2c_tadx_int_s cn68xx;
- struct cvmx_l2c_tadx_int_s cn68xxp1;
- struct cvmx_l2c_tadx_int_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc0 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc0_s cn61xx;
- struct cvmx_l2c_tadx_pfc0_s cn63xx;
- struct cvmx_l2c_tadx_pfc0_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc0_s cn66xx;
- struct cvmx_l2c_tadx_pfc0_s cn68xx;
- struct cvmx_l2c_tadx_pfc0_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc0_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc1 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc1_s cn61xx;
- struct cvmx_l2c_tadx_pfc1_s cn63xx;
- struct cvmx_l2c_tadx_pfc1_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc1_s cn66xx;
- struct cvmx_l2c_tadx_pfc1_s cn68xx;
- struct cvmx_l2c_tadx_pfc1_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc1_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc2 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc2_s cn61xx;
- struct cvmx_l2c_tadx_pfc2_s cn63xx;
- struct cvmx_l2c_tadx_pfc2_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc2_s cn66xx;
- struct cvmx_l2c_tadx_pfc2_s cn68xx;
- struct cvmx_l2c_tadx_pfc2_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc2_s cnf71xx;
-};
-
-union cvmx_l2c_tadx_pfc3 {
- uint64_t u64;
- struct cvmx_l2c_tadx_pfc3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_tadx_pfc3_s cn61xx;
- struct cvmx_l2c_tadx_pfc3_s cn63xx;
- struct cvmx_l2c_tadx_pfc3_s cn63xxp1;
- struct cvmx_l2c_tadx_pfc3_s cn66xx;
- struct cvmx_l2c_tadx_pfc3_s cn68xx;
- struct cvmx_l2c_tadx_pfc3_s cn68xxp1;
- struct cvmx_l2c_tadx_pfc3_s cnf71xx;
};
union cvmx_l2c_tadx_prf {
uint64_t u64;
struct cvmx_l2c_tadx_prf_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt3sel:8;
- uint64_t cnt2sel:8;
- uint64_t cnt1sel:8;
- uint64_t cnt0sel:8;
-#else
- uint64_t cnt0sel:8;
- uint64_t cnt1sel:8;
- uint64_t cnt2sel:8;
- uint64_t cnt3sel:8;
- uint64_t reserved_32_63:32;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_32_63:32,
+ __BITFIELD_FIELD(uint64_t cnt3sel:8,
+ __BITFIELD_FIELD(uint64_t cnt2sel:8,
+ __BITFIELD_FIELD(uint64_t cnt1sel:8,
+ __BITFIELD_FIELD(uint64_t cnt0sel:8,
+ ;)))))
} s;
- struct cvmx_l2c_tadx_prf_s cn61xx;
- struct cvmx_l2c_tadx_prf_s cn63xx;
- struct cvmx_l2c_tadx_prf_s cn63xxp1;
- struct cvmx_l2c_tadx_prf_s cn66xx;
- struct cvmx_l2c_tadx_prf_s cn68xx;
- struct cvmx_l2c_tadx_prf_s cn68xxp1;
- struct cvmx_l2c_tadx_prf_s cnf71xx;
};
union cvmx_l2c_tadx_tag {
uint64_t u64;
struct cvmx_l2c_tadx_tag_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_46_63:18;
- uint64_t ecc:6;
- uint64_t reserved_36_39:4;
- uint64_t tag:19;
- uint64_t reserved_4_16:13;
- uint64_t use:1;
- uint64_t valid:1;
- uint64_t dirty:1;
- uint64_t lock:1;
-#else
- uint64_t lock:1;
- uint64_t dirty:1;
- uint64_t valid:1;
- uint64_t use:1;
- uint64_t reserved_4_16:13;
- uint64_t tag:19;
- uint64_t reserved_36_39:4;
- uint64_t ecc:6;
- uint64_t reserved_46_63:18;
-#endif
- } s;
- struct cvmx_l2c_tadx_tag_s cn61xx;
- struct cvmx_l2c_tadx_tag_s cn63xx;
- struct cvmx_l2c_tadx_tag_s cn63xxp1;
- struct cvmx_l2c_tadx_tag_s cn66xx;
- struct cvmx_l2c_tadx_tag_s cn68xx;
- struct cvmx_l2c_tadx_tag_s cn68xxp1;
- struct cvmx_l2c_tadx_tag_s cnf71xx;
-};
-
-union cvmx_l2c_ver_id {
- uint64_t u64;
- struct cvmx_l2c_ver_id_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t mask:64;
-#else
- uint64_t mask:64;
-#endif
- } s;
- struct cvmx_l2c_ver_id_s cn61xx;
- struct cvmx_l2c_ver_id_s cn63xx;
- struct cvmx_l2c_ver_id_s cn63xxp1;
- struct cvmx_l2c_ver_id_s cn66xx;
- struct cvmx_l2c_ver_id_s cn68xx;
- struct cvmx_l2c_ver_id_s cn68xxp1;
- struct cvmx_l2c_ver_id_s cnf71xx;
-};
-
-union cvmx_l2c_ver_iob {
- uint64_t u64;
- struct cvmx_l2c_ver_iob_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t mask:2;
-#else
- uint64_t mask:2;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_l2c_ver_iob_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t mask:1;
-#else
- uint64_t mask:1;
- uint64_t reserved_1_63:63;
-#endif
- } cn61xx;
- struct cvmx_l2c_ver_iob_cn61xx cn63xx;
- struct cvmx_l2c_ver_iob_cn61xx cn63xxp1;
- struct cvmx_l2c_ver_iob_cn61xx cn66xx;
- struct cvmx_l2c_ver_iob_s cn68xx;
- struct cvmx_l2c_ver_iob_s cn68xxp1;
- struct cvmx_l2c_ver_iob_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_ver_msc {
- uint64_t u64;
- struct cvmx_l2c_ver_msc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t invl2:1;
- uint64_t dwb:1;
-#else
- uint64_t dwb:1;
- uint64_t invl2:1;
- uint64_t reserved_2_63:62;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_46_63:18,
+ __BITFIELD_FIELD(uint64_t ecc:6,
+ __BITFIELD_FIELD(uint64_t reserved_36_39:4,
+ __BITFIELD_FIELD(uint64_t tag:19,
+ __BITFIELD_FIELD(uint64_t reserved_4_16:13,
+ __BITFIELD_FIELD(uint64_t use:1,
+ __BITFIELD_FIELD(uint64_t valid:1,
+ __BITFIELD_FIELD(uint64_t dirty:1,
+ __BITFIELD_FIELD(uint64_t lock:1,
+ ;)))))))))
} s;
- struct cvmx_l2c_ver_msc_s cn61xx;
- struct cvmx_l2c_ver_msc_s cn63xx;
- struct cvmx_l2c_ver_msc_s cn66xx;
- struct cvmx_l2c_ver_msc_s cn68xx;
- struct cvmx_l2c_ver_msc_s cn68xxp1;
- struct cvmx_l2c_ver_msc_s cnf71xx;
};
-union cvmx_l2c_ver_pp {
- uint64_t u64;
- struct cvmx_l2c_ver_pp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t mask:32;
-#else
- uint64_t mask:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_l2c_ver_pp_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mask:4;
-#else
- uint64_t mask:4;
- uint64_t reserved_4_63:60;
-#endif
- } cn61xx;
- struct cvmx_l2c_ver_pp_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t mask:6;
-#else
- uint64_t mask:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn63xx;
- struct cvmx_l2c_ver_pp_cn63xx cn63xxp1;
- struct cvmx_l2c_ver_pp_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t mask:10;
-#else
- uint64_t mask:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn66xx;
- struct cvmx_l2c_ver_pp_s cn68xx;
- struct cvmx_l2c_ver_pp_s cn68xxp1;
- struct cvmx_l2c_ver_pp_cn61xx cnf71xx;
-};
-
-union cvmx_l2c_virtid_iobx {
- uint64_t u64;
- struct cvmx_l2c_virtid_iobx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t dwbid:6;
- uint64_t reserved_6_7:2;
- uint64_t id:6;
-#else
- uint64_t id:6;
- uint64_t reserved_6_7:2;
- uint64_t dwbid:6;
- uint64_t reserved_14_63:50;
-#endif
- } s;
- struct cvmx_l2c_virtid_iobx_s cn61xx;
- struct cvmx_l2c_virtid_iobx_s cn63xx;
- struct cvmx_l2c_virtid_iobx_s cn63xxp1;
- struct cvmx_l2c_virtid_iobx_s cn66xx;
- struct cvmx_l2c_virtid_iobx_s cn68xx;
- struct cvmx_l2c_virtid_iobx_s cn68xxp1;
- struct cvmx_l2c_virtid_iobx_s cnf71xx;
-};
-
-union cvmx_l2c_virtid_ppx {
- uint64_t u64;
- struct cvmx_l2c_virtid_ppx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t id:6;
-#else
- uint64_t id:6;
- uint64_t reserved_6_63:58;
-#endif
- } s;
- struct cvmx_l2c_virtid_ppx_s cn61xx;
- struct cvmx_l2c_virtid_ppx_s cn63xx;
- struct cvmx_l2c_virtid_ppx_s cn63xxp1;
- struct cvmx_l2c_virtid_ppx_s cn66xx;
- struct cvmx_l2c_virtid_ppx_s cn68xx;
- struct cvmx_l2c_virtid_ppx_s cn68xxp1;
- struct cvmx_l2c_virtid_ppx_s cnf71xx;
-};
-
-union cvmx_l2c_vrt_ctl {
- uint64_t u64;
- struct cvmx_l2c_vrt_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t ooberr:1;
- uint64_t reserved_7_7:1;
- uint64_t memsz:3;
- uint64_t numid:3;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t numid:3;
- uint64_t memsz:3;
- uint64_t reserved_7_7:1;
- uint64_t ooberr:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_l2c_vrt_ctl_s cn61xx;
- struct cvmx_l2c_vrt_ctl_s cn63xx;
- struct cvmx_l2c_vrt_ctl_s cn63xxp1;
- struct cvmx_l2c_vrt_ctl_s cn66xx;
- struct cvmx_l2c_vrt_ctl_s cn68xx;
- struct cvmx_l2c_vrt_ctl_s cn68xxp1;
- struct cvmx_l2c_vrt_ctl_s cnf71xx;
-};
-
-union cvmx_l2c_vrt_memx {
- uint64_t u64;
- struct cvmx_l2c_vrt_memx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t parity:4;
- uint64_t data:32;
-#else
- uint64_t data:32;
- uint64_t parity:4;
- uint64_t reserved_36_63:28;
-#endif
- } s;
- struct cvmx_l2c_vrt_memx_s cn61xx;
- struct cvmx_l2c_vrt_memx_s cn63xx;
- struct cvmx_l2c_vrt_memx_s cn63xxp1;
- struct cvmx_l2c_vrt_memx_s cn66xx;
- struct cvmx_l2c_vrt_memx_s cn68xx;
- struct cvmx_l2c_vrt_memx_s cn68xxp1;
- struct cvmx_l2c_vrt_memx_s cnf71xx;
-};
-
-union cvmx_l2c_wpar_iobx {
- uint64_t u64;
- struct cvmx_l2c_wpar_iobx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mask:16;
-#else
- uint64_t mask:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_l2c_wpar_iobx_s cn61xx;
- struct cvmx_l2c_wpar_iobx_s cn63xx;
- struct cvmx_l2c_wpar_iobx_s cn63xxp1;
- struct cvmx_l2c_wpar_iobx_s cn66xx;
- struct cvmx_l2c_wpar_iobx_s cn68xx;
- struct cvmx_l2c_wpar_iobx_s cn68xxp1;
- struct cvmx_l2c_wpar_iobx_s cnf71xx;
-};
-
-union cvmx_l2c_wpar_ppx {
- uint64_t u64;
- struct cvmx_l2c_wpar_ppx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t mask:16;
-#else
- uint64_t mask:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_l2c_wpar_ppx_s cn61xx;
- struct cvmx_l2c_wpar_ppx_s cn63xx;
- struct cvmx_l2c_wpar_ppx_s cn63xxp1;
- struct cvmx_l2c_wpar_ppx_s cn66xx;
- struct cvmx_l2c_wpar_ppx_s cn68xx;
- struct cvmx_l2c_wpar_ppx_s cn68xxp1;
- struct cvmx_l2c_wpar_ppx_s cnf71xx;
-};
-
-union cvmx_l2c_xmcx_pfc {
- uint64_t u64;
- struct cvmx_l2c_xmcx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
- } s;
- struct cvmx_l2c_xmcx_pfc_s cn61xx;
- struct cvmx_l2c_xmcx_pfc_s cn63xx;
- struct cvmx_l2c_xmcx_pfc_s cn63xxp1;
- struct cvmx_l2c_xmcx_pfc_s cn66xx;
- struct cvmx_l2c_xmcx_pfc_s cn68xx;
- struct cvmx_l2c_xmcx_pfc_s cn68xxp1;
- struct cvmx_l2c_xmcx_pfc_s cnf71xx;
-};
-
-union cvmx_l2c_xmc_cmd {
+union cvmx_l2c_lckbase {
uint64_t u64;
- struct cvmx_l2c_xmc_cmd_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t inuse:1;
- uint64_t cmd:6;
- uint64_t reserved_38_56:19;
- uint64_t addr:38;
-#else
- uint64_t addr:38;
- uint64_t reserved_38_56:19;
- uint64_t cmd:6;
- uint64_t inuse:1;
-#endif
+ struct cvmx_l2c_lckbase_s {
+ __BITFIELD_FIELD(uint64_t reserved_31_63:33,
+ __BITFIELD_FIELD(uint64_t lck_base:27,
+ __BITFIELD_FIELD(uint64_t reserved_1_3:3,
+ __BITFIELD_FIELD(uint64_t lck_ena:1,
+ ;))))
} s;
- struct cvmx_l2c_xmc_cmd_s cn61xx;
- struct cvmx_l2c_xmc_cmd_s cn63xx;
- struct cvmx_l2c_xmc_cmd_s cn63xxp1;
- struct cvmx_l2c_xmc_cmd_s cn66xx;
- struct cvmx_l2c_xmc_cmd_s cn68xx;
- struct cvmx_l2c_xmc_cmd_s cn68xxp1;
- struct cvmx_l2c_xmc_cmd_s cnf71xx;
};
-union cvmx_l2c_xmdx_pfc {
+union cvmx_l2c_lckoff {
uint64_t u64;
- struct cvmx_l2c_xmdx_pfc_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t count:64;
-#else
- uint64_t count:64;
-#endif
+ struct cvmx_l2c_lckoff_s {
+ __BITFIELD_FIELD(uint64_t reserved_10_63:54,
+ __BITFIELD_FIELD(uint64_t lck_offset:10,
+ ;))
} s;
- struct cvmx_l2c_xmdx_pfc_s cn61xx;
- struct cvmx_l2c_xmdx_pfc_s cn63xx;
- struct cvmx_l2c_xmdx_pfc_s cn63xxp1;
- struct cvmx_l2c_xmdx_pfc_s cn66xx;
- struct cvmx_l2c_xmdx_pfc_s cn68xx;
- struct cvmx_l2c_xmdx_pfc_s cn68xxp1;
- struct cvmx_l2c_xmdx_pfc_s cnf71xx;
};
#endif
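
The __BITFIELD_FIELD() conversions in this patch keep a single, big-endian
(MSB-first) field list; on little-endian builds the macro emits the fields in
reverse order, so each register layout is declared once instead of twice under
#ifdef __BIG_ENDIAN_BITFIELD. Below is a minimal standalone sketch of the
pattern. The one-line macro re-definition merely stands in for the in-tree
<uapi/asm/bitfield.h> header and is not part of this patch:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for <uapi/asm/bitfield.h>: the MSB field is declared
	 * first on big-endian-bitfield targets and last on little-endian
	 * ones, so one field list serves both layouts.
	 */
	#ifdef __BIG_ENDIAN_BITFIELD
	#define __BITFIELD_FIELD(field, more)	field; more
	#else
	#define __BITFIELD_FIELD(field, more)	more field;
	#endif

	/* Toy register: a 4-bit field in bits 3:0, the rest reserved. */
	union demo_reg {
		uint64_t u64;
		struct {
			__BITFIELD_FIELD(uint64_t reserved_4_63:60,
			__BITFIELD_FIELD(uint64_t lvl:4,
			;))
		} s;
	};

	int main(void)
	{
		union demo_reg r = { .u64 = 0 };

		r.s.lvl = 0xa;
		/* The field lands in register bits 3:0 regardless of
		 * which branch of the macro was taken.
		 */
		printf("u64 = 0x%llx\n", (unsigned long long)r.u64);
		return 0;
	}
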
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index ddb429210a0e..02c4479a90c8 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2010 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -33,48 +33,39 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
-#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
+#include <uapi/asm/bitfield.h>
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro */
+#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro */
-#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
+/* Based on 128 byte cache line size */
+#define CVMX_L2C_IDX_ADDR_SHIFT 7
#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
-#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + \
+ cvmx_l2c_get_set_bits())
#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
-#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
+#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
-/* Defines for Virtualizations, valid only from Octeon II onwards. */
-#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
-#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)
+/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
+#define CVMX_L2C_TADS 1
union cvmx_l2c_tag {
uint64_t u64;
struct {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved:28;
- uint64_t V:1; /* Line valid */
- uint64_t D:1; /* Line dirty */
- uint64_t L:1; /* Line locked */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t addr:32; /* Phys mem (not all bits valid) */
-#else
- uint64_t addr:32; /* Phys mem (not all bits valid) */
- uint64_t U:1; /* Use, LRU eviction */
- uint64_t L:1; /* Line locked */
- uint64_t D:1; /* Line dirty */
- uint64_t V:1; /* Line valid */
- uint64_t reserved:28;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved:28,
+ __BITFIELD_FIELD(uint64_t V:1, /* Line valid */
+ __BITFIELD_FIELD(uint64_t D:1, /* Line dirty */
+ __BITFIELD_FIELD(uint64_t L:1, /* Line locked */
+ __BITFIELD_FIELD(uint64_t U:1, /* Use, LRU eviction */
+ __BITFIELD_FIELD(uint64_t addr:32, /* Phys mem (not all bits valid) */
+ ;))))))
} s;
};
-/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
-#define CVMX_L2C_TADS 1
-
- /* L2C Performance Counter events. */
+/* L2C Performance Counter events. */
enum cvmx_l2c_event {
CVMX_L2C_EVENT_CYCLES = 0,
CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
@@ -175,7 +166,8 @@ enum cvmx_l2c_tad_event {
*
* @note The routine does not clear the counter.
*/
-void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read);
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+ uint32_t clear_on_read);
/**
* Read the given L2 Cache performance counter. The counter must be configured
@@ -307,8 +299,11 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
/* Wrapper providing a deprecated old function name */
-static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
-static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index)
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+ uint32_t index)
+ __attribute__((deprecated));
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+ uint32_t index)
{
return cvmx_l2c_get_tag(association, index);
}
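
Call sites are unaffected by the conversion: a reader still loads the raw
value into .u64 (or receives the union from an accessor) and picks fields out
of .s. A hedged usage sketch against the cvmx_l2c_get_tag() accessor declared
above — the (way, set) arguments are arbitrary example inputs, and the
includes assume an in-tree Octeon kernel build:

	#include <linux/printk.h>
	#include <asm/octeon/cvmx.h>
	#include <asm/octeon/cvmx-l2c.h>

	/* Print the tag state of one (way, set) pair of the L2 cache. */
	static void demo_dump_l2_tag(void)
	{
		union cvmx_l2c_tag tag = cvmx_l2c_get_tag(0 /* way */, 0 /* set */);

		if (tag.s.V)	/* line valid */
			pr_info("addr=0x%llx dirty=%u locked=%u used=%u\n",
				(unsigned long long)tag.s.addr,
				(unsigned)tag.s.D, (unsigned)tag.s.L,
				(unsigned)tag.s.U);
	}
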
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
deleted file mode 100644
index 11a456215638..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
+++ /dev/null
@@ -1,526 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_L2D_DEFS_H__
-#define __CVMX_L2D_DEFS_H__
-
-#define CVMX_L2D_BST0 (CVMX_ADD_IO_SEG(0x0001180080000780ull))
-#define CVMX_L2D_BST1 (CVMX_ADD_IO_SEG(0x0001180080000788ull))
-#define CVMX_L2D_BST2 (CVMX_ADD_IO_SEG(0x0001180080000790ull))
-#define CVMX_L2D_BST3 (CVMX_ADD_IO_SEG(0x0001180080000798ull))
-#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
-#define CVMX_L2D_FADR (CVMX_ADD_IO_SEG(0x0001180080000018ull))
-#define CVMX_L2D_FSYN0 (CVMX_ADD_IO_SEG(0x0001180080000020ull))
-#define CVMX_L2D_FSYN1 (CVMX_ADD_IO_SEG(0x0001180080000028ull))
-#define CVMX_L2D_FUS0 (CVMX_ADD_IO_SEG(0x00011800800007A0ull))
-#define CVMX_L2D_FUS1 (CVMX_ADD_IO_SEG(0x00011800800007A8ull))
-#define CVMX_L2D_FUS2 (CVMX_ADD_IO_SEG(0x00011800800007B0ull))
-#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
-
-union cvmx_l2d_bst0 {
- uint64_t u64;
- struct cvmx_l2d_bst0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_35_63:29;
- uint64_t ftl:1;
- uint64_t q0stat:34;
-#else
- uint64_t q0stat:34;
- uint64_t ftl:1;
- uint64_t reserved_35_63:29;
-#endif
- } s;
- struct cvmx_l2d_bst0_s cn30xx;
- struct cvmx_l2d_bst0_s cn31xx;
- struct cvmx_l2d_bst0_s cn38xx;
- struct cvmx_l2d_bst0_s cn38xxp2;
- struct cvmx_l2d_bst0_s cn50xx;
- struct cvmx_l2d_bst0_s cn52xx;
- struct cvmx_l2d_bst0_s cn52xxp1;
- struct cvmx_l2d_bst0_s cn56xx;
- struct cvmx_l2d_bst0_s cn56xxp1;
- struct cvmx_l2d_bst0_s cn58xx;
- struct cvmx_l2d_bst0_s cn58xxp1;
-};
-
-union cvmx_l2d_bst1 {
- uint64_t u64;
- struct cvmx_l2d_bst1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q1stat:34;
-#else
- uint64_t q1stat:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_bst1_s cn30xx;
- struct cvmx_l2d_bst1_s cn31xx;
- struct cvmx_l2d_bst1_s cn38xx;
- struct cvmx_l2d_bst1_s cn38xxp2;
- struct cvmx_l2d_bst1_s cn50xx;
- struct cvmx_l2d_bst1_s cn52xx;
- struct cvmx_l2d_bst1_s cn52xxp1;
- struct cvmx_l2d_bst1_s cn56xx;
- struct cvmx_l2d_bst1_s cn56xxp1;
- struct cvmx_l2d_bst1_s cn58xx;
- struct cvmx_l2d_bst1_s cn58xxp1;
-};
-
-union cvmx_l2d_bst2 {
- uint64_t u64;
- struct cvmx_l2d_bst2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q2stat:34;
-#else
- uint64_t q2stat:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_bst2_s cn30xx;
- struct cvmx_l2d_bst2_s cn31xx;
- struct cvmx_l2d_bst2_s cn38xx;
- struct cvmx_l2d_bst2_s cn38xxp2;
- struct cvmx_l2d_bst2_s cn50xx;
- struct cvmx_l2d_bst2_s cn52xx;
- struct cvmx_l2d_bst2_s cn52xxp1;
- struct cvmx_l2d_bst2_s cn56xx;
- struct cvmx_l2d_bst2_s cn56xxp1;
- struct cvmx_l2d_bst2_s cn58xx;
- struct cvmx_l2d_bst2_s cn58xxp1;
-};
-
-union cvmx_l2d_bst3 {
- uint64_t u64;
- struct cvmx_l2d_bst3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q3stat:34;
-#else
- uint64_t q3stat:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_bst3_s cn30xx;
- struct cvmx_l2d_bst3_s cn31xx;
- struct cvmx_l2d_bst3_s cn38xx;
- struct cvmx_l2d_bst3_s cn38xxp2;
- struct cvmx_l2d_bst3_s cn50xx;
- struct cvmx_l2d_bst3_s cn52xx;
- struct cvmx_l2d_bst3_s cn52xxp1;
- struct cvmx_l2d_bst3_s cn56xx;
- struct cvmx_l2d_bst3_s cn56xxp1;
- struct cvmx_l2d_bst3_s cn58xx;
- struct cvmx_l2d_bst3_s cn58xxp1;
-};
-
-union cvmx_l2d_err {
- uint64_t u64;
- struct cvmx_l2d_err_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t bmhclsel:1;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t bmhclsel:1;
- uint64_t reserved_6_63:58;
-#endif
- } s;
- struct cvmx_l2d_err_s cn30xx;
- struct cvmx_l2d_err_s cn31xx;
- struct cvmx_l2d_err_s cn38xx;
- struct cvmx_l2d_err_s cn38xxp2;
- struct cvmx_l2d_err_s cn50xx;
- struct cvmx_l2d_err_s cn52xx;
- struct cvmx_l2d_err_s cn52xxp1;
- struct cvmx_l2d_err_s cn56xx;
- struct cvmx_l2d_err_s cn56xxp1;
- struct cvmx_l2d_err_s cn58xx;
- struct cvmx_l2d_err_s cn58xxp1;
-};
-
-union cvmx_l2d_fadr {
- uint64_t u64;
- struct cvmx_l2d_fadr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t fadru:1;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t fadr:11;
-#else
- uint64_t fadr:11;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t fadru:1;
- uint64_t reserved_19_63:45;
-#endif
- } s;
- struct cvmx_l2d_fadr_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t reserved_13_13:1;
- uint64_t fset:2;
- uint64_t reserved_9_10:2;
- uint64_t fadr:9;
-#else
- uint64_t fadr:9;
- uint64_t reserved_9_10:2;
- uint64_t fset:2;
- uint64_t reserved_13_13:1;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn30xx;
- struct cvmx_l2d_fadr_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t reserved_13_13:1;
- uint64_t fset:2;
- uint64_t reserved_10_10:1;
- uint64_t fadr:10;
-#else
- uint64_t fadr:10;
- uint64_t reserved_10_10:1;
- uint64_t fset:2;
- uint64_t reserved_13_13:1;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn31xx;
- struct cvmx_l2d_fadr_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t fadr:11;
-#else
- uint64_t fadr:11;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn38xx;
- struct cvmx_l2d_fadr_cn38xx cn38xxp2;
- struct cvmx_l2d_fadr_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t reserved_8_10:3;
- uint64_t fadr:8;
-#else
- uint64_t fadr:8;
- uint64_t reserved_8_10:3;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn50xx;
- struct cvmx_l2d_fadr_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t fowmsk:4;
- uint64_t fset:3;
- uint64_t reserved_10_10:1;
- uint64_t fadr:10;
-#else
- uint64_t fadr:10;
- uint64_t reserved_10_10:1;
- uint64_t fset:3;
- uint64_t fowmsk:4;
- uint64_t reserved_18_63:46;
-#endif
- } cn52xx;
- struct cvmx_l2d_fadr_cn52xx cn52xxp1;
- struct cvmx_l2d_fadr_s cn56xx;
- struct cvmx_l2d_fadr_s cn56xxp1;
- struct cvmx_l2d_fadr_s cn58xx;
- struct cvmx_l2d_fadr_s cn58xxp1;
-};
-
-union cvmx_l2d_fsyn0 {
- uint64_t u64;
- struct cvmx_l2d_fsyn0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t fsyn_ow1:10;
- uint64_t fsyn_ow0:10;
-#else
- uint64_t fsyn_ow0:10;
- uint64_t fsyn_ow1:10;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_l2d_fsyn0_s cn30xx;
- struct cvmx_l2d_fsyn0_s cn31xx;
- struct cvmx_l2d_fsyn0_s cn38xx;
- struct cvmx_l2d_fsyn0_s cn38xxp2;
- struct cvmx_l2d_fsyn0_s cn50xx;
- struct cvmx_l2d_fsyn0_s cn52xx;
- struct cvmx_l2d_fsyn0_s cn52xxp1;
- struct cvmx_l2d_fsyn0_s cn56xx;
- struct cvmx_l2d_fsyn0_s cn56xxp1;
- struct cvmx_l2d_fsyn0_s cn58xx;
- struct cvmx_l2d_fsyn0_s cn58xxp1;
-};
-
-union cvmx_l2d_fsyn1 {
- uint64_t u64;
- struct cvmx_l2d_fsyn1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t fsyn_ow3:10;
- uint64_t fsyn_ow2:10;
-#else
- uint64_t fsyn_ow2:10;
- uint64_t fsyn_ow3:10;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_l2d_fsyn1_s cn30xx;
- struct cvmx_l2d_fsyn1_s cn31xx;
- struct cvmx_l2d_fsyn1_s cn38xx;
- struct cvmx_l2d_fsyn1_s cn38xxp2;
- struct cvmx_l2d_fsyn1_s cn50xx;
- struct cvmx_l2d_fsyn1_s cn52xx;
- struct cvmx_l2d_fsyn1_s cn52xxp1;
- struct cvmx_l2d_fsyn1_s cn56xx;
- struct cvmx_l2d_fsyn1_s cn56xxp1;
- struct cvmx_l2d_fsyn1_s cn58xx;
- struct cvmx_l2d_fsyn1_s cn58xxp1;
-};
-
-union cvmx_l2d_fus0 {
- uint64_t u64;
- struct cvmx_l2d_fus0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q0fus:34;
-#else
- uint64_t q0fus:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_fus0_s cn30xx;
- struct cvmx_l2d_fus0_s cn31xx;
- struct cvmx_l2d_fus0_s cn38xx;
- struct cvmx_l2d_fus0_s cn38xxp2;
- struct cvmx_l2d_fus0_s cn50xx;
- struct cvmx_l2d_fus0_s cn52xx;
- struct cvmx_l2d_fus0_s cn52xxp1;
- struct cvmx_l2d_fus0_s cn56xx;
- struct cvmx_l2d_fus0_s cn56xxp1;
- struct cvmx_l2d_fus0_s cn58xx;
- struct cvmx_l2d_fus0_s cn58xxp1;
-};
-
-union cvmx_l2d_fus1 {
- uint64_t u64;
- struct cvmx_l2d_fus1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q1fus:34;
-#else
- uint64_t q1fus:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_fus1_s cn30xx;
- struct cvmx_l2d_fus1_s cn31xx;
- struct cvmx_l2d_fus1_s cn38xx;
- struct cvmx_l2d_fus1_s cn38xxp2;
- struct cvmx_l2d_fus1_s cn50xx;
- struct cvmx_l2d_fus1_s cn52xx;
- struct cvmx_l2d_fus1_s cn52xxp1;
- struct cvmx_l2d_fus1_s cn56xx;
- struct cvmx_l2d_fus1_s cn56xxp1;
- struct cvmx_l2d_fus1_s cn58xx;
- struct cvmx_l2d_fus1_s cn58xxp1;
-};
-
-union cvmx_l2d_fus2 {
- uint64_t u64;
- struct cvmx_l2d_fus2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t q2fus:34;
-#else
- uint64_t q2fus:34;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_l2d_fus2_s cn30xx;
- struct cvmx_l2d_fus2_s cn31xx;
- struct cvmx_l2d_fus2_s cn38xx;
- struct cvmx_l2d_fus2_s cn38xxp2;
- struct cvmx_l2d_fus2_s cn50xx;
- struct cvmx_l2d_fus2_s cn52xx;
- struct cvmx_l2d_fus2_s cn52xxp1;
- struct cvmx_l2d_fus2_s cn56xx;
- struct cvmx_l2d_fus2_s cn56xxp1;
- struct cvmx_l2d_fus2_s cn58xx;
- struct cvmx_l2d_fus2_s cn58xxp1;
-};
-
-union cvmx_l2d_fus3 {
- uint64_t u64;
- struct cvmx_l2d_fus3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_34_36:3;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t reserved_34_36:3;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } s;
- struct cvmx_l2d_fus3_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_35_63:29;
- uint64_t crip_64k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_64k:1;
- uint64_t reserved_35_63:29;
-#endif
- } cn30xx;
- struct cvmx_l2d_fus3_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_35_63:29;
- uint64_t crip_128k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_128k:1;
- uint64_t reserved_35_63:29;
-#endif
- } cn31xx;
- struct cvmx_l2d_fus3_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_36_63:28;
- uint64_t crip_256k:1;
- uint64_t crip_512k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_512k:1;
- uint64_t crip_256k:1;
- uint64_t reserved_36_63:28;
-#endif
- } cn38xx;
- struct cvmx_l2d_fus3_cn38xx cn38xxp2;
- struct cvmx_l2d_fus3_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_36_36:1;
- uint64_t crip_32k:1;
- uint64_t crip_64k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_64k:1;
- uint64_t crip_32k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } cn50xx;
- struct cvmx_l2d_fus3_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_36_36:1;
- uint64_t crip_128k:1;
- uint64_t crip_256k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_256k:1;
- uint64_t crip_128k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } cn52xx;
- struct cvmx_l2d_fus3_cn52xx cn52xxp1;
- struct cvmx_l2d_fus3_cn56xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t ema_ctl:3;
- uint64_t reserved_36_36:1;
- uint64_t crip_512k:1;
- uint64_t crip_1024k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_1024k:1;
- uint64_t crip_512k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:3;
- uint64_t reserved_40_63:24;
-#endif
- } cn56xx;
- struct cvmx_l2d_fus3_cn56xx cn56xxp1;
- struct cvmx_l2d_fus3_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_39_63:25;
- uint64_t ema_ctl:2;
- uint64_t reserved_36_36:1;
- uint64_t crip_512k:1;
- uint64_t crip_1024k:1;
- uint64_t q3fus:34;
-#else
- uint64_t q3fus:34;
- uint64_t crip_1024k:1;
- uint64_t crip_512k:1;
- uint64_t reserved_36_36:1;
- uint64_t ema_ctl:2;
- uint64_t reserved_39_63:25;
-#endif
- } cn58xx;
- struct cvmx_l2d_fus3_cn58xx cn58xxp1;
-};
-
-#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
index 83ce22c080e6..fe50671fd1bb 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,210 +28,116 @@
#ifndef __CVMX_L2T_DEFS_H__
#define __CVMX_L2T_DEFS_H__
-#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
+
union cvmx_l2t_err {
uint64_t u64;
struct cvmx_l2t_err_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t fadru:1;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t fadr:10;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:10;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t fadru:1;
- uint64_t reserved_29_63:35;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_29_63:35,
+ __BITFIELD_FIELD(uint64_t fadru:1,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t fadr:10,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
} s;
struct cvmx_l2t_err_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t reserved_23_23:1;
- uint64_t fset:2;
- uint64_t reserved_19_20:2;
- uint64_t fadr:8;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:8;
- uint64_t reserved_19_20:2;
- uint64_t fset:2;
- uint64_t reserved_23_23:1;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint64_t fset:2,
+ __BITFIELD_FIELD(uint64_t reserved_19_20:2,
+ __BITFIELD_FIELD(uint64_t fadr:8,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))))
} cn30xx;
struct cvmx_l2t_err_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t reserved_23_23:1;
- uint64_t fset:2;
- uint64_t reserved_20_20:1;
- uint64_t fadr:9;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:9;
- uint64_t reserved_20_20:1;
- uint64_t fset:2;
- uint64_t reserved_23_23:1;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint64_t fset:2,
+ __BITFIELD_FIELD(uint64_t reserved_20_20:1,
+ __BITFIELD_FIELD(uint64_t fadr:9,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))))
} cn31xx;
struct cvmx_l2t_err_cn38xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t fadr:10;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:10;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t fadr:10,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;)))))))))))))
} cn38xx;
struct cvmx_l2t_err_cn38xx cn38xxp2;
struct cvmx_l2t_err_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t reserved_18_20:3;
- uint64_t fadr:7;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:7;
- uint64_t reserved_18_20:3;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t reserved_18_20:3,
+ __BITFIELD_FIELD(uint64_t fadr:7,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
} cn50xx;
struct cvmx_l2t_err_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_28_63:36;
- uint64_t lck_intena2:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena:1;
- uint64_t lckerr:1;
- uint64_t fset:3;
- uint64_t reserved_20_20:1;
- uint64_t fadr:9;
- uint64_t fsyn:6;
- uint64_t ded_err:1;
- uint64_t sec_err:1;
- uint64_t ded_intena:1;
- uint64_t sec_intena:1;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t sec_intena:1;
- uint64_t ded_intena:1;
- uint64_t sec_err:1;
- uint64_t ded_err:1;
- uint64_t fsyn:6;
- uint64_t fadr:9;
- uint64_t reserved_20_20:1;
- uint64_t fset:3;
- uint64_t lckerr:1;
- uint64_t lck_intena:1;
- uint64_t lckerr2:1;
- uint64_t lck_intena2:1;
- uint64_t reserved_28_63:36;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_28_63:36,
+ __BITFIELD_FIELD(uint64_t lck_intena2:1,
+ __BITFIELD_FIELD(uint64_t lckerr2:1,
+ __BITFIELD_FIELD(uint64_t lck_intena:1,
+ __BITFIELD_FIELD(uint64_t lckerr:1,
+ __BITFIELD_FIELD(uint64_t fset:3,
+ __BITFIELD_FIELD(uint64_t reserved_20_20:1,
+ __BITFIELD_FIELD(uint64_t fadr:9,
+ __BITFIELD_FIELD(uint64_t fsyn:6,
+ __BITFIELD_FIELD(uint64_t ded_err:1,
+ __BITFIELD_FIELD(uint64_t sec_err:1,
+ __BITFIELD_FIELD(uint64_t ded_intena:1,
+ __BITFIELD_FIELD(uint64_t sec_intena:1,
+ __BITFIELD_FIELD(uint64_t ecc_ena:1,
+ ;))))))))))))))
} cn52xx;
struct cvmx_l2t_err_cn52xx cn52xxp1;
struct cvmx_l2t_err_s cn56xx;
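A minimal self-contained sketch (hypothetical union and field names, not part of the patch) shows how the nesting works and why the terminating ";))...)" line must carry exactly one closing parenthesis per __BITFIELD_FIELD() above it:

	/* demo.c - hypothetical example of the __BITFIELD_FIELD() pattern */
	#include <stdint.h>
	#include <stdio.h>

	/* Big-endian variant: emit the field, then recurse.  The
	 * little-endian variant emits the recursion first, so the fields
	 * come out reversed and the same MSB-first declaration still
	 * matches the hardware layout. */
	#define __BITFIELD_FIELD(field, more) field; more

	union demo {
		uint8_t u8;
		struct {
			__BITFIELD_FIELD(uint8_t hi:4,
			__BITFIELD_FIELD(uint8_t lo:4,
			;))	/* two fields, two closing parens */
		} s;
	};

	int main(void)
	{
		union demo d = { .u8 = 0xA5 };
		/* Which nibble each field maps to depends on the target
		 * ABI; on MIPS the correct macro variant is selected by
		 * __MIPSEB__/__MIPSEL__. */
		printf("hi=%x lo=%x\n", d.s.hi, d.s.lo);
		return 0;
	}

The stray ';' left at the end of the recursion is an empty declaration inside the struct, which GCC accepts.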
diff --git a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
index 4bce393391e2..e2dce1acf029 100644
--- a/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pciercx-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,3148 +28,341 @@
#ifndef __CVMX_PCIERCX_DEFS_H__
#define __CVMX_PCIERCX_DEFS_H__
-#define CVMX_PCIERCX_CFG000(block_id) (0x0000000000000000ull)
+#include <uapi/asm/bitfield.h>
+
#define CVMX_PCIERCX_CFG001(block_id) (0x0000000000000004ull)
-#define CVMX_PCIERCX_CFG002(block_id) (0x0000000000000008ull)
-#define CVMX_PCIERCX_CFG003(block_id) (0x000000000000000Cull)
-#define CVMX_PCIERCX_CFG004(block_id) (0x0000000000000010ull)
-#define CVMX_PCIERCX_CFG005(block_id) (0x0000000000000014ull)
#define CVMX_PCIERCX_CFG006(block_id) (0x0000000000000018ull)
-#define CVMX_PCIERCX_CFG007(block_id) (0x000000000000001Cull)
#define CVMX_PCIERCX_CFG008(block_id) (0x0000000000000020ull)
#define CVMX_PCIERCX_CFG009(block_id) (0x0000000000000024ull)
#define CVMX_PCIERCX_CFG010(block_id) (0x0000000000000028ull)
#define CVMX_PCIERCX_CFG011(block_id) (0x000000000000002Cull)
-#define CVMX_PCIERCX_CFG012(block_id) (0x0000000000000030ull)
-#define CVMX_PCIERCX_CFG013(block_id) (0x0000000000000034ull)
-#define CVMX_PCIERCX_CFG014(block_id) (0x0000000000000038ull)
-#define CVMX_PCIERCX_CFG015(block_id) (0x000000000000003Cull)
-#define CVMX_PCIERCX_CFG016(block_id) (0x0000000000000040ull)
-#define CVMX_PCIERCX_CFG017(block_id) (0x0000000000000044ull)
-#define CVMX_PCIERCX_CFG020(block_id) (0x0000000000000050ull)
-#define CVMX_PCIERCX_CFG021(block_id) (0x0000000000000054ull)
-#define CVMX_PCIERCX_CFG022(block_id) (0x0000000000000058ull)
-#define CVMX_PCIERCX_CFG023(block_id) (0x000000000000005Cull)
-#define CVMX_PCIERCX_CFG028(block_id) (0x0000000000000070ull)
-#define CVMX_PCIERCX_CFG029(block_id) (0x0000000000000074ull)
#define CVMX_PCIERCX_CFG030(block_id) (0x0000000000000078ull)
#define CVMX_PCIERCX_CFG031(block_id) (0x000000000000007Cull)
#define CVMX_PCIERCX_CFG032(block_id) (0x0000000000000080ull)
-#define CVMX_PCIERCX_CFG033(block_id) (0x0000000000000084ull)
#define CVMX_PCIERCX_CFG034(block_id) (0x0000000000000088ull)
#define CVMX_PCIERCX_CFG035(block_id) (0x000000000000008Cull)
-#define CVMX_PCIERCX_CFG036(block_id) (0x0000000000000090ull)
-#define CVMX_PCIERCX_CFG037(block_id) (0x0000000000000094ull)
-#define CVMX_PCIERCX_CFG038(block_id) (0x0000000000000098ull)
-#define CVMX_PCIERCX_CFG039(block_id) (0x000000000000009Cull)
#define CVMX_PCIERCX_CFG040(block_id) (0x00000000000000A0ull)
-#define CVMX_PCIERCX_CFG041(block_id) (0x00000000000000A4ull)
-#define CVMX_PCIERCX_CFG042(block_id) (0x00000000000000A8ull)
-#define CVMX_PCIERCX_CFG064(block_id) (0x0000000000000100ull)
-#define CVMX_PCIERCX_CFG065(block_id) (0x0000000000000104ull)
#define CVMX_PCIERCX_CFG066(block_id) (0x0000000000000108ull)
-#define CVMX_PCIERCX_CFG067(block_id) (0x000000000000010Cull)
-#define CVMX_PCIERCX_CFG068(block_id) (0x0000000000000110ull)
#define CVMX_PCIERCX_CFG069(block_id) (0x0000000000000114ull)
#define CVMX_PCIERCX_CFG070(block_id) (0x0000000000000118ull)
-#define CVMX_PCIERCX_CFG071(block_id) (0x000000000000011Cull)
-#define CVMX_PCIERCX_CFG072(block_id) (0x0000000000000120ull)
-#define CVMX_PCIERCX_CFG073(block_id) (0x0000000000000124ull)
-#define CVMX_PCIERCX_CFG074(block_id) (0x0000000000000128ull)
#define CVMX_PCIERCX_CFG075(block_id) (0x000000000000012Cull)
-#define CVMX_PCIERCX_CFG076(block_id) (0x0000000000000130ull)
-#define CVMX_PCIERCX_CFG077(block_id) (0x0000000000000134ull)
#define CVMX_PCIERCX_CFG448(block_id) (0x0000000000000700ull)
-#define CVMX_PCIERCX_CFG449(block_id) (0x0000000000000704ull)
-#define CVMX_PCIERCX_CFG450(block_id) (0x0000000000000708ull)
-#define CVMX_PCIERCX_CFG451(block_id) (0x000000000000070Cull)
#define CVMX_PCIERCX_CFG452(block_id) (0x0000000000000710ull)
-#define CVMX_PCIERCX_CFG453(block_id) (0x0000000000000714ull)
-#define CVMX_PCIERCX_CFG454(block_id) (0x0000000000000718ull)
#define CVMX_PCIERCX_CFG455(block_id) (0x000000000000071Cull)
-#define CVMX_PCIERCX_CFG456(block_id) (0x0000000000000720ull)
-#define CVMX_PCIERCX_CFG458(block_id) (0x0000000000000728ull)
-#define CVMX_PCIERCX_CFG459(block_id) (0x000000000000072Cull)
-#define CVMX_PCIERCX_CFG460(block_id) (0x0000000000000730ull)
-#define CVMX_PCIERCX_CFG461(block_id) (0x0000000000000734ull)
-#define CVMX_PCIERCX_CFG462(block_id) (0x0000000000000738ull)
-#define CVMX_PCIERCX_CFG463(block_id) (0x000000000000073Cull)
-#define CVMX_PCIERCX_CFG464(block_id) (0x0000000000000740ull)
-#define CVMX_PCIERCX_CFG465(block_id) (0x0000000000000744ull)
-#define CVMX_PCIERCX_CFG466(block_id) (0x0000000000000748ull)
-#define CVMX_PCIERCX_CFG467(block_id) (0x000000000000074Cull)
-#define CVMX_PCIERCX_CFG468(block_id) (0x0000000000000750ull)
-#define CVMX_PCIERCX_CFG490(block_id) (0x00000000000007A8ull)
-#define CVMX_PCIERCX_CFG491(block_id) (0x00000000000007ACull)
-#define CVMX_PCIERCX_CFG492(block_id) (0x00000000000007B0ull)
#define CVMX_PCIERCX_CFG515(block_id) (0x000000000000080Cull)
-#define CVMX_PCIERCX_CFG516(block_id) (0x0000000000000810ull)
-#define CVMX_PCIERCX_CFG517(block_id) (0x0000000000000814ull)
-
-union cvmx_pciercx_cfg000 {
- uint32_t u32;
- struct cvmx_pciercx_cfg000_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t devid:16;
- uint32_t vendid:16;
-#else
- uint32_t vendid:16;
- uint32_t devid:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg000_s cn52xx;
- struct cvmx_pciercx_cfg000_s cn52xxp1;
- struct cvmx_pciercx_cfg000_s cn56xx;
- struct cvmx_pciercx_cfg000_s cn56xxp1;
- struct cvmx_pciercx_cfg000_s cn61xx;
- struct cvmx_pciercx_cfg000_s cn63xx;
- struct cvmx_pciercx_cfg000_s cn63xxp1;
- struct cvmx_pciercx_cfg000_s cn66xx;
- struct cvmx_pciercx_cfg000_s cn68xx;
- struct cvmx_pciercx_cfg000_s cn68xxp1;
- struct cvmx_pciercx_cfg000_s cnf71xx;
-};
union cvmx_pciercx_cfg001 {
uint32_t u32;
struct cvmx_pciercx_cfg001_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dpe:1;
- uint32_t sse:1;
- uint32_t rma:1;
- uint32_t rta:1;
- uint32_t sta:1;
- uint32_t devt:2;
- uint32_t mdpe:1;
- uint32_t fbb:1;
- uint32_t reserved_22_22:1;
- uint32_t m66:1;
- uint32_t cl:1;
- uint32_t i_stat:1;
- uint32_t reserved_11_18:8;
- uint32_t i_dis:1;
- uint32_t fbbe:1;
- uint32_t see:1;
- uint32_t ids_wcc:1;
- uint32_t per:1;
- uint32_t vps:1;
- uint32_t mwice:1;
- uint32_t scse:1;
- uint32_t me:1;
- uint32_t msae:1;
- uint32_t isae:1;
-#else
- uint32_t isae:1;
- uint32_t msae:1;
- uint32_t me:1;
- uint32_t scse:1;
- uint32_t mwice:1;
- uint32_t vps:1;
- uint32_t per:1;
- uint32_t ids_wcc:1;
- uint32_t see:1;
- uint32_t fbbe:1;
- uint32_t i_dis:1;
- uint32_t reserved_11_18:8;
- uint32_t i_stat:1;
- uint32_t cl:1;
- uint32_t m66:1;
- uint32_t reserved_22_22:1;
- uint32_t fbb:1;
- uint32_t mdpe:1;
- uint32_t devt:2;
- uint32_t sta:1;
- uint32_t rta:1;
- uint32_t rma:1;
- uint32_t sse:1;
- uint32_t dpe:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg001_s cn52xx;
- struct cvmx_pciercx_cfg001_s cn52xxp1;
- struct cvmx_pciercx_cfg001_s cn56xx;
- struct cvmx_pciercx_cfg001_s cn56xxp1;
- struct cvmx_pciercx_cfg001_s cn61xx;
- struct cvmx_pciercx_cfg001_s cn63xx;
- struct cvmx_pciercx_cfg001_s cn63xxp1;
- struct cvmx_pciercx_cfg001_s cn66xx;
- struct cvmx_pciercx_cfg001_s cn68xx;
- struct cvmx_pciercx_cfg001_s cn68xxp1;
- struct cvmx_pciercx_cfg001_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg002 {
- uint32_t u32;
- struct cvmx_pciercx_cfg002_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t bcc:8;
- uint32_t sc:8;
- uint32_t pi:8;
- uint32_t rid:8;
-#else
- uint32_t rid:8;
- uint32_t pi:8;
- uint32_t sc:8;
- uint32_t bcc:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg002_s cn52xx;
- struct cvmx_pciercx_cfg002_s cn52xxp1;
- struct cvmx_pciercx_cfg002_s cn56xx;
- struct cvmx_pciercx_cfg002_s cn56xxp1;
- struct cvmx_pciercx_cfg002_s cn61xx;
- struct cvmx_pciercx_cfg002_s cn63xx;
- struct cvmx_pciercx_cfg002_s cn63xxp1;
- struct cvmx_pciercx_cfg002_s cn66xx;
- struct cvmx_pciercx_cfg002_s cn68xx;
- struct cvmx_pciercx_cfg002_s cn68xxp1;
- struct cvmx_pciercx_cfg002_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg003 {
- uint32_t u32;
- struct cvmx_pciercx_cfg003_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t bist:8;
- uint32_t mfd:1;
- uint32_t chf:7;
- uint32_t lt:8;
- uint32_t cls:8;
-#else
- uint32_t cls:8;
- uint32_t lt:8;
- uint32_t chf:7;
- uint32_t mfd:1;
- uint32_t bist:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg003_s cn52xx;
- struct cvmx_pciercx_cfg003_s cn52xxp1;
- struct cvmx_pciercx_cfg003_s cn56xx;
- struct cvmx_pciercx_cfg003_s cn56xxp1;
- struct cvmx_pciercx_cfg003_s cn61xx;
- struct cvmx_pciercx_cfg003_s cn63xx;
- struct cvmx_pciercx_cfg003_s cn63xxp1;
- struct cvmx_pciercx_cfg003_s cn66xx;
- struct cvmx_pciercx_cfg003_s cn68xx;
- struct cvmx_pciercx_cfg003_s cn68xxp1;
- struct cvmx_pciercx_cfg003_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg004 {
- uint32_t u32;
- struct cvmx_pciercx_cfg004_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
+ __BITFIELD_FIELD(uint32_t dpe:1,
+ __BITFIELD_FIELD(uint32_t sse:1,
+ __BITFIELD_FIELD(uint32_t rma:1,
+ __BITFIELD_FIELD(uint32_t rta:1,
+ __BITFIELD_FIELD(uint32_t sta:1,
+ __BITFIELD_FIELD(uint32_t devt:2,
+ __BITFIELD_FIELD(uint32_t mdpe:1,
+ __BITFIELD_FIELD(uint32_t fbb:1,
+ __BITFIELD_FIELD(uint32_t reserved_22_22:1,
+ __BITFIELD_FIELD(uint32_t m66:1,
+ __BITFIELD_FIELD(uint32_t cl:1,
+ __BITFIELD_FIELD(uint32_t i_stat:1,
+ __BITFIELD_FIELD(uint32_t reserved_11_18:8,
+ __BITFIELD_FIELD(uint32_t i_dis:1,
+ __BITFIELD_FIELD(uint32_t fbbe:1,
+ __BITFIELD_FIELD(uint32_t see:1,
+ __BITFIELD_FIELD(uint32_t ids_wcc:1,
+ __BITFIELD_FIELD(uint32_t per:1,
+ __BITFIELD_FIELD(uint32_t vps:1,
+ __BITFIELD_FIELD(uint32_t mwice:1,
+ __BITFIELD_FIELD(uint32_t scse:1,
+ __BITFIELD_FIELD(uint32_t me:1,
+ __BITFIELD_FIELD(uint32_t msae:1,
+ __BITFIELD_FIELD(uint32_t isae:1,
+ ;))))))))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg004_s cn52xx;
- struct cvmx_pciercx_cfg004_s cn52xxp1;
- struct cvmx_pciercx_cfg004_s cn56xx;
- struct cvmx_pciercx_cfg004_s cn56xxp1;
- struct cvmx_pciercx_cfg004_s cn61xx;
- struct cvmx_pciercx_cfg004_s cn63xx;
- struct cvmx_pciercx_cfg004_s cn63xxp1;
- struct cvmx_pciercx_cfg004_s cn66xx;
- struct cvmx_pciercx_cfg004_s cn68xx;
- struct cvmx_pciercx_cfg004_s cn68xxp1;
- struct cvmx_pciercx_cfg004_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg005 {
- uint32_t u32;
- struct cvmx_pciercx_cfg005_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg005_s cn52xx;
- struct cvmx_pciercx_cfg005_s cn52xxp1;
- struct cvmx_pciercx_cfg005_s cn56xx;
- struct cvmx_pciercx_cfg005_s cn56xxp1;
- struct cvmx_pciercx_cfg005_s cn61xx;
- struct cvmx_pciercx_cfg005_s cn63xx;
- struct cvmx_pciercx_cfg005_s cn63xxp1;
- struct cvmx_pciercx_cfg005_s cn66xx;
- struct cvmx_pciercx_cfg005_s cn68xx;
- struct cvmx_pciercx_cfg005_s cn68xxp1;
- struct cvmx_pciercx_cfg005_s cnf71xx;
};
union cvmx_pciercx_cfg006 {
uint32_t u32;
struct cvmx_pciercx_cfg006_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t slt:8;
- uint32_t subbnum:8;
- uint32_t sbnum:8;
- uint32_t pbnum:8;
-#else
- uint32_t pbnum:8;
- uint32_t sbnum:8;
- uint32_t subbnum:8;
- uint32_t slt:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg006_s cn52xx;
- struct cvmx_pciercx_cfg006_s cn52xxp1;
- struct cvmx_pciercx_cfg006_s cn56xx;
- struct cvmx_pciercx_cfg006_s cn56xxp1;
- struct cvmx_pciercx_cfg006_s cn61xx;
- struct cvmx_pciercx_cfg006_s cn63xx;
- struct cvmx_pciercx_cfg006_s cn63xxp1;
- struct cvmx_pciercx_cfg006_s cn66xx;
- struct cvmx_pciercx_cfg006_s cn68xx;
- struct cvmx_pciercx_cfg006_s cn68xxp1;
- struct cvmx_pciercx_cfg006_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg007 {
- uint32_t u32;
- struct cvmx_pciercx_cfg007_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dpe:1;
- uint32_t sse:1;
- uint32_t rma:1;
- uint32_t rta:1;
- uint32_t sta:1;
- uint32_t devt:2;
- uint32_t mdpe:1;
- uint32_t fbb:1;
- uint32_t reserved_22_22:1;
- uint32_t m66:1;
- uint32_t reserved_16_20:5;
- uint32_t lio_limi:4;
- uint32_t reserved_9_11:3;
- uint32_t io32b:1;
- uint32_t lio_base:4;
- uint32_t reserved_1_3:3;
- uint32_t io32a:1;
-#else
- uint32_t io32a:1;
- uint32_t reserved_1_3:3;
- uint32_t lio_base:4;
- uint32_t io32b:1;
- uint32_t reserved_9_11:3;
- uint32_t lio_limi:4;
- uint32_t reserved_16_20:5;
- uint32_t m66:1;
- uint32_t reserved_22_22:1;
- uint32_t fbb:1;
- uint32_t mdpe:1;
- uint32_t devt:2;
- uint32_t sta:1;
- uint32_t rta:1;
- uint32_t rma:1;
- uint32_t sse:1;
- uint32_t dpe:1;
-#endif
+ __BITFIELD_FIELD(uint32_t slt:8,
+ __BITFIELD_FIELD(uint32_t subbnum:8,
+ __BITFIELD_FIELD(uint32_t sbnum:8,
+ __BITFIELD_FIELD(uint32_t pbnum:8,
+ ;))))
} s;
- struct cvmx_pciercx_cfg007_s cn52xx;
- struct cvmx_pciercx_cfg007_s cn52xxp1;
- struct cvmx_pciercx_cfg007_s cn56xx;
- struct cvmx_pciercx_cfg007_s cn56xxp1;
- struct cvmx_pciercx_cfg007_s cn61xx;
- struct cvmx_pciercx_cfg007_s cn63xx;
- struct cvmx_pciercx_cfg007_s cn63xxp1;
- struct cvmx_pciercx_cfg007_s cn66xx;
- struct cvmx_pciercx_cfg007_s cn68xx;
- struct cvmx_pciercx_cfg007_s cn68xxp1;
- struct cvmx_pciercx_cfg007_s cnf71xx;
};
union cvmx_pciercx_cfg008 {
uint32_t u32;
struct cvmx_pciercx_cfg008_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t ml_addr:12;
- uint32_t reserved_16_19:4;
- uint32_t mb_addr:12;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t mb_addr:12;
- uint32_t reserved_16_19:4;
- uint32_t ml_addr:12;
-#endif
+ __BITFIELD_FIELD(uint32_t ml_addr:12,
+ __BITFIELD_FIELD(uint32_t reserved_16_19:4,
+ __BITFIELD_FIELD(uint32_t mb_addr:12,
+ __BITFIELD_FIELD(uint32_t reserved_0_3:4,
+ ;))))
} s;
- struct cvmx_pciercx_cfg008_s cn52xx;
- struct cvmx_pciercx_cfg008_s cn52xxp1;
- struct cvmx_pciercx_cfg008_s cn56xx;
- struct cvmx_pciercx_cfg008_s cn56xxp1;
- struct cvmx_pciercx_cfg008_s cn61xx;
- struct cvmx_pciercx_cfg008_s cn63xx;
- struct cvmx_pciercx_cfg008_s cn63xxp1;
- struct cvmx_pciercx_cfg008_s cn66xx;
- struct cvmx_pciercx_cfg008_s cn68xx;
- struct cvmx_pciercx_cfg008_s cn68xxp1;
- struct cvmx_pciercx_cfg008_s cnf71xx;
};
union cvmx_pciercx_cfg009 {
uint32_t u32;
struct cvmx_pciercx_cfg009_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lmem_limit:12;
- uint32_t reserved_17_19:3;
- uint32_t mem64b:1;
- uint32_t lmem_base:12;
- uint32_t reserved_1_3:3;
- uint32_t mem64a:1;
-#else
- uint32_t mem64a:1;
- uint32_t reserved_1_3:3;
- uint32_t lmem_base:12;
- uint32_t mem64b:1;
- uint32_t reserved_17_19:3;
- uint32_t lmem_limit:12;
-#endif
+ __BITFIELD_FIELD(uint32_t lmem_limit:12,
+ __BITFIELD_FIELD(uint32_t reserved_17_19:3,
+ __BITFIELD_FIELD(uint32_t mem64b:1,
+ __BITFIELD_FIELD(uint32_t lmem_base:12,
+ __BITFIELD_FIELD(uint32_t reserved_1_3:3,
+ __BITFIELD_FIELD(uint32_t mem64a:1,
+ ;))))))
} s;
- struct cvmx_pciercx_cfg009_s cn52xx;
- struct cvmx_pciercx_cfg009_s cn52xxp1;
- struct cvmx_pciercx_cfg009_s cn56xx;
- struct cvmx_pciercx_cfg009_s cn56xxp1;
- struct cvmx_pciercx_cfg009_s cn61xx;
- struct cvmx_pciercx_cfg009_s cn63xx;
- struct cvmx_pciercx_cfg009_s cn63xxp1;
- struct cvmx_pciercx_cfg009_s cn66xx;
- struct cvmx_pciercx_cfg009_s cn68xx;
- struct cvmx_pciercx_cfg009_s cn68xxp1;
- struct cvmx_pciercx_cfg009_s cnf71xx;
};
union cvmx_pciercx_cfg010 {
uint32_t u32;
struct cvmx_pciercx_cfg010_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t umem_base:32;
-#else
- uint32_t umem_base:32;
-#endif
+ uint32_t umem_base;
} s;
- struct cvmx_pciercx_cfg010_s cn52xx;
- struct cvmx_pciercx_cfg010_s cn52xxp1;
- struct cvmx_pciercx_cfg010_s cn56xx;
- struct cvmx_pciercx_cfg010_s cn56xxp1;
- struct cvmx_pciercx_cfg010_s cn61xx;
- struct cvmx_pciercx_cfg010_s cn63xx;
- struct cvmx_pciercx_cfg010_s cn63xxp1;
- struct cvmx_pciercx_cfg010_s cn66xx;
- struct cvmx_pciercx_cfg010_s cn68xx;
- struct cvmx_pciercx_cfg010_s cn68xxp1;
- struct cvmx_pciercx_cfg010_s cnf71xx;
};
union cvmx_pciercx_cfg011 {
uint32_t u32;
struct cvmx_pciercx_cfg011_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t umem_limit:32;
-#else
- uint32_t umem_limit:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg011_s cn52xx;
- struct cvmx_pciercx_cfg011_s cn52xxp1;
- struct cvmx_pciercx_cfg011_s cn56xx;
- struct cvmx_pciercx_cfg011_s cn56xxp1;
- struct cvmx_pciercx_cfg011_s cn61xx;
- struct cvmx_pciercx_cfg011_s cn63xx;
- struct cvmx_pciercx_cfg011_s cn63xxp1;
- struct cvmx_pciercx_cfg011_s cn66xx;
- struct cvmx_pciercx_cfg011_s cn68xx;
- struct cvmx_pciercx_cfg011_s cn68xxp1;
- struct cvmx_pciercx_cfg011_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg012 {
- uint32_t u32;
- struct cvmx_pciercx_cfg012_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t uio_limit:16;
- uint32_t uio_base:16;
-#else
- uint32_t uio_base:16;
- uint32_t uio_limit:16;
-#endif
+ uint32_t umem_limit;
} s;
- struct cvmx_pciercx_cfg012_s cn52xx;
- struct cvmx_pciercx_cfg012_s cn52xxp1;
- struct cvmx_pciercx_cfg012_s cn56xx;
- struct cvmx_pciercx_cfg012_s cn56xxp1;
- struct cvmx_pciercx_cfg012_s cn61xx;
- struct cvmx_pciercx_cfg012_s cn63xx;
- struct cvmx_pciercx_cfg012_s cn63xxp1;
- struct cvmx_pciercx_cfg012_s cn66xx;
- struct cvmx_pciercx_cfg012_s cn68xx;
- struct cvmx_pciercx_cfg012_s cn68xxp1;
- struct cvmx_pciercx_cfg012_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg013 {
- uint32_t u32;
- struct cvmx_pciercx_cfg013_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_8_31:24;
- uint32_t cp:8;
-#else
- uint32_t cp:8;
- uint32_t reserved_8_31:24;
-#endif
- } s;
- struct cvmx_pciercx_cfg013_s cn52xx;
- struct cvmx_pciercx_cfg013_s cn52xxp1;
- struct cvmx_pciercx_cfg013_s cn56xx;
- struct cvmx_pciercx_cfg013_s cn56xxp1;
- struct cvmx_pciercx_cfg013_s cn61xx;
- struct cvmx_pciercx_cfg013_s cn63xx;
- struct cvmx_pciercx_cfg013_s cn63xxp1;
- struct cvmx_pciercx_cfg013_s cn66xx;
- struct cvmx_pciercx_cfg013_s cn68xx;
- struct cvmx_pciercx_cfg013_s cn68xxp1;
- struct cvmx_pciercx_cfg013_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg014 {
- uint32_t u32;
- struct cvmx_pciercx_cfg014_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg014_s cn52xx;
- struct cvmx_pciercx_cfg014_s cn52xxp1;
- struct cvmx_pciercx_cfg014_s cn56xx;
- struct cvmx_pciercx_cfg014_s cn56xxp1;
- struct cvmx_pciercx_cfg014_s cn61xx;
- struct cvmx_pciercx_cfg014_s cn63xx;
- struct cvmx_pciercx_cfg014_s cn63xxp1;
- struct cvmx_pciercx_cfg014_s cn66xx;
- struct cvmx_pciercx_cfg014_s cn68xx;
- struct cvmx_pciercx_cfg014_s cn68xxp1;
- struct cvmx_pciercx_cfg014_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg015 {
- uint32_t u32;
- struct cvmx_pciercx_cfg015_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_28_31:4;
- uint32_t dtsees:1;
- uint32_t dts:1;
- uint32_t sdt:1;
- uint32_t pdt:1;
- uint32_t fbbe:1;
- uint32_t sbrst:1;
- uint32_t mam:1;
- uint32_t vga16d:1;
- uint32_t vgae:1;
- uint32_t isae:1;
- uint32_t see:1;
- uint32_t pere:1;
- uint32_t inta:8;
- uint32_t il:8;
-#else
- uint32_t il:8;
- uint32_t inta:8;
- uint32_t pere:1;
- uint32_t see:1;
- uint32_t isae:1;
- uint32_t vgae:1;
- uint32_t vga16d:1;
- uint32_t mam:1;
- uint32_t sbrst:1;
- uint32_t fbbe:1;
- uint32_t pdt:1;
- uint32_t sdt:1;
- uint32_t dts:1;
- uint32_t dtsees:1;
- uint32_t reserved_28_31:4;
-#endif
- } s;
- struct cvmx_pciercx_cfg015_s cn52xx;
- struct cvmx_pciercx_cfg015_s cn52xxp1;
- struct cvmx_pciercx_cfg015_s cn56xx;
- struct cvmx_pciercx_cfg015_s cn56xxp1;
- struct cvmx_pciercx_cfg015_s cn61xx;
- struct cvmx_pciercx_cfg015_s cn63xx;
- struct cvmx_pciercx_cfg015_s cn63xxp1;
- struct cvmx_pciercx_cfg015_s cn66xx;
- struct cvmx_pciercx_cfg015_s cn68xx;
- struct cvmx_pciercx_cfg015_s cn68xxp1;
- struct cvmx_pciercx_cfg015_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg016 {
- uint32_t u32;
- struct cvmx_pciercx_cfg016_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pmes:5;
- uint32_t d2s:1;
- uint32_t d1s:1;
- uint32_t auxc:3;
- uint32_t dsi:1;
- uint32_t reserved_20_20:1;
- uint32_t pme_clock:1;
- uint32_t pmsv:3;
- uint32_t ncp:8;
- uint32_t pmcid:8;
-#else
- uint32_t pmcid:8;
- uint32_t ncp:8;
- uint32_t pmsv:3;
- uint32_t pme_clock:1;
- uint32_t reserved_20_20:1;
- uint32_t dsi:1;
- uint32_t auxc:3;
- uint32_t d1s:1;
- uint32_t d2s:1;
- uint32_t pmes:5;
-#endif
- } s;
- struct cvmx_pciercx_cfg016_s cn52xx;
- struct cvmx_pciercx_cfg016_s cn52xxp1;
- struct cvmx_pciercx_cfg016_s cn56xx;
- struct cvmx_pciercx_cfg016_s cn56xxp1;
- struct cvmx_pciercx_cfg016_s cn61xx;
- struct cvmx_pciercx_cfg016_s cn63xx;
- struct cvmx_pciercx_cfg016_s cn63xxp1;
- struct cvmx_pciercx_cfg016_s cn66xx;
- struct cvmx_pciercx_cfg016_s cn68xx;
- struct cvmx_pciercx_cfg016_s cn68xxp1;
- struct cvmx_pciercx_cfg016_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg017 {
- uint32_t u32;
- struct cvmx_pciercx_cfg017_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pmdia:8;
- uint32_t bpccee:1;
- uint32_t bd3h:1;
- uint32_t reserved_16_21:6;
- uint32_t pmess:1;
- uint32_t pmedsia:2;
- uint32_t pmds:4;
- uint32_t pmeens:1;
- uint32_t reserved_4_7:4;
- uint32_t nsr:1;
- uint32_t reserved_2_2:1;
- uint32_t ps:2;
-#else
- uint32_t ps:2;
- uint32_t reserved_2_2:1;
- uint32_t nsr:1;
- uint32_t reserved_4_7:4;
- uint32_t pmeens:1;
- uint32_t pmds:4;
- uint32_t pmedsia:2;
- uint32_t pmess:1;
- uint32_t reserved_16_21:6;
- uint32_t bd3h:1;
- uint32_t bpccee:1;
- uint32_t pmdia:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg017_s cn52xx;
- struct cvmx_pciercx_cfg017_s cn52xxp1;
- struct cvmx_pciercx_cfg017_s cn56xx;
- struct cvmx_pciercx_cfg017_s cn56xxp1;
- struct cvmx_pciercx_cfg017_s cn61xx;
- struct cvmx_pciercx_cfg017_s cn63xx;
- struct cvmx_pciercx_cfg017_s cn63xxp1;
- struct cvmx_pciercx_cfg017_s cn66xx;
- struct cvmx_pciercx_cfg017_s cn68xx;
- struct cvmx_pciercx_cfg017_s cn68xxp1;
- struct cvmx_pciercx_cfg017_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg020 {
- uint32_t u32;
- struct cvmx_pciercx_cfg020_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t pvm:1;
- uint32_t m64:1;
- uint32_t mme:3;
- uint32_t mmc:3;
- uint32_t msien:1;
- uint32_t ncp:8;
- uint32_t msicid:8;
-#else
- uint32_t msicid:8;
- uint32_t ncp:8;
- uint32_t msien:1;
- uint32_t mmc:3;
- uint32_t mme:3;
- uint32_t m64:1;
- uint32_t pvm:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg020_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_24_31:8;
- uint32_t m64:1;
- uint32_t mme:3;
- uint32_t mmc:3;
- uint32_t msien:1;
- uint32_t ncp:8;
- uint32_t msicid:8;
-#else
- uint32_t msicid:8;
- uint32_t ncp:8;
- uint32_t msien:1;
- uint32_t mmc:3;
- uint32_t mme:3;
- uint32_t m64:1;
- uint32_t reserved_24_31:8;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg020_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg020_cn52xx cn56xx;
- struct cvmx_pciercx_cfg020_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg020_s cn61xx;
- struct cvmx_pciercx_cfg020_cn52xx cn63xx;
- struct cvmx_pciercx_cfg020_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg020_cn52xx cn66xx;
- struct cvmx_pciercx_cfg020_cn52xx cn68xx;
- struct cvmx_pciercx_cfg020_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg020_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg021 {
- uint32_t u32;
- struct cvmx_pciercx_cfg021_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lmsi:30;
- uint32_t reserved_0_1:2;
-#else
- uint32_t reserved_0_1:2;
- uint32_t lmsi:30;
-#endif
- } s;
- struct cvmx_pciercx_cfg021_s cn52xx;
- struct cvmx_pciercx_cfg021_s cn52xxp1;
- struct cvmx_pciercx_cfg021_s cn56xx;
- struct cvmx_pciercx_cfg021_s cn56xxp1;
- struct cvmx_pciercx_cfg021_s cn61xx;
- struct cvmx_pciercx_cfg021_s cn63xx;
- struct cvmx_pciercx_cfg021_s cn63xxp1;
- struct cvmx_pciercx_cfg021_s cn66xx;
- struct cvmx_pciercx_cfg021_s cn68xx;
- struct cvmx_pciercx_cfg021_s cn68xxp1;
- struct cvmx_pciercx_cfg021_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg022 {
- uint32_t u32;
- struct cvmx_pciercx_cfg022_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t umsi:32;
-#else
- uint32_t umsi:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg022_s cn52xx;
- struct cvmx_pciercx_cfg022_s cn52xxp1;
- struct cvmx_pciercx_cfg022_s cn56xx;
- struct cvmx_pciercx_cfg022_s cn56xxp1;
- struct cvmx_pciercx_cfg022_s cn61xx;
- struct cvmx_pciercx_cfg022_s cn63xx;
- struct cvmx_pciercx_cfg022_s cn63xxp1;
- struct cvmx_pciercx_cfg022_s cn66xx;
- struct cvmx_pciercx_cfg022_s cn68xx;
- struct cvmx_pciercx_cfg022_s cn68xxp1;
- struct cvmx_pciercx_cfg022_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg023 {
- uint32_t u32;
- struct cvmx_pciercx_cfg023_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_16_31:16;
- uint32_t msimd:16;
-#else
- uint32_t msimd:16;
- uint32_t reserved_16_31:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg023_s cn52xx;
- struct cvmx_pciercx_cfg023_s cn52xxp1;
- struct cvmx_pciercx_cfg023_s cn56xx;
- struct cvmx_pciercx_cfg023_s cn56xxp1;
- struct cvmx_pciercx_cfg023_s cn61xx;
- struct cvmx_pciercx_cfg023_s cn63xx;
- struct cvmx_pciercx_cfg023_s cn63xxp1;
- struct cvmx_pciercx_cfg023_s cn66xx;
- struct cvmx_pciercx_cfg023_s cn68xx;
- struct cvmx_pciercx_cfg023_s cn68xxp1;
- struct cvmx_pciercx_cfg023_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg028 {
- uint32_t u32;
- struct cvmx_pciercx_cfg028_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_30_31:2;
- uint32_t imn:5;
- uint32_t si:1;
- uint32_t dpt:4;
- uint32_t pciecv:4;
- uint32_t ncp:8;
- uint32_t pcieid:8;
-#else
- uint32_t pcieid:8;
- uint32_t ncp:8;
- uint32_t pciecv:4;
- uint32_t dpt:4;
- uint32_t si:1;
- uint32_t imn:5;
- uint32_t reserved_30_31:2;
-#endif
- } s;
- struct cvmx_pciercx_cfg028_s cn52xx;
- struct cvmx_pciercx_cfg028_s cn52xxp1;
- struct cvmx_pciercx_cfg028_s cn56xx;
- struct cvmx_pciercx_cfg028_s cn56xxp1;
- struct cvmx_pciercx_cfg028_s cn61xx;
- struct cvmx_pciercx_cfg028_s cn63xx;
- struct cvmx_pciercx_cfg028_s cn63xxp1;
- struct cvmx_pciercx_cfg028_s cn66xx;
- struct cvmx_pciercx_cfg028_s cn68xx;
- struct cvmx_pciercx_cfg028_s cn68xxp1;
- struct cvmx_pciercx_cfg028_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg029 {
- uint32_t u32;
- struct cvmx_pciercx_cfg029_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_28_31:4;
- uint32_t cspls:2;
- uint32_t csplv:8;
- uint32_t reserved_16_17:2;
- uint32_t rber:1;
- uint32_t reserved_12_14:3;
- uint32_t el1al:3;
- uint32_t el0al:3;
- uint32_t etfs:1;
- uint32_t pfs:2;
- uint32_t mpss:3;
-#else
- uint32_t mpss:3;
- uint32_t pfs:2;
- uint32_t etfs:1;
- uint32_t el0al:3;
- uint32_t el1al:3;
- uint32_t reserved_12_14:3;
- uint32_t rber:1;
- uint32_t reserved_16_17:2;
- uint32_t csplv:8;
- uint32_t cspls:2;
- uint32_t reserved_28_31:4;
-#endif
- } s;
- struct cvmx_pciercx_cfg029_s cn52xx;
- struct cvmx_pciercx_cfg029_s cn52xxp1;
- struct cvmx_pciercx_cfg029_s cn56xx;
- struct cvmx_pciercx_cfg029_s cn56xxp1;
- struct cvmx_pciercx_cfg029_s cn61xx;
- struct cvmx_pciercx_cfg029_s cn63xx;
- struct cvmx_pciercx_cfg029_s cn63xxp1;
- struct cvmx_pciercx_cfg029_s cn66xx;
- struct cvmx_pciercx_cfg029_s cn68xx;
- struct cvmx_pciercx_cfg029_s cn68xxp1;
- struct cvmx_pciercx_cfg029_s cnf71xx;
};
union cvmx_pciercx_cfg030 {
uint32_t u32;
struct cvmx_pciercx_cfg030_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_22_31:10;
- uint32_t tp:1;
- uint32_t ap_d:1;
- uint32_t ur_d:1;
- uint32_t fe_d:1;
- uint32_t nfe_d:1;
- uint32_t ce_d:1;
- uint32_t reserved_15_15:1;
- uint32_t mrrs:3;
- uint32_t ns_en:1;
- uint32_t ap_en:1;
- uint32_t pf_en:1;
- uint32_t etf_en:1;
- uint32_t mps:3;
- uint32_t ro_en:1;
- uint32_t ur_en:1;
- uint32_t fe_en:1;
- uint32_t nfe_en:1;
- uint32_t ce_en:1;
-#else
- uint32_t ce_en:1;
- uint32_t nfe_en:1;
- uint32_t fe_en:1;
- uint32_t ur_en:1;
- uint32_t ro_en:1;
- uint32_t mps:3;
- uint32_t etf_en:1;
- uint32_t pf_en:1;
- uint32_t ap_en:1;
- uint32_t ns_en:1;
- uint32_t mrrs:3;
- uint32_t reserved_15_15:1;
- uint32_t ce_d:1;
- uint32_t nfe_d:1;
- uint32_t fe_d:1;
- uint32_t ur_d:1;
- uint32_t ap_d:1;
- uint32_t tp:1;
- uint32_t reserved_22_31:10;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_22_31:10,
+ __BITFIELD_FIELD(uint32_t tp:1,
+ __BITFIELD_FIELD(uint32_t ap_d:1,
+ __BITFIELD_FIELD(uint32_t ur_d:1,
+ __BITFIELD_FIELD(uint32_t fe_d:1,
+ __BITFIELD_FIELD(uint32_t nfe_d:1,
+ __BITFIELD_FIELD(uint32_t ce_d:1,
+ __BITFIELD_FIELD(uint32_t reserved_15_15:1,
+ __BITFIELD_FIELD(uint32_t mrrs:3,
+ __BITFIELD_FIELD(uint32_t ns_en:1,
+ __BITFIELD_FIELD(uint32_t ap_en:1,
+ __BITFIELD_FIELD(uint32_t pf_en:1,
+ __BITFIELD_FIELD(uint32_t etf_en:1,
+ __BITFIELD_FIELD(uint32_t mps:3,
+ __BITFIELD_FIELD(uint32_t ro_en:1,
+ __BITFIELD_FIELD(uint32_t ur_en:1,
+ __BITFIELD_FIELD(uint32_t fe_en:1,
+ __BITFIELD_FIELD(uint32_t nfe_en:1,
+ __BITFIELD_FIELD(uint32_t ce_en:1,
+ ;)))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg030_s cn52xx;
- struct cvmx_pciercx_cfg030_s cn52xxp1;
- struct cvmx_pciercx_cfg030_s cn56xx;
- struct cvmx_pciercx_cfg030_s cn56xxp1;
- struct cvmx_pciercx_cfg030_s cn61xx;
- struct cvmx_pciercx_cfg030_s cn63xx;
- struct cvmx_pciercx_cfg030_s cn63xxp1;
- struct cvmx_pciercx_cfg030_s cn66xx;
- struct cvmx_pciercx_cfg030_s cn68xx;
- struct cvmx_pciercx_cfg030_s cn68xxp1;
- struct cvmx_pciercx_cfg030_s cnf71xx;
};
union cvmx_pciercx_cfg031 {
uint32_t u32;
struct cvmx_pciercx_cfg031_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pnum:8;
- uint32_t reserved_23_23:1;
- uint32_t aspm:1;
- uint32_t lbnc:1;
- uint32_t dllarc:1;
- uint32_t sderc:1;
- uint32_t cpm:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t aslpms:2;
- uint32_t mlw:6;
- uint32_t mls:4;
-#else
- uint32_t mls:4;
- uint32_t mlw:6;
- uint32_t aslpms:2;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t cpm:1;
- uint32_t sderc:1;
- uint32_t dllarc:1;
- uint32_t lbnc:1;
- uint32_t aspm:1;
- uint32_t reserved_23_23:1;
- uint32_t pnum:8;
-#endif
+ __BITFIELD_FIELD(uint32_t pnum:8,
+ __BITFIELD_FIELD(uint32_t reserved_23_23:1,
+ __BITFIELD_FIELD(uint32_t aspm:1,
+ __BITFIELD_FIELD(uint32_t lbnc:1,
+ __BITFIELD_FIELD(uint32_t dllarc:1,
+ __BITFIELD_FIELD(uint32_t sderc:1,
+ __BITFIELD_FIELD(uint32_t cpm:1,
+ __BITFIELD_FIELD(uint32_t l1el:3,
+ __BITFIELD_FIELD(uint32_t l0el:3,
+ __BITFIELD_FIELD(uint32_t aslpms:2,
+ __BITFIELD_FIELD(uint32_t mlw:6,
+ __BITFIELD_FIELD(uint32_t mls:4,
+ ;))))))))))))
} s;
- struct cvmx_pciercx_cfg031_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t pnum:8;
- uint32_t reserved_22_23:2;
- uint32_t lbnc:1;
- uint32_t dllarc:1;
- uint32_t sderc:1;
- uint32_t cpm:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t aslpms:2;
- uint32_t mlw:6;
- uint32_t mls:4;
-#else
- uint32_t mls:4;
- uint32_t mlw:6;
- uint32_t aslpms:2;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t cpm:1;
- uint32_t sderc:1;
- uint32_t dllarc:1;
- uint32_t lbnc:1;
- uint32_t reserved_22_23:2;
- uint32_t pnum:8;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg031_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg031_cn52xx cn56xx;
- struct cvmx_pciercx_cfg031_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg031_s cn61xx;
- struct cvmx_pciercx_cfg031_cn52xx cn63xx;
- struct cvmx_pciercx_cfg031_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg031_s cn66xx;
- struct cvmx_pciercx_cfg031_s cn68xx;
- struct cvmx_pciercx_cfg031_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg031_s cnf71xx;
};
union cvmx_pciercx_cfg032 {
uint32_t u32;
struct cvmx_pciercx_cfg032_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lab:1;
- uint32_t lbm:1;
- uint32_t dlla:1;
- uint32_t scc:1;
- uint32_t lt:1;
- uint32_t reserved_26_26:1;
- uint32_t nlw:6;
- uint32_t ls:4;
- uint32_t reserved_12_15:4;
- uint32_t lab_int_enb:1;
- uint32_t lbm_int_enb:1;
- uint32_t hawd:1;
- uint32_t ecpm:1;
- uint32_t es:1;
- uint32_t ccc:1;
- uint32_t rl:1;
- uint32_t ld:1;
- uint32_t rcb:1;
- uint32_t reserved_2_2:1;
- uint32_t aslpc:2;
-#else
- uint32_t aslpc:2;
- uint32_t reserved_2_2:1;
- uint32_t rcb:1;
- uint32_t ld:1;
- uint32_t rl:1;
- uint32_t ccc:1;
- uint32_t es:1;
- uint32_t ecpm:1;
- uint32_t hawd:1;
- uint32_t lbm_int_enb:1;
- uint32_t lab_int_enb:1;
- uint32_t reserved_12_15:4;
- uint32_t ls:4;
- uint32_t nlw:6;
- uint32_t reserved_26_26:1;
- uint32_t lt:1;
- uint32_t scc:1;
- uint32_t dlla:1;
- uint32_t lbm:1;
- uint32_t lab:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg032_s cn52xx;
- struct cvmx_pciercx_cfg032_s cn52xxp1;
- struct cvmx_pciercx_cfg032_s cn56xx;
- struct cvmx_pciercx_cfg032_s cn56xxp1;
- struct cvmx_pciercx_cfg032_s cn61xx;
- struct cvmx_pciercx_cfg032_s cn63xx;
- struct cvmx_pciercx_cfg032_s cn63xxp1;
- struct cvmx_pciercx_cfg032_s cn66xx;
- struct cvmx_pciercx_cfg032_s cn68xx;
- struct cvmx_pciercx_cfg032_s cn68xxp1;
- struct cvmx_pciercx_cfg032_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg033 {
- uint32_t u32;
- struct cvmx_pciercx_cfg033_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t ps_num:13;
- uint32_t nccs:1;
- uint32_t emip:1;
- uint32_t sp_ls:2;
- uint32_t sp_lv:8;
- uint32_t hp_c:1;
- uint32_t hp_s:1;
- uint32_t pip:1;
- uint32_t aip:1;
- uint32_t mrlsp:1;
- uint32_t pcp:1;
- uint32_t abp:1;
-#else
- uint32_t abp:1;
- uint32_t pcp:1;
- uint32_t mrlsp:1;
- uint32_t aip:1;
- uint32_t pip:1;
- uint32_t hp_s:1;
- uint32_t hp_c:1;
- uint32_t sp_lv:8;
- uint32_t sp_ls:2;
- uint32_t emip:1;
- uint32_t nccs:1;
- uint32_t ps_num:13;
-#endif
+ __BITFIELD_FIELD(uint32_t lab:1,
+ __BITFIELD_FIELD(uint32_t lbm:1,
+ __BITFIELD_FIELD(uint32_t dlla:1,
+ __BITFIELD_FIELD(uint32_t scc:1,
+ __BITFIELD_FIELD(uint32_t lt:1,
+ __BITFIELD_FIELD(uint32_t reserved_26_26:1,
+ __BITFIELD_FIELD(uint32_t nlw:6,
+ __BITFIELD_FIELD(uint32_t ls:4,
+ __BITFIELD_FIELD(uint32_t reserved_12_15:4,
+ __BITFIELD_FIELD(uint32_t lab_int_enb:1,
+ __BITFIELD_FIELD(uint32_t lbm_int_enb:1,
+ __BITFIELD_FIELD(uint32_t hawd:1,
+ __BITFIELD_FIELD(uint32_t ecpm:1,
+ __BITFIELD_FIELD(uint32_t es:1,
+ __BITFIELD_FIELD(uint32_t ccc:1,
+ __BITFIELD_FIELD(uint32_t rl:1,
+ __BITFIELD_FIELD(uint32_t ld:1,
+ __BITFIELD_FIELD(uint32_t rcb:1,
+ __BITFIELD_FIELD(uint32_t reserved_2_2:1,
+ __BITFIELD_FIELD(uint32_t aslpc:2,
+ ;))))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg033_s cn52xx;
- struct cvmx_pciercx_cfg033_s cn52xxp1;
- struct cvmx_pciercx_cfg033_s cn56xx;
- struct cvmx_pciercx_cfg033_s cn56xxp1;
- struct cvmx_pciercx_cfg033_s cn61xx;
- struct cvmx_pciercx_cfg033_s cn63xx;
- struct cvmx_pciercx_cfg033_s cn63xxp1;
- struct cvmx_pciercx_cfg033_s cn66xx;
- struct cvmx_pciercx_cfg033_s cn68xx;
- struct cvmx_pciercx_cfg033_s cn68xxp1;
- struct cvmx_pciercx_cfg033_s cnf71xx;
};
union cvmx_pciercx_cfg034 {
uint32_t u32;
struct cvmx_pciercx_cfg034_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t dlls_c:1;
- uint32_t emis:1;
- uint32_t pds:1;
- uint32_t mrlss:1;
- uint32_t ccint_d:1;
- uint32_t pd_c:1;
- uint32_t mrls_c:1;
- uint32_t pf_d:1;
- uint32_t abp_d:1;
- uint32_t reserved_13_15:3;
- uint32_t dlls_en:1;
- uint32_t emic:1;
- uint32_t pcc:1;
- uint32_t pic:2;
- uint32_t aic:2;
- uint32_t hpint_en:1;
- uint32_t ccint_en:1;
- uint32_t pd_en:1;
- uint32_t mrls_en:1;
- uint32_t pf_en:1;
- uint32_t abp_en:1;
-#else
- uint32_t abp_en:1;
- uint32_t pf_en:1;
- uint32_t mrls_en:1;
- uint32_t pd_en:1;
- uint32_t ccint_en:1;
- uint32_t hpint_en:1;
- uint32_t aic:2;
- uint32_t pic:2;
- uint32_t pcc:1;
- uint32_t emic:1;
- uint32_t dlls_en:1;
- uint32_t reserved_13_15:3;
- uint32_t abp_d:1;
- uint32_t pf_d:1;
- uint32_t mrls_c:1;
- uint32_t pd_c:1;
- uint32_t ccint_d:1;
- uint32_t mrlss:1;
- uint32_t pds:1;
- uint32_t emis:1;
- uint32_t dlls_c:1;
- uint32_t reserved_25_31:7;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_25_31:7,
+ __BITFIELD_FIELD(uint32_t dlls_c:1,
+ __BITFIELD_FIELD(uint32_t emis:1,
+ __BITFIELD_FIELD(uint32_t pds:1,
+ __BITFIELD_FIELD(uint32_t mrlss:1,
+ __BITFIELD_FIELD(uint32_t ccint_d:1,
+ __BITFIELD_FIELD(uint32_t pd_c:1,
+ __BITFIELD_FIELD(uint32_t mrls_c:1,
+ __BITFIELD_FIELD(uint32_t pf_d:1,
+ __BITFIELD_FIELD(uint32_t abp_d:1,
+ __BITFIELD_FIELD(uint32_t reserved_13_15:3,
+ __BITFIELD_FIELD(uint32_t dlls_en:1,
+ __BITFIELD_FIELD(uint32_t emic:1,
+ __BITFIELD_FIELD(uint32_t pcc:1,
+ __BITFIELD_FIELD(uint32_t pic:2,
+ __BITFIELD_FIELD(uint32_t aic:2,
+ __BITFIELD_FIELD(uint32_t hpint_en:1,
+ __BITFIELD_FIELD(uint32_t ccint_en:1,
+ __BITFIELD_FIELD(uint32_t pd_en:1,
+ __BITFIELD_FIELD(uint32_t mrls_en:1,
+ __BITFIELD_FIELD(uint32_t pf_en:1,
+ __BITFIELD_FIELD(uint32_t abp_en:1,
+ ;))))))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg034_s cn52xx;
- struct cvmx_pciercx_cfg034_s cn52xxp1;
- struct cvmx_pciercx_cfg034_s cn56xx;
- struct cvmx_pciercx_cfg034_s cn56xxp1;
- struct cvmx_pciercx_cfg034_s cn61xx;
- struct cvmx_pciercx_cfg034_s cn63xx;
- struct cvmx_pciercx_cfg034_s cn63xxp1;
- struct cvmx_pciercx_cfg034_s cn66xx;
- struct cvmx_pciercx_cfg034_s cn68xx;
- struct cvmx_pciercx_cfg034_s cn68xxp1;
- struct cvmx_pciercx_cfg034_s cnf71xx;
};
union cvmx_pciercx_cfg035 {
uint32_t u32;
struct cvmx_pciercx_cfg035_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_17_31:15;
- uint32_t crssv:1;
- uint32_t reserved_5_15:11;
- uint32_t crssve:1;
- uint32_t pmeie:1;
- uint32_t sefee:1;
- uint32_t senfee:1;
- uint32_t secee:1;
-#else
- uint32_t secee:1;
- uint32_t senfee:1;
- uint32_t sefee:1;
- uint32_t pmeie:1;
- uint32_t crssve:1;
- uint32_t reserved_5_15:11;
- uint32_t crssv:1;
- uint32_t reserved_17_31:15;
-#endif
- } s;
- struct cvmx_pciercx_cfg035_s cn52xx;
- struct cvmx_pciercx_cfg035_s cn52xxp1;
- struct cvmx_pciercx_cfg035_s cn56xx;
- struct cvmx_pciercx_cfg035_s cn56xxp1;
- struct cvmx_pciercx_cfg035_s cn61xx;
- struct cvmx_pciercx_cfg035_s cn63xx;
- struct cvmx_pciercx_cfg035_s cn63xxp1;
- struct cvmx_pciercx_cfg035_s cn66xx;
- struct cvmx_pciercx_cfg035_s cn68xx;
- struct cvmx_pciercx_cfg035_s cn68xxp1;
- struct cvmx_pciercx_cfg035_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg036 {
- uint32_t u32;
- struct cvmx_pciercx_cfg036_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_18_31:14;
- uint32_t pme_pend:1;
- uint32_t pme_stat:1;
- uint32_t pme_rid:16;
-#else
- uint32_t pme_rid:16;
- uint32_t pme_stat:1;
- uint32_t pme_pend:1;
- uint32_t reserved_18_31:14;
-#endif
- } s;
- struct cvmx_pciercx_cfg036_s cn52xx;
- struct cvmx_pciercx_cfg036_s cn52xxp1;
- struct cvmx_pciercx_cfg036_s cn56xx;
- struct cvmx_pciercx_cfg036_s cn56xxp1;
- struct cvmx_pciercx_cfg036_s cn61xx;
- struct cvmx_pciercx_cfg036_s cn63xx;
- struct cvmx_pciercx_cfg036_s cn63xxp1;
- struct cvmx_pciercx_cfg036_s cn66xx;
- struct cvmx_pciercx_cfg036_s cn68xx;
- struct cvmx_pciercx_cfg036_s cn68xxp1;
- struct cvmx_pciercx_cfg036_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg037 {
- uint32_t u32;
- struct cvmx_pciercx_cfg037_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t obffs:2;
- uint32_t reserved_12_17:6;
- uint32_t ltrs:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t reserved_5_5:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t reserved_5_5:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t ltrs:1;
- uint32_t reserved_12_17:6;
- uint32_t obffs:2;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg037_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_5_31:27;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t reserved_5_31:27;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg037_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg037_cn52xx cn56xx;
- struct cvmx_pciercx_cfg037_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg037_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t tph:2;
- uint32_t reserved_11_11:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t ari_fw:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t ari_fw:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t reserved_11_11:1;
- uint32_t tph:2;
- uint32_t reserved_14_31:18;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg037_cn52xx cn63xx;
- struct cvmx_pciercx_cfg037_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg037_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t tph:2;
- uint32_t reserved_11_11:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t ari:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t ari:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t reserved_11_11:1;
- uint32_t tph:2;
- uint32_t reserved_14_31:18;
-#endif
- } cn66xx;
- struct cvmx_pciercx_cfg037_cn66xx cn68xx;
- struct cvmx_pciercx_cfg037_cn66xx cn68xxp1;
- struct cvmx_pciercx_cfg037_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t obffs:2;
- uint32_t reserved_14_17:4;
- uint32_t tphs:2;
- uint32_t ltrs:1;
- uint32_t noroprpr:1;
- uint32_t atom128s:1;
- uint32_t atom64s:1;
- uint32_t atom32s:1;
- uint32_t atom_ops:1;
- uint32_t ari_fw:1;
- uint32_t ctds:1;
- uint32_t ctrs:4;
-#else
- uint32_t ctrs:4;
- uint32_t ctds:1;
- uint32_t ari_fw:1;
- uint32_t atom_ops:1;
- uint32_t atom32s:1;
- uint32_t atom64s:1;
- uint32_t atom128s:1;
- uint32_t noroprpr:1;
- uint32_t ltrs:1;
- uint32_t tphs:2;
- uint32_t reserved_14_17:4;
- uint32_t obffs:2;
- uint32_t reserved_20_31:12;
-#endif
- } cnf71xx;
-};
-
-union cvmx_pciercx_cfg038 {
- uint32_t u32;
- struct cvmx_pciercx_cfg038_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_15_31:17;
- uint32_t obffe:2;
- uint32_t reserved_11_12:2;
- uint32_t ltre:1;
- uint32_t id0_cp:1;
- uint32_t id0_rq:1;
- uint32_t atom_op_eb:1;
- uint32_t atom_op:1;
- uint32_t ari:1;
- uint32_t ctd:1;
- uint32_t ctv:4;
-#else
- uint32_t ctv:4;
- uint32_t ctd:1;
- uint32_t ari:1;
- uint32_t atom_op:1;
- uint32_t atom_op_eb:1;
- uint32_t id0_rq:1;
- uint32_t id0_cp:1;
- uint32_t ltre:1;
- uint32_t reserved_11_12:2;
- uint32_t obffe:2;
- uint32_t reserved_15_31:17;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_17_31:15,
+ __BITFIELD_FIELD(uint32_t crssv:1,
+ __BITFIELD_FIELD(uint32_t reserved_5_15:11,
+ __BITFIELD_FIELD(uint32_t crssve:1,
+ __BITFIELD_FIELD(uint32_t pmeie:1,
+ __BITFIELD_FIELD(uint32_t sefee:1,
+ __BITFIELD_FIELD(uint32_t senfee:1,
+ __BITFIELD_FIELD(uint32_t secee:1,
+ ;))))))))
} s;
- struct cvmx_pciercx_cfg038_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_5_31:27;
- uint32_t ctd:1;
- uint32_t ctv:4;
-#else
- uint32_t ctv:4;
- uint32_t ctd:1;
- uint32_t reserved_5_31:27;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg038_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg038_cn52xx cn56xx;
- struct cvmx_pciercx_cfg038_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg038_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_10_31:22;
- uint32_t id0_cp:1;
- uint32_t id0_rq:1;
- uint32_t atom_op_eb:1;
- uint32_t atom_op:1;
- uint32_t ari:1;
- uint32_t ctd:1;
- uint32_t ctv:4;
-#else
- uint32_t ctv:4;
- uint32_t ctd:1;
- uint32_t ari:1;
- uint32_t atom_op:1;
- uint32_t atom_op_eb:1;
- uint32_t id0_rq:1;
- uint32_t id0_cp:1;
- uint32_t reserved_10_31:22;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg038_cn52xx cn63xx;
- struct cvmx_pciercx_cfg038_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg038_cn61xx cn66xx;
- struct cvmx_pciercx_cfg038_cn61xx cn68xx;
- struct cvmx_pciercx_cfg038_cn61xx cn68xxp1;
- struct cvmx_pciercx_cfg038_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg039 {
- uint32_t u32;
- struct cvmx_pciercx_cfg039_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_9_31:23;
- uint32_t cls:1;
- uint32_t slsv:7;
- uint32_t reserved_0_0:1;
-#else
- uint32_t reserved_0_0:1;
- uint32_t slsv:7;
- uint32_t cls:1;
- uint32_t reserved_9_31:23;
-#endif
- } s;
- struct cvmx_pciercx_cfg039_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg039_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg039_cn52xx cn56xx;
- struct cvmx_pciercx_cfg039_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg039_s cn61xx;
- struct cvmx_pciercx_cfg039_s cn63xx;
- struct cvmx_pciercx_cfg039_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg039_s cn66xx;
- struct cvmx_pciercx_cfg039_s cn68xx;
- struct cvmx_pciercx_cfg039_s cn68xxp1;
- struct cvmx_pciercx_cfg039_s cnf71xx;
};
union cvmx_pciercx_cfg040 {
uint32_t u32;
struct cvmx_pciercx_cfg040_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_17_31:15;
- uint32_t cdl:1;
- uint32_t reserved_13_15:3;
- uint32_t cde:1;
- uint32_t csos:1;
- uint32_t emc:1;
- uint32_t tm:3;
- uint32_t sde:1;
- uint32_t hasd:1;
- uint32_t ec:1;
- uint32_t tls:4;
-#else
- uint32_t tls:4;
- uint32_t ec:1;
- uint32_t hasd:1;
- uint32_t sde:1;
- uint32_t tm:3;
- uint32_t emc:1;
- uint32_t csos:1;
- uint32_t cde:1;
- uint32_t reserved_13_15:3;
- uint32_t cdl:1;
- uint32_t reserved_17_31:15;
-#endif
- } s;
- struct cvmx_pciercx_cfg040_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg040_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg040_cn52xx cn56xx;
- struct cvmx_pciercx_cfg040_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg040_s cn61xx;
- struct cvmx_pciercx_cfg040_s cn63xx;
- struct cvmx_pciercx_cfg040_s cn63xxp1;
- struct cvmx_pciercx_cfg040_s cn66xx;
- struct cvmx_pciercx_cfg040_s cn68xx;
- struct cvmx_pciercx_cfg040_s cn68xxp1;
- struct cvmx_pciercx_cfg040_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg041 {
- uint32_t u32;
- struct cvmx_pciercx_cfg041_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg041_s cn52xx;
- struct cvmx_pciercx_cfg041_s cn52xxp1;
- struct cvmx_pciercx_cfg041_s cn56xx;
- struct cvmx_pciercx_cfg041_s cn56xxp1;
- struct cvmx_pciercx_cfg041_s cn61xx;
- struct cvmx_pciercx_cfg041_s cn63xx;
- struct cvmx_pciercx_cfg041_s cn63xxp1;
- struct cvmx_pciercx_cfg041_s cn66xx;
- struct cvmx_pciercx_cfg041_s cn68xx;
- struct cvmx_pciercx_cfg041_s cn68xxp1;
- struct cvmx_pciercx_cfg041_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg042 {
- uint32_t u32;
- struct cvmx_pciercx_cfg042_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_0_31:32;
-#else
- uint32_t reserved_0_31:32;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_22_31:10,
+ __BITFIELD_FIELD(uint32_t ler:1,
+ __BITFIELD_FIELD(uint32_t ep3s:1,
+ __BITFIELD_FIELD(uint32_t ep2s:1,
+ __BITFIELD_FIELD(uint32_t ep1s:1,
+ __BITFIELD_FIELD(uint32_t eqc:1,
+ __BITFIELD_FIELD(uint32_t cdl:1,
+ __BITFIELD_FIELD(uint32_t cde:4,
+ __BITFIELD_FIELD(uint32_t csos:1,
+ __BITFIELD_FIELD(uint32_t emc:1,
+ __BITFIELD_FIELD(uint32_t tm:3,
+ __BITFIELD_FIELD(uint32_t sde:1,
+ __BITFIELD_FIELD(uint32_t hasd:1,
+ __BITFIELD_FIELD(uint32_t ec:1,
+ __BITFIELD_FIELD(uint32_t tls:4,
+ ;)))))))))))))))
} s;
- struct cvmx_pciercx_cfg042_s cn52xx;
- struct cvmx_pciercx_cfg042_s cn52xxp1;
- struct cvmx_pciercx_cfg042_s cn56xx;
- struct cvmx_pciercx_cfg042_s cn56xxp1;
- struct cvmx_pciercx_cfg042_s cn61xx;
- struct cvmx_pciercx_cfg042_s cn63xx;
- struct cvmx_pciercx_cfg042_s cn63xxp1;
- struct cvmx_pciercx_cfg042_s cn66xx;
- struct cvmx_pciercx_cfg042_s cn68xx;
- struct cvmx_pciercx_cfg042_s cn68xxp1;
- struct cvmx_pciercx_cfg042_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg064 {
- uint32_t u32;
- struct cvmx_pciercx_cfg064_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t nco:12;
- uint32_t cv:4;
- uint32_t pcieec:16;
-#else
- uint32_t pcieec:16;
- uint32_t cv:4;
- uint32_t nco:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg064_s cn52xx;
- struct cvmx_pciercx_cfg064_s cn52xxp1;
- struct cvmx_pciercx_cfg064_s cn56xx;
- struct cvmx_pciercx_cfg064_s cn56xxp1;
- struct cvmx_pciercx_cfg064_s cn61xx;
- struct cvmx_pciercx_cfg064_s cn63xx;
- struct cvmx_pciercx_cfg064_s cn63xxp1;
- struct cvmx_pciercx_cfg064_s cn66xx;
- struct cvmx_pciercx_cfg064_s cn68xx;
- struct cvmx_pciercx_cfg064_s cn68xxp1;
- struct cvmx_pciercx_cfg064_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg065 {
- uint32_t u32;
- struct cvmx_pciercx_cfg065_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_23_23:1;
- uint32_t ucies:1;
- uint32_t reserved_21_21:1;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_21:1;
- uint32_t ucies:1;
- uint32_t reserved_23_23:1;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg065_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_31:11;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg065_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg065_cn52xx cn56xx;
- struct cvmx_pciercx_cfg065_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg065_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_21_23:3;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_23:3;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg065_cn52xx cn63xx;
- struct cvmx_pciercx_cfg065_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg065_cn61xx cn66xx;
- struct cvmx_pciercx_cfg065_cn61xx cn68xx;
- struct cvmx_pciercx_cfg065_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg065_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg066 {
- uint32_t u32;
- struct cvmx_pciercx_cfg066_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombm:1;
- uint32_t reserved_23_23:1;
- uint32_t uciem:1;
- uint32_t reserved_21_21:1;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpem:1;
- uint32_t sdem:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlpm:1;
- uint32_t fcpem:1;
- uint32_t ctm:1;
- uint32_t cam:1;
- uint32_t ucm:1;
- uint32_t rom:1;
- uint32_t mtlpm:1;
- uint32_t ecrcem:1;
- uint32_t urem:1;
- uint32_t reserved_21_21:1;
- uint32_t uciem:1;
- uint32_t reserved_23_23:1;
- uint32_t uatombm:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg066_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpem:1;
- uint32_t sdem:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlpm:1;
- uint32_t fcpem:1;
- uint32_t ctm:1;
- uint32_t cam:1;
- uint32_t ucm:1;
- uint32_t rom:1;
- uint32_t mtlpm:1;
- uint32_t ecrcem:1;
- uint32_t urem:1;
- uint32_t reserved_21_31:11;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg066_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg066_cn52xx cn56xx;
- struct cvmx_pciercx_cfg066_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg066_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombm:1;
- uint32_t reserved_21_23:3;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpem:1;
- uint32_t sdem:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlpm:1;
- uint32_t fcpem:1;
- uint32_t ctm:1;
- uint32_t cam:1;
- uint32_t ucm:1;
- uint32_t rom:1;
- uint32_t mtlpm:1;
- uint32_t ecrcem:1;
- uint32_t urem:1;
- uint32_t reserved_21_23:3;
- uint32_t uatombm:1;
- uint32_t reserved_25_31:7;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg066_cn52xx cn63xx;
- struct cvmx_pciercx_cfg066_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg066_cn61xx cn66xx;
- struct cvmx_pciercx_cfg066_cn61xx cn68xx;
- struct cvmx_pciercx_cfg066_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg066_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg067 {
- uint32_t u32;
- struct cvmx_pciercx_cfg067_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_23_23:1;
- uint32_t ucies:1;
- uint32_t reserved_21_21:1;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_21:1;
- uint32_t ucies:1;
- uint32_t reserved_23_23:1;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } s;
- struct cvmx_pciercx_cfg067_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_31:11;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg067_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg067_cn52xx cn56xx;
- struct cvmx_pciercx_cfg067_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg067_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_25_31:7;
- uint32_t uatombs:1;
- uint32_t reserved_21_23:3;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
-#else
- uint32_t reserved_0_3:4;
- uint32_t dlpes:1;
- uint32_t sdes:1;
- uint32_t reserved_6_11:6;
- uint32_t ptlps:1;
- uint32_t fcpes:1;
- uint32_t cts:1;
- uint32_t cas:1;
- uint32_t ucs:1;
- uint32_t ros:1;
- uint32_t mtlps:1;
- uint32_t ecrces:1;
- uint32_t ures:1;
- uint32_t reserved_21_23:3;
- uint32_t uatombs:1;
- uint32_t reserved_25_31:7;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg067_cn52xx cn63xx;
- struct cvmx_pciercx_cfg067_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg067_cn61xx cn66xx;
- struct cvmx_pciercx_cfg067_cn61xx cn68xx;
- struct cvmx_pciercx_cfg067_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg067_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg068 {
- uint32_t u32;
- struct cvmx_pciercx_cfg068_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_15_31:17;
- uint32_t cies:1;
- uint32_t anfes:1;
- uint32_t rtts:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrs:1;
- uint32_t bdllps:1;
- uint32_t btlps:1;
- uint32_t reserved_1_5:5;
- uint32_t res:1;
-#else
- uint32_t res:1;
- uint32_t reserved_1_5:5;
- uint32_t btlps:1;
- uint32_t bdllps:1;
- uint32_t rnrs:1;
- uint32_t reserved_9_11:3;
- uint32_t rtts:1;
- uint32_t anfes:1;
- uint32_t cies:1;
- uint32_t reserved_15_31:17;
-#endif
- } s;
- struct cvmx_pciercx_cfg068_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t anfes:1;
- uint32_t rtts:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrs:1;
- uint32_t bdllps:1;
- uint32_t btlps:1;
- uint32_t reserved_1_5:5;
- uint32_t res:1;
-#else
- uint32_t res:1;
- uint32_t reserved_1_5:5;
- uint32_t btlps:1;
- uint32_t bdllps:1;
- uint32_t rnrs:1;
- uint32_t reserved_9_11:3;
- uint32_t rtts:1;
- uint32_t anfes:1;
- uint32_t reserved_14_31:18;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg068_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg068_cn52xx cn56xx;
- struct cvmx_pciercx_cfg068_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg068_cn52xx cn61xx;
- struct cvmx_pciercx_cfg068_cn52xx cn63xx;
- struct cvmx_pciercx_cfg068_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg068_cn52xx cn66xx;
- struct cvmx_pciercx_cfg068_cn52xx cn68xx;
- struct cvmx_pciercx_cfg068_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg068_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg069 {
- uint32_t u32;
- struct cvmx_pciercx_cfg069_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_15_31:17;
- uint32_t ciem:1;
- uint32_t anfem:1;
- uint32_t rttm:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrm:1;
- uint32_t bdllpm:1;
- uint32_t btlpm:1;
- uint32_t reserved_1_5:5;
- uint32_t rem:1;
-#else
- uint32_t rem:1;
- uint32_t reserved_1_5:5;
- uint32_t btlpm:1;
- uint32_t bdllpm:1;
- uint32_t rnrm:1;
- uint32_t reserved_9_11:3;
- uint32_t rttm:1;
- uint32_t anfem:1;
- uint32_t ciem:1;
- uint32_t reserved_15_31:17;
-#endif
- } s;
- struct cvmx_pciercx_cfg069_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_14_31:18;
- uint32_t anfem:1;
- uint32_t rttm:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrm:1;
- uint32_t bdllpm:1;
- uint32_t btlpm:1;
- uint32_t reserved_1_5:5;
- uint32_t rem:1;
-#else
- uint32_t rem:1;
- uint32_t reserved_1_5:5;
- uint32_t btlpm:1;
- uint32_t bdllpm:1;
- uint32_t rnrm:1;
- uint32_t reserved_9_11:3;
- uint32_t rttm:1;
- uint32_t anfem:1;
- uint32_t reserved_14_31:18;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg069_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg069_cn52xx cn56xx;
- struct cvmx_pciercx_cfg069_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg069_cn52xx cn61xx;
- struct cvmx_pciercx_cfg069_cn52xx cn63xx;
- struct cvmx_pciercx_cfg069_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg069_cn52xx cn66xx;
- struct cvmx_pciercx_cfg069_cn52xx cn68xx;
- struct cvmx_pciercx_cfg069_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg069_s cnf71xx;
};
union cvmx_pciercx_cfg070 {
uint32_t u32;
struct cvmx_pciercx_cfg070_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_9_31:23;
- uint32_t ce:1;
- uint32_t cc:1;
- uint32_t ge:1;
- uint32_t gc:1;
- uint32_t fep:5;
-#else
- uint32_t fep:5;
- uint32_t gc:1;
- uint32_t ge:1;
- uint32_t cc:1;
- uint32_t ce:1;
- uint32_t reserved_9_31:23;
-#endif
- } s;
- struct cvmx_pciercx_cfg070_s cn52xx;
- struct cvmx_pciercx_cfg070_s cn52xxp1;
- struct cvmx_pciercx_cfg070_s cn56xx;
- struct cvmx_pciercx_cfg070_s cn56xxp1;
- struct cvmx_pciercx_cfg070_s cn61xx;
- struct cvmx_pciercx_cfg070_s cn63xx;
- struct cvmx_pciercx_cfg070_s cn63xxp1;
- struct cvmx_pciercx_cfg070_s cn66xx;
- struct cvmx_pciercx_cfg070_s cn68xx;
- struct cvmx_pciercx_cfg070_s cn68xxp1;
- struct cvmx_pciercx_cfg070_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg071 {
- uint32_t u32;
- struct cvmx_pciercx_cfg071_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword1:32;
-#else
- uint32_t dword1:32;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_12_31:20,
+ __BITFIELD_FIELD(uint32_t tplp:1,
+ __BITFIELD_FIELD(uint32_t reserved_9_10:2,
+ __BITFIELD_FIELD(uint32_t ce:1,
+ __BITFIELD_FIELD(uint32_t cc:1,
+ __BITFIELD_FIELD(uint32_t ge:1,
+ __BITFIELD_FIELD(uint32_t gc:1,
+ __BITFIELD_FIELD(uint32_t fep:5,
+ ;))))))))
} s;
- struct cvmx_pciercx_cfg071_s cn52xx;
- struct cvmx_pciercx_cfg071_s cn52xxp1;
- struct cvmx_pciercx_cfg071_s cn56xx;
- struct cvmx_pciercx_cfg071_s cn56xxp1;
- struct cvmx_pciercx_cfg071_s cn61xx;
- struct cvmx_pciercx_cfg071_s cn63xx;
- struct cvmx_pciercx_cfg071_s cn63xxp1;
- struct cvmx_pciercx_cfg071_s cn66xx;
- struct cvmx_pciercx_cfg071_s cn68xx;
- struct cvmx_pciercx_cfg071_s cn68xxp1;
- struct cvmx_pciercx_cfg071_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg072 {
- uint32_t u32;
- struct cvmx_pciercx_cfg072_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword2:32;
-#else
- uint32_t dword2:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg072_s cn52xx;
- struct cvmx_pciercx_cfg072_s cn52xxp1;
- struct cvmx_pciercx_cfg072_s cn56xx;
- struct cvmx_pciercx_cfg072_s cn56xxp1;
- struct cvmx_pciercx_cfg072_s cn61xx;
- struct cvmx_pciercx_cfg072_s cn63xx;
- struct cvmx_pciercx_cfg072_s cn63xxp1;
- struct cvmx_pciercx_cfg072_s cn66xx;
- struct cvmx_pciercx_cfg072_s cn68xx;
- struct cvmx_pciercx_cfg072_s cn68xxp1;
- struct cvmx_pciercx_cfg072_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg073 {
- uint32_t u32;
- struct cvmx_pciercx_cfg073_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword3:32;
-#else
- uint32_t dword3:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg073_s cn52xx;
- struct cvmx_pciercx_cfg073_s cn52xxp1;
- struct cvmx_pciercx_cfg073_s cn56xx;
- struct cvmx_pciercx_cfg073_s cn56xxp1;
- struct cvmx_pciercx_cfg073_s cn61xx;
- struct cvmx_pciercx_cfg073_s cn63xx;
- struct cvmx_pciercx_cfg073_s cn63xxp1;
- struct cvmx_pciercx_cfg073_s cn66xx;
- struct cvmx_pciercx_cfg073_s cn68xx;
- struct cvmx_pciercx_cfg073_s cn68xxp1;
- struct cvmx_pciercx_cfg073_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg074 {
- uint32_t u32;
- struct cvmx_pciercx_cfg074_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dword4:32;
-#else
- uint32_t dword4:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg074_s cn52xx;
- struct cvmx_pciercx_cfg074_s cn52xxp1;
- struct cvmx_pciercx_cfg074_s cn56xx;
- struct cvmx_pciercx_cfg074_s cn56xxp1;
- struct cvmx_pciercx_cfg074_s cn61xx;
- struct cvmx_pciercx_cfg074_s cn63xx;
- struct cvmx_pciercx_cfg074_s cn63xxp1;
- struct cvmx_pciercx_cfg074_s cn66xx;
- struct cvmx_pciercx_cfg074_s cn68xx;
- struct cvmx_pciercx_cfg074_s cn68xxp1;
- struct cvmx_pciercx_cfg074_s cnf71xx;
};
union cvmx_pciercx_cfg075 {
uint32_t u32;
struct cvmx_pciercx_cfg075_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_3_31:29;
- uint32_t fere:1;
- uint32_t nfere:1;
- uint32_t cere:1;
-#else
- uint32_t cere:1;
- uint32_t nfere:1;
- uint32_t fere:1;
- uint32_t reserved_3_31:29;
-#endif
- } s;
- struct cvmx_pciercx_cfg075_s cn52xx;
- struct cvmx_pciercx_cfg075_s cn52xxp1;
- struct cvmx_pciercx_cfg075_s cn56xx;
- struct cvmx_pciercx_cfg075_s cn56xxp1;
- struct cvmx_pciercx_cfg075_s cn61xx;
- struct cvmx_pciercx_cfg075_s cn63xx;
- struct cvmx_pciercx_cfg075_s cn63xxp1;
- struct cvmx_pciercx_cfg075_s cn66xx;
- struct cvmx_pciercx_cfg075_s cn68xx;
- struct cvmx_pciercx_cfg075_s cn68xxp1;
- struct cvmx_pciercx_cfg075_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg076 {
- uint32_t u32;
- struct cvmx_pciercx_cfg076_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t aeimn:5;
- uint32_t reserved_7_26:20;
- uint32_t femr:1;
- uint32_t nfemr:1;
- uint32_t fuf:1;
- uint32_t multi_efnfr:1;
- uint32_t efnfr:1;
- uint32_t multi_ecr:1;
- uint32_t ecr:1;
-#else
- uint32_t ecr:1;
- uint32_t multi_ecr:1;
- uint32_t efnfr:1;
- uint32_t multi_efnfr:1;
- uint32_t fuf:1;
- uint32_t nfemr:1;
- uint32_t femr:1;
- uint32_t reserved_7_26:20;
- uint32_t aeimn:5;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_3_31:29,
+ __BITFIELD_FIELD(uint32_t fere:1,
+ __BITFIELD_FIELD(uint32_t nfere:1,
+ __BITFIELD_FIELD(uint32_t cere:1,
+ ;))))
} s;
- struct cvmx_pciercx_cfg076_s cn52xx;
- struct cvmx_pciercx_cfg076_s cn52xxp1;
- struct cvmx_pciercx_cfg076_s cn56xx;
- struct cvmx_pciercx_cfg076_s cn56xxp1;
- struct cvmx_pciercx_cfg076_s cn61xx;
- struct cvmx_pciercx_cfg076_s cn63xx;
- struct cvmx_pciercx_cfg076_s cn63xxp1;
- struct cvmx_pciercx_cfg076_s cn66xx;
- struct cvmx_pciercx_cfg076_s cn68xx;
- struct cvmx_pciercx_cfg076_s cn68xxp1;
- struct cvmx_pciercx_cfg076_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg077 {
- uint32_t u32;
- struct cvmx_pciercx_cfg077_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t efnfsi:16;
- uint32_t ecsi:16;
-#else
- uint32_t ecsi:16;
- uint32_t efnfsi:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg077_s cn52xx;
- struct cvmx_pciercx_cfg077_s cn52xxp1;
- struct cvmx_pciercx_cfg077_s cn56xx;
- struct cvmx_pciercx_cfg077_s cn56xxp1;
- struct cvmx_pciercx_cfg077_s cn61xx;
- struct cvmx_pciercx_cfg077_s cn63xx;
- struct cvmx_pciercx_cfg077_s cn63xxp1;
- struct cvmx_pciercx_cfg077_s cn66xx;
- struct cvmx_pciercx_cfg077_s cn68xx;
- struct cvmx_pciercx_cfg077_s cn68xxp1;
- struct cvmx_pciercx_cfg077_s cnf71xx;
};
union cvmx_pciercx_cfg448 {
uint32_t u32;
struct cvmx_pciercx_cfg448_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rtl:16;
- uint32_t rtltl:16;
-#else
- uint32_t rtltl:16;
- uint32_t rtl:16;
-#endif
- } s;
- struct cvmx_pciercx_cfg448_s cn52xx;
- struct cvmx_pciercx_cfg448_s cn52xxp1;
- struct cvmx_pciercx_cfg448_s cn56xx;
- struct cvmx_pciercx_cfg448_s cn56xxp1;
- struct cvmx_pciercx_cfg448_s cn61xx;
- struct cvmx_pciercx_cfg448_s cn63xx;
- struct cvmx_pciercx_cfg448_s cn63xxp1;
- struct cvmx_pciercx_cfg448_s cn66xx;
- struct cvmx_pciercx_cfg448_s cn68xx;
- struct cvmx_pciercx_cfg448_s cn68xxp1;
- struct cvmx_pciercx_cfg448_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg449 {
- uint32_t u32;
- struct cvmx_pciercx_cfg449_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t omr:32;
-#else
- uint32_t omr:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg449_s cn52xx;
- struct cvmx_pciercx_cfg449_s cn52xxp1;
- struct cvmx_pciercx_cfg449_s cn56xx;
- struct cvmx_pciercx_cfg449_s cn56xxp1;
- struct cvmx_pciercx_cfg449_s cn61xx;
- struct cvmx_pciercx_cfg449_s cn63xx;
- struct cvmx_pciercx_cfg449_s cn63xxp1;
- struct cvmx_pciercx_cfg449_s cn66xx;
- struct cvmx_pciercx_cfg449_s cn68xx;
- struct cvmx_pciercx_cfg449_s cn68xxp1;
- struct cvmx_pciercx_cfg449_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg450 {
- uint32_t u32;
- struct cvmx_pciercx_cfg450_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t lpec:8;
- uint32_t reserved_22_23:2;
- uint32_t link_state:6;
- uint32_t force_link:1;
- uint32_t reserved_8_14:7;
- uint32_t link_num:8;
-#else
- uint32_t link_num:8;
- uint32_t reserved_8_14:7;
- uint32_t force_link:1;
- uint32_t link_state:6;
- uint32_t reserved_22_23:2;
- uint32_t lpec:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg450_s cn52xx;
- struct cvmx_pciercx_cfg450_s cn52xxp1;
- struct cvmx_pciercx_cfg450_s cn56xx;
- struct cvmx_pciercx_cfg450_s cn56xxp1;
- struct cvmx_pciercx_cfg450_s cn61xx;
- struct cvmx_pciercx_cfg450_s cn63xx;
- struct cvmx_pciercx_cfg450_s cn63xxp1;
- struct cvmx_pciercx_cfg450_s cn66xx;
- struct cvmx_pciercx_cfg450_s cn68xx;
- struct cvmx_pciercx_cfg450_s cn68xxp1;
- struct cvmx_pciercx_cfg450_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg451 {
- uint32_t u32;
- struct cvmx_pciercx_cfg451_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_31_31:1;
- uint32_t easpml1:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t n_fts_cc:8;
- uint32_t n_fts:8;
- uint32_t ack_freq:8;
-#else
- uint32_t ack_freq:8;
- uint32_t n_fts:8;
- uint32_t n_fts_cc:8;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t easpml1:1;
- uint32_t reserved_31_31:1;
-#endif
+ __BITFIELD_FIELD(uint32_t rtl:16,
+ __BITFIELD_FIELD(uint32_t rtltl:16,
+ ;))
} s;
- struct cvmx_pciercx_cfg451_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_30_31:2;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t n_fts_cc:8;
- uint32_t n_fts:8;
- uint32_t ack_freq:8;
-#else
- uint32_t ack_freq:8;
- uint32_t n_fts:8;
- uint32_t n_fts_cc:8;
- uint32_t l0el:3;
- uint32_t l1el:3;
- uint32_t reserved_30_31:2;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg451_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg451_cn52xx cn56xx;
- struct cvmx_pciercx_cfg451_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg451_s cn61xx;
- struct cvmx_pciercx_cfg451_cn52xx cn63xx;
- struct cvmx_pciercx_cfg451_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg451_s cn66xx;
- struct cvmx_pciercx_cfg451_s cn68xx;
- struct cvmx_pciercx_cfg451_s cn68xxp1;
- struct cvmx_pciercx_cfg451_s cnf71xx;
};
union cvmx_pciercx_cfg452 {
uint32_t u32;
struct cvmx_pciercx_cfg452_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t eccrc:1;
- uint32_t reserved_22_24:3;
- uint32_t lme:6;
- uint32_t reserved_8_15:8;
- uint32_t flm:1;
- uint32_t reserved_6_6:1;
- uint32_t dllle:1;
- uint32_t reserved_4_4:1;
- uint32_t ra:1;
- uint32_t le:1;
- uint32_t sd:1;
- uint32_t omr:1;
-#else
- uint32_t omr:1;
- uint32_t sd:1;
- uint32_t le:1;
- uint32_t ra:1;
- uint32_t reserved_4_4:1;
- uint32_t dllle:1;
- uint32_t reserved_6_6:1;
- uint32_t flm:1;
- uint32_t reserved_8_15:8;
- uint32_t lme:6;
- uint32_t reserved_22_24:3;
- uint32_t eccrc:1;
- uint32_t reserved_26_31:6;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_26_31:6,
+ __BITFIELD_FIELD(uint32_t eccrc:1,
+ __BITFIELD_FIELD(uint32_t reserved_22_24:3,
+ __BITFIELD_FIELD(uint32_t lme:6,
+ __BITFIELD_FIELD(uint32_t reserved_12_15:4,
+ __BITFIELD_FIELD(uint32_t link_rate:4,
+ __BITFIELD_FIELD(uint32_t flm:1,
+ __BITFIELD_FIELD(uint32_t reserved_6_6:1,
+ __BITFIELD_FIELD(uint32_t dllle:1,
+ __BITFIELD_FIELD(uint32_t reserved_4_4:1,
+ __BITFIELD_FIELD(uint32_t ra:1,
+ __BITFIELD_FIELD(uint32_t le:1,
+ __BITFIELD_FIELD(uint32_t sd:1,
+ __BITFIELD_FIELD(uint32_t omr:1,
+ ;))))))))))))))
} s;
- struct cvmx_pciercx_cfg452_s cn52xx;
- struct cvmx_pciercx_cfg452_s cn52xxp1;
- struct cvmx_pciercx_cfg452_s cn56xx;
- struct cvmx_pciercx_cfg452_s cn56xxp1;
- struct cvmx_pciercx_cfg452_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_22_31:10;
- uint32_t lme:6;
- uint32_t reserved_8_15:8;
- uint32_t flm:1;
- uint32_t reserved_6_6:1;
- uint32_t dllle:1;
- uint32_t reserved_4_4:1;
- uint32_t ra:1;
- uint32_t le:1;
- uint32_t sd:1;
- uint32_t omr:1;
-#else
- uint32_t omr:1;
- uint32_t sd:1;
- uint32_t le:1;
- uint32_t ra:1;
- uint32_t reserved_4_4:1;
- uint32_t dllle:1;
- uint32_t reserved_6_6:1;
- uint32_t flm:1;
- uint32_t reserved_8_15:8;
- uint32_t lme:6;
- uint32_t reserved_22_31:10;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg452_s cn63xx;
- struct cvmx_pciercx_cfg452_s cn63xxp1;
- struct cvmx_pciercx_cfg452_cn61xx cn66xx;
- struct cvmx_pciercx_cfg452_cn61xx cn68xx;
- struct cvmx_pciercx_cfg452_cn61xx cn68xxp1;
- struct cvmx_pciercx_cfg452_cn61xx cnf71xx;
-};
-
-union cvmx_pciercx_cfg453 {
- uint32_t u32;
- struct cvmx_pciercx_cfg453_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dlld:1;
- uint32_t reserved_26_30:5;
- uint32_t ack_nak:1;
- uint32_t fcd:1;
- uint32_t ilst:24;
-#else
- uint32_t ilst:24;
- uint32_t fcd:1;
- uint32_t ack_nak:1;
- uint32_t reserved_26_30:5;
- uint32_t dlld:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg453_s cn52xx;
- struct cvmx_pciercx_cfg453_s cn52xxp1;
- struct cvmx_pciercx_cfg453_s cn56xx;
- struct cvmx_pciercx_cfg453_s cn56xxp1;
- struct cvmx_pciercx_cfg453_s cn61xx;
- struct cvmx_pciercx_cfg453_s cn63xx;
- struct cvmx_pciercx_cfg453_s cn63xxp1;
- struct cvmx_pciercx_cfg453_s cn66xx;
- struct cvmx_pciercx_cfg453_s cn68xx;
- struct cvmx_pciercx_cfg453_s cn68xxp1;
- struct cvmx_pciercx_cfg453_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg454 {
- uint32_t u32;
- struct cvmx_pciercx_cfg454_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t cx_nfunc:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_11_13:3;
- uint32_t nskps:3;
- uint32_t reserved_0_7:8;
-#else
- uint32_t reserved_0_7:8;
- uint32_t nskps:3;
- uint32_t reserved_11_13:3;
- uint32_t tmrt:5;
- uint32_t tmanlt:5;
- uint32_t tmfcwt:5;
- uint32_t cx_nfunc:3;
-#endif
- } s;
- struct cvmx_pciercx_cfg454_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_29_31:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_11_13:3;
- uint32_t nskps:3;
- uint32_t reserved_4_7:4;
- uint32_t ntss:4;
-#else
- uint32_t ntss:4;
- uint32_t reserved_4_7:4;
- uint32_t nskps:3;
- uint32_t reserved_11_13:3;
- uint32_t tmrt:5;
- uint32_t tmanlt:5;
- uint32_t tmfcwt:5;
- uint32_t reserved_29_31:3;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg454_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg454_cn52xx cn56xx;
- struct cvmx_pciercx_cfg454_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg454_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t cx_nfunc:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_8_13:6;
- uint32_t mfuncn:8;
-#else
- uint32_t mfuncn:8;
- uint32_t reserved_8_13:6;
- uint32_t tmrt:5;
- uint32_t tmanlt:5;
- uint32_t tmfcwt:5;
- uint32_t cx_nfunc:3;
-#endif
- } cn61xx;
- struct cvmx_pciercx_cfg454_cn52xx cn63xx;
- struct cvmx_pciercx_cfg454_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg454_cn61xx cn66xx;
- struct cvmx_pciercx_cfg454_cn61xx cn68xx;
- struct cvmx_pciercx_cfg454_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg454_cn61xx cnf71xx;
};
union cvmx_pciercx_cfg455 {
uint32_t u32;
struct cvmx_pciercx_cfg455_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t m_cfg0_filt:1;
- uint32_t m_io_filt:1;
- uint32_t msg_ctrl:1;
- uint32_t m_cpl_ecrc_filt:1;
- uint32_t m_ecrc_filt:1;
- uint32_t m_cpl_len_err:1;
- uint32_t m_cpl_attr_err:1;
- uint32_t m_cpl_tc_err:1;
- uint32_t m_cpl_fun_err:1;
- uint32_t m_cpl_rid_err:1;
- uint32_t m_cpl_tag_err:1;
- uint32_t m_lk_filt:1;
- uint32_t m_cfg1_filt:1;
- uint32_t m_bar_match:1;
- uint32_t m_pois_filt:1;
- uint32_t m_fun:1;
- uint32_t dfcwt:1;
- uint32_t reserved_11_14:4;
- uint32_t skpiv:11;
-#else
- uint32_t skpiv:11;
- uint32_t reserved_11_14:4;
- uint32_t dfcwt:1;
- uint32_t m_fun:1;
- uint32_t m_pois_filt:1;
- uint32_t m_bar_match:1;
- uint32_t m_cfg1_filt:1;
- uint32_t m_lk_filt:1;
- uint32_t m_cpl_tag_err:1;
- uint32_t m_cpl_rid_err:1;
- uint32_t m_cpl_fun_err:1;
- uint32_t m_cpl_tc_err:1;
- uint32_t m_cpl_attr_err:1;
- uint32_t m_cpl_len_err:1;
- uint32_t m_ecrc_filt:1;
- uint32_t m_cpl_ecrc_filt:1;
- uint32_t msg_ctrl:1;
- uint32_t m_io_filt:1;
- uint32_t m_cfg0_filt:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg455_s cn52xx;
- struct cvmx_pciercx_cfg455_s cn52xxp1;
- struct cvmx_pciercx_cfg455_s cn56xx;
- struct cvmx_pciercx_cfg455_s cn56xxp1;
- struct cvmx_pciercx_cfg455_s cn61xx;
- struct cvmx_pciercx_cfg455_s cn63xx;
- struct cvmx_pciercx_cfg455_s cn63xxp1;
- struct cvmx_pciercx_cfg455_s cn66xx;
- struct cvmx_pciercx_cfg455_s cn68xx;
- struct cvmx_pciercx_cfg455_s cn68xxp1;
- struct cvmx_pciercx_cfg455_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg456 {
- uint32_t u32;
- struct cvmx_pciercx_cfg456_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_4_31:28;
- uint32_t m_handle_flush:1;
- uint32_t m_dabort_4ucpl:1;
- uint32_t m_vend1_drp:1;
- uint32_t m_vend0_drp:1;
-#else
- uint32_t m_vend0_drp:1;
- uint32_t m_vend1_drp:1;
- uint32_t m_dabort_4ucpl:1;
- uint32_t m_handle_flush:1;
- uint32_t reserved_4_31:28;
-#endif
- } s;
- struct cvmx_pciercx_cfg456_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_2_31:30;
- uint32_t m_vend1_drp:1;
- uint32_t m_vend0_drp:1;
-#else
- uint32_t m_vend0_drp:1;
- uint32_t m_vend1_drp:1;
- uint32_t reserved_2_31:30;
-#endif
- } cn52xx;
- struct cvmx_pciercx_cfg456_cn52xx cn52xxp1;
- struct cvmx_pciercx_cfg456_cn52xx cn56xx;
- struct cvmx_pciercx_cfg456_cn52xx cn56xxp1;
- struct cvmx_pciercx_cfg456_s cn61xx;
- struct cvmx_pciercx_cfg456_cn52xx cn63xx;
- struct cvmx_pciercx_cfg456_cn52xx cn63xxp1;
- struct cvmx_pciercx_cfg456_s cn66xx;
- struct cvmx_pciercx_cfg456_s cn68xx;
- struct cvmx_pciercx_cfg456_cn52xx cn68xxp1;
- struct cvmx_pciercx_cfg456_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg458 {
- uint32_t u32;
- struct cvmx_pciercx_cfg458_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dbg_info_l32:32;
-#else
- uint32_t dbg_info_l32:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg458_s cn52xx;
- struct cvmx_pciercx_cfg458_s cn52xxp1;
- struct cvmx_pciercx_cfg458_s cn56xx;
- struct cvmx_pciercx_cfg458_s cn56xxp1;
- struct cvmx_pciercx_cfg458_s cn61xx;
- struct cvmx_pciercx_cfg458_s cn63xx;
- struct cvmx_pciercx_cfg458_s cn63xxp1;
- struct cvmx_pciercx_cfg458_s cn66xx;
- struct cvmx_pciercx_cfg458_s cn68xx;
- struct cvmx_pciercx_cfg458_s cn68xxp1;
- struct cvmx_pciercx_cfg458_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg459 {
- uint32_t u32;
- struct cvmx_pciercx_cfg459_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t dbg_info_u32:32;
-#else
- uint32_t dbg_info_u32:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg459_s cn52xx;
- struct cvmx_pciercx_cfg459_s cn52xxp1;
- struct cvmx_pciercx_cfg459_s cn56xx;
- struct cvmx_pciercx_cfg459_s cn56xxp1;
- struct cvmx_pciercx_cfg459_s cn61xx;
- struct cvmx_pciercx_cfg459_s cn63xx;
- struct cvmx_pciercx_cfg459_s cn63xxp1;
- struct cvmx_pciercx_cfg459_s cn66xx;
- struct cvmx_pciercx_cfg459_s cn68xx;
- struct cvmx_pciercx_cfg459_s cn68xxp1;
- struct cvmx_pciercx_cfg459_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg460 {
- uint32_t u32;
- struct cvmx_pciercx_cfg460_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t tphfcc:8;
- uint32_t tpdfcc:12;
-#else
- uint32_t tpdfcc:12;
- uint32_t tphfcc:8;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg460_s cn52xx;
- struct cvmx_pciercx_cfg460_s cn52xxp1;
- struct cvmx_pciercx_cfg460_s cn56xx;
- struct cvmx_pciercx_cfg460_s cn56xxp1;
- struct cvmx_pciercx_cfg460_s cn61xx;
- struct cvmx_pciercx_cfg460_s cn63xx;
- struct cvmx_pciercx_cfg460_s cn63xxp1;
- struct cvmx_pciercx_cfg460_s cn66xx;
- struct cvmx_pciercx_cfg460_s cn68xx;
- struct cvmx_pciercx_cfg460_s cn68xxp1;
- struct cvmx_pciercx_cfg460_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg461 {
- uint32_t u32;
- struct cvmx_pciercx_cfg461_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
-#else
- uint32_t tcdfcc:12;
- uint32_t tchfcc:8;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg461_s cn52xx;
- struct cvmx_pciercx_cfg461_s cn52xxp1;
- struct cvmx_pciercx_cfg461_s cn56xx;
- struct cvmx_pciercx_cfg461_s cn56xxp1;
- struct cvmx_pciercx_cfg461_s cn61xx;
- struct cvmx_pciercx_cfg461_s cn63xx;
- struct cvmx_pciercx_cfg461_s cn63xxp1;
- struct cvmx_pciercx_cfg461_s cn66xx;
- struct cvmx_pciercx_cfg461_s cn68xx;
- struct cvmx_pciercx_cfg461_s cn68xxp1;
- struct cvmx_pciercx_cfg461_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg462 {
- uint32_t u32;
- struct cvmx_pciercx_cfg462_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
-#else
- uint32_t tcdfcc:12;
- uint32_t tchfcc:8;
- uint32_t reserved_20_31:12;
-#endif
- } s;
- struct cvmx_pciercx_cfg462_s cn52xx;
- struct cvmx_pciercx_cfg462_s cn52xxp1;
- struct cvmx_pciercx_cfg462_s cn56xx;
- struct cvmx_pciercx_cfg462_s cn56xxp1;
- struct cvmx_pciercx_cfg462_s cn61xx;
- struct cvmx_pciercx_cfg462_s cn63xx;
- struct cvmx_pciercx_cfg462_s cn63xxp1;
- struct cvmx_pciercx_cfg462_s cn66xx;
- struct cvmx_pciercx_cfg462_s cn68xx;
- struct cvmx_pciercx_cfg462_s cn68xxp1;
- struct cvmx_pciercx_cfg462_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg463 {
- uint32_t u32;
- struct cvmx_pciercx_cfg463_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_3_31:29;
- uint32_t rqne:1;
- uint32_t trbne:1;
- uint32_t rtlpfccnr:1;
-#else
- uint32_t rtlpfccnr:1;
- uint32_t trbne:1;
- uint32_t rqne:1;
- uint32_t reserved_3_31:29;
-#endif
- } s;
- struct cvmx_pciercx_cfg463_s cn52xx;
- struct cvmx_pciercx_cfg463_s cn52xxp1;
- struct cvmx_pciercx_cfg463_s cn56xx;
- struct cvmx_pciercx_cfg463_s cn56xxp1;
- struct cvmx_pciercx_cfg463_s cn61xx;
- struct cvmx_pciercx_cfg463_s cn63xx;
- struct cvmx_pciercx_cfg463_s cn63xxp1;
- struct cvmx_pciercx_cfg463_s cn66xx;
- struct cvmx_pciercx_cfg463_s cn68xx;
- struct cvmx_pciercx_cfg463_s cn68xxp1;
- struct cvmx_pciercx_cfg463_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg464 {
- uint32_t u32;
- struct cvmx_pciercx_cfg464_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t wrr_vc3:8;
- uint32_t wrr_vc2:8;
- uint32_t wrr_vc1:8;
- uint32_t wrr_vc0:8;
-#else
- uint32_t wrr_vc0:8;
- uint32_t wrr_vc1:8;
- uint32_t wrr_vc2:8;
- uint32_t wrr_vc3:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg464_s cn52xx;
- struct cvmx_pciercx_cfg464_s cn52xxp1;
- struct cvmx_pciercx_cfg464_s cn56xx;
- struct cvmx_pciercx_cfg464_s cn56xxp1;
- struct cvmx_pciercx_cfg464_s cn61xx;
- struct cvmx_pciercx_cfg464_s cn63xx;
- struct cvmx_pciercx_cfg464_s cn63xxp1;
- struct cvmx_pciercx_cfg464_s cn66xx;
- struct cvmx_pciercx_cfg464_s cn68xx;
- struct cvmx_pciercx_cfg464_s cn68xxp1;
- struct cvmx_pciercx_cfg464_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg465 {
- uint32_t u32;
- struct cvmx_pciercx_cfg465_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t wrr_vc7:8;
- uint32_t wrr_vc6:8;
- uint32_t wrr_vc5:8;
- uint32_t wrr_vc4:8;
-#else
- uint32_t wrr_vc4:8;
- uint32_t wrr_vc5:8;
- uint32_t wrr_vc6:8;
- uint32_t wrr_vc7:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg465_s cn52xx;
- struct cvmx_pciercx_cfg465_s cn52xxp1;
- struct cvmx_pciercx_cfg465_s cn56xx;
- struct cvmx_pciercx_cfg465_s cn56xxp1;
- struct cvmx_pciercx_cfg465_s cn61xx;
- struct cvmx_pciercx_cfg465_s cn63xx;
- struct cvmx_pciercx_cfg465_s cn63xxp1;
- struct cvmx_pciercx_cfg465_s cn66xx;
- struct cvmx_pciercx_cfg465_s cn68xx;
- struct cvmx_pciercx_cfg465_s cn68xxp1;
- struct cvmx_pciercx_cfg465_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg466 {
- uint32_t u32;
- struct cvmx_pciercx_cfg466_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rx_queue_order:1;
- uint32_t type_ordering:1;
- uint32_t reserved_24_29:6;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
-#else
- uint32_t data_credits:12;
- uint32_t header_credits:8;
- uint32_t reserved_20_20:1;
- uint32_t queue_mode:3;
- uint32_t reserved_24_29:6;
- uint32_t type_ordering:1;
- uint32_t rx_queue_order:1;
-#endif
- } s;
- struct cvmx_pciercx_cfg466_s cn52xx;
- struct cvmx_pciercx_cfg466_s cn52xxp1;
- struct cvmx_pciercx_cfg466_s cn56xx;
- struct cvmx_pciercx_cfg466_s cn56xxp1;
- struct cvmx_pciercx_cfg466_s cn61xx;
- struct cvmx_pciercx_cfg466_s cn63xx;
- struct cvmx_pciercx_cfg466_s cn63xxp1;
- struct cvmx_pciercx_cfg466_s cn66xx;
- struct cvmx_pciercx_cfg466_s cn68xx;
- struct cvmx_pciercx_cfg466_s cn68xxp1;
- struct cvmx_pciercx_cfg466_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg467 {
- uint32_t u32;
- struct cvmx_pciercx_cfg467_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
-#else
- uint32_t data_credits:12;
- uint32_t header_credits:8;
- uint32_t reserved_20_20:1;
- uint32_t queue_mode:3;
- uint32_t reserved_24_31:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg467_s cn52xx;
- struct cvmx_pciercx_cfg467_s cn52xxp1;
- struct cvmx_pciercx_cfg467_s cn56xx;
- struct cvmx_pciercx_cfg467_s cn56xxp1;
- struct cvmx_pciercx_cfg467_s cn61xx;
- struct cvmx_pciercx_cfg467_s cn63xx;
- struct cvmx_pciercx_cfg467_s cn63xxp1;
- struct cvmx_pciercx_cfg467_s cn66xx;
- struct cvmx_pciercx_cfg467_s cn68xx;
- struct cvmx_pciercx_cfg467_s cn68xxp1;
- struct cvmx_pciercx_cfg467_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg468 {
- uint32_t u32;
- struct cvmx_pciercx_cfg468_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
-#else
- uint32_t data_credits:12;
- uint32_t header_credits:8;
- uint32_t reserved_20_20:1;
- uint32_t queue_mode:3;
- uint32_t reserved_24_31:8;
-#endif
- } s;
- struct cvmx_pciercx_cfg468_s cn52xx;
- struct cvmx_pciercx_cfg468_s cn52xxp1;
- struct cvmx_pciercx_cfg468_s cn56xx;
- struct cvmx_pciercx_cfg468_s cn56xxp1;
- struct cvmx_pciercx_cfg468_s cn61xx;
- struct cvmx_pciercx_cfg468_s cn63xx;
- struct cvmx_pciercx_cfg468_s cn63xxp1;
- struct cvmx_pciercx_cfg468_s cn66xx;
- struct cvmx_pciercx_cfg468_s cn68xx;
- struct cvmx_pciercx_cfg468_s cn68xxp1;
- struct cvmx_pciercx_cfg468_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg490 {
- uint32_t u32;
- struct cvmx_pciercx_cfg490_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
-#else
- uint32_t data_depth:14;
- uint32_t reserved_14_15:2;
- uint32_t header_depth:10;
- uint32_t reserved_26_31:6;
-#endif
+ __BITFIELD_FIELD(uint32_t m_cfg0_filt:1,
+ __BITFIELD_FIELD(uint32_t m_io_filt:1,
+ __BITFIELD_FIELD(uint32_t msg_ctrl:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_ecrc_filt:1,
+ __BITFIELD_FIELD(uint32_t m_ecrc_filt:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_len_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_attr_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_tc_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_fun_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_rid_err:1,
+ __BITFIELD_FIELD(uint32_t m_cpl_tag_err:1,
+ __BITFIELD_FIELD(uint32_t m_lk_filt:1,
+ __BITFIELD_FIELD(uint32_t m_cfg1_filt:1,
+ __BITFIELD_FIELD(uint32_t m_bar_match:1,
+ __BITFIELD_FIELD(uint32_t m_pois_filt:1,
+ __BITFIELD_FIELD(uint32_t m_fun:1,
+ __BITFIELD_FIELD(uint32_t dfcwt:1,
+ __BITFIELD_FIELD(uint32_t reserved_11_14:4,
+ __BITFIELD_FIELD(uint32_t skpiv:11,
+ ;)))))))))))))))))))
} s;
- struct cvmx_pciercx_cfg490_s cn52xx;
- struct cvmx_pciercx_cfg490_s cn52xxp1;
- struct cvmx_pciercx_cfg490_s cn56xx;
- struct cvmx_pciercx_cfg490_s cn56xxp1;
- struct cvmx_pciercx_cfg490_s cn61xx;
- struct cvmx_pciercx_cfg490_s cn63xx;
- struct cvmx_pciercx_cfg490_s cn63xxp1;
- struct cvmx_pciercx_cfg490_s cn66xx;
- struct cvmx_pciercx_cfg490_s cn68xx;
- struct cvmx_pciercx_cfg490_s cn68xxp1;
- struct cvmx_pciercx_cfg490_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg491 {
- uint32_t u32;
- struct cvmx_pciercx_cfg491_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
-#else
- uint32_t data_depth:14;
- uint32_t reserved_14_15:2;
- uint32_t header_depth:10;
- uint32_t reserved_26_31:6;
-#endif
- } s;
- struct cvmx_pciercx_cfg491_s cn52xx;
- struct cvmx_pciercx_cfg491_s cn52xxp1;
- struct cvmx_pciercx_cfg491_s cn56xx;
- struct cvmx_pciercx_cfg491_s cn56xxp1;
- struct cvmx_pciercx_cfg491_s cn61xx;
- struct cvmx_pciercx_cfg491_s cn63xx;
- struct cvmx_pciercx_cfg491_s cn63xxp1;
- struct cvmx_pciercx_cfg491_s cn66xx;
- struct cvmx_pciercx_cfg491_s cn68xx;
- struct cvmx_pciercx_cfg491_s cn68xxp1;
- struct cvmx_pciercx_cfg491_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg492 {
- uint32_t u32;
- struct cvmx_pciercx_cfg492_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
-#else
- uint32_t data_depth:14;
- uint32_t reserved_14_15:2;
- uint32_t header_depth:10;
- uint32_t reserved_26_31:6;
-#endif
- } s;
- struct cvmx_pciercx_cfg492_s cn52xx;
- struct cvmx_pciercx_cfg492_s cn52xxp1;
- struct cvmx_pciercx_cfg492_s cn56xx;
- struct cvmx_pciercx_cfg492_s cn56xxp1;
- struct cvmx_pciercx_cfg492_s cn61xx;
- struct cvmx_pciercx_cfg492_s cn63xx;
- struct cvmx_pciercx_cfg492_s cn63xxp1;
- struct cvmx_pciercx_cfg492_s cn66xx;
- struct cvmx_pciercx_cfg492_s cn68xx;
- struct cvmx_pciercx_cfg492_s cn68xxp1;
- struct cvmx_pciercx_cfg492_s cnf71xx;
};
union cvmx_pciercx_cfg515 {
uint32_t u32;
struct cvmx_pciercx_cfg515_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t reserved_21_31:11;
- uint32_t s_d_e:1;
- uint32_t ctcrb:1;
- uint32_t cpyts:1;
- uint32_t dsc:1;
- uint32_t le:9;
- uint32_t n_fts:8;
-#else
- uint32_t n_fts:8;
- uint32_t le:9;
- uint32_t dsc:1;
- uint32_t cpyts:1;
- uint32_t ctcrb:1;
- uint32_t s_d_e:1;
- uint32_t reserved_21_31:11;
-#endif
- } s;
- struct cvmx_pciercx_cfg515_s cn61xx;
- struct cvmx_pciercx_cfg515_s cn63xx;
- struct cvmx_pciercx_cfg515_s cn63xxp1;
- struct cvmx_pciercx_cfg515_s cn66xx;
- struct cvmx_pciercx_cfg515_s cn68xx;
- struct cvmx_pciercx_cfg515_s cn68xxp1;
- struct cvmx_pciercx_cfg515_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg516 {
- uint32_t u32;
- struct cvmx_pciercx_cfg516_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t phy_stat:32;
-#else
- uint32_t phy_stat:32;
-#endif
- } s;
- struct cvmx_pciercx_cfg516_s cn52xx;
- struct cvmx_pciercx_cfg516_s cn52xxp1;
- struct cvmx_pciercx_cfg516_s cn56xx;
- struct cvmx_pciercx_cfg516_s cn56xxp1;
- struct cvmx_pciercx_cfg516_s cn61xx;
- struct cvmx_pciercx_cfg516_s cn63xx;
- struct cvmx_pciercx_cfg516_s cn63xxp1;
- struct cvmx_pciercx_cfg516_s cn66xx;
- struct cvmx_pciercx_cfg516_s cn68xx;
- struct cvmx_pciercx_cfg516_s cn68xxp1;
- struct cvmx_pciercx_cfg516_s cnf71xx;
-};
-
-union cvmx_pciercx_cfg517 {
- uint32_t u32;
- struct cvmx_pciercx_cfg517_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t phy_ctrl:32;
-#else
- uint32_t phy_ctrl:32;
-#endif
+ __BITFIELD_FIELD(uint32_t reserved_21_31:11,
+ __BITFIELD_FIELD(uint32_t s_d_e:1,
+ __BITFIELD_FIELD(uint32_t ctcrb:1,
+ __BITFIELD_FIELD(uint32_t cpyts:1,
+ __BITFIELD_FIELD(uint32_t dsc:1,
+ __BITFIELD_FIELD(uint32_t le:9,
+ __BITFIELD_FIELD(uint32_t n_fts:8,
+ ;)))))))
} s;
- struct cvmx_pciercx_cfg517_s cn52xx;
- struct cvmx_pciercx_cfg517_s cn52xxp1;
- struct cvmx_pciercx_cfg517_s cn56xx;
- struct cvmx_pciercx_cfg517_s cn56xxp1;
- struct cvmx_pciercx_cfg517_s cn61xx;
- struct cvmx_pciercx_cfg517_s cn63xx;
- struct cvmx_pciercx_cfg517_s cn63xxp1;
- struct cvmx_pciercx_cfg517_s cn66xx;
- struct cvmx_pciercx_cfg517_s cn68xx;
- struct cvmx_pciercx_cfg517_s cn68xxp1;
- struct cvmx_pciercx_cfg517_s cnf71xx;
};
#endif
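The conversions above collapse each pair of endianness-ordered bitfield lists into a single __BITFIELD_FIELD chain. As a rough sketch of the mechanism (assuming the macro matches its definition in the <uapi/asm/bitfield.h> header that the next file includes), the macro emits its first argument either before or after the remaining fields, depending on which endianness the compiler defines:

	/* Simplified form of the MIPS uapi bitfield helper. */
	#ifdef __MIPSEB__
	#define __BITFIELD_FIELD(field, more)	field; more
	#else
	#define __BITFIELD_FIELD(field, more)	more field;
	#endif

With that definition, a nested chain such as

	__BITFIELD_FIELD(uint32_t rtl:16,
	__BITFIELD_FIELD(uint32_t rtltl:16,
	;))

expands to the fields in declaration order on big-endian and in reverse order on little-endian (the bare ';' closing the chain becomes a harmless empty declaration), reproducing both branches of the #ifdef __BIG_ENDIAN_BITFIELD blocks with one field list.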
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index e697c2f52a62..52cf96ea43e5 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2012 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -28,3494 +28,101 @@
#ifndef __CVMX_SLI_DEFS_H__
#define __CVMX_SLI_DEFS_H__
-#define CVMX_SLI_BIST_STATUS (0x0000000000000580ull)
-#define CVMX_SLI_CTL_PORTX(offset) (0x0000000000000050ull + ((offset) & 3) * 16)
-#define CVMX_SLI_CTL_STATUS (0x0000000000000570ull)
-#define CVMX_SLI_DATA_OUT_CNT (0x00000000000005F0ull)
-#define CVMX_SLI_DBG_DATA (0x0000000000000310ull)
-#define CVMX_SLI_DBG_SELECT (0x0000000000000300ull)
-#define CVMX_SLI_DMAX_CNT(offset) (0x0000000000000400ull + ((offset) & 1) * 16)
-#define CVMX_SLI_DMAX_INT_LEVEL(offset) (0x00000000000003E0ull + ((offset) & 1) * 16)
-#define CVMX_SLI_DMAX_TIM(offset) (0x0000000000000420ull + ((offset) & 1) * 16)
-#define CVMX_SLI_INT_ENB_CIU (0x0000000000003CD0ull)
-#define CVMX_SLI_INT_ENB_PORTX(offset) (0x0000000000000340ull + ((offset) & 1) * 16)
-#define CVMX_SLI_INT_SUM (0x0000000000000330ull)
-#define CVMX_SLI_LAST_WIN_RDATA0 (0x0000000000000600ull)
-#define CVMX_SLI_LAST_WIN_RDATA1 (0x0000000000000610ull)
-#define CVMX_SLI_LAST_WIN_RDATA2 (0x00000000000006C0ull)
-#define CVMX_SLI_LAST_WIN_RDATA3 (0x00000000000006D0ull)
-#define CVMX_SLI_MAC_CREDIT_CNT (0x0000000000003D70ull)
-#define CVMX_SLI_MAC_CREDIT_CNT2 (0x0000000000003E10ull)
-#define CVMX_SLI_MAC_NUMBER (0x0000000000003E00ull)
-#define CVMX_SLI_MEM_ACCESS_CTL (0x00000000000002F0ull)
-#define CVMX_SLI_MEM_ACCESS_SUBIDX(offset) (0x00000000000000E0ull + ((offset) & 31) * 16 - 16*12)
-#define CVMX_SLI_MSI_ENB0 (0x0000000000003C50ull)
-#define CVMX_SLI_MSI_ENB1 (0x0000000000003C60ull)
-#define CVMX_SLI_MSI_ENB2 (0x0000000000003C70ull)
-#define CVMX_SLI_MSI_ENB3 (0x0000000000003C80ull)
-#define CVMX_SLI_MSI_RCV0 (0x0000000000003C10ull)
-#define CVMX_SLI_MSI_RCV1 (0x0000000000003C20ull)
-#define CVMX_SLI_MSI_RCV2 (0x0000000000003C30ull)
-#define CVMX_SLI_MSI_RCV3 (0x0000000000003C40ull)
-#define CVMX_SLI_MSI_RD_MAP (0x0000000000003CA0ull)
-#define CVMX_SLI_MSI_W1C_ENB0 (0x0000000000003CF0ull)
-#define CVMX_SLI_MSI_W1C_ENB1 (0x0000000000003D00ull)
-#define CVMX_SLI_MSI_W1C_ENB2 (0x0000000000003D10ull)
-#define CVMX_SLI_MSI_W1C_ENB3 (0x0000000000003D20ull)
-#define CVMX_SLI_MSI_W1S_ENB0 (0x0000000000003D30ull)
-#define CVMX_SLI_MSI_W1S_ENB1 (0x0000000000003D40ull)
-#define CVMX_SLI_MSI_W1S_ENB2 (0x0000000000003D50ull)
-#define CVMX_SLI_MSI_W1S_ENB3 (0x0000000000003D60ull)
-#define CVMX_SLI_MSI_WR_MAP (0x0000000000003C90ull)
-#define CVMX_SLI_PCIE_MSI_RCV (0x0000000000003CB0ull)
-#define CVMX_SLI_PCIE_MSI_RCV_B1 (0x0000000000000650ull)
-#define CVMX_SLI_PCIE_MSI_RCV_B2 (0x0000000000000660ull)
-#define CVMX_SLI_PCIE_MSI_RCV_B3 (0x0000000000000670ull)
-#define CVMX_SLI_PKTX_CNTS(offset) (0x0000000000002400ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_BADDR(offset) (0x0000000000002800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (0x0000000000002C00ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (0x0000000000003000ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_INSTR_HEADER(offset) (0x0000000000003400ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_IN_BP(offset) (0x0000000000003800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_OUT_SIZE(offset) (0x0000000000000C00ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_SLIST_BADDR(offset) (0x0000000000001400ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (0x0000000000001800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (0x0000000000001C00ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKT_CNT_INT (0x0000000000001130ull)
-#define CVMX_SLI_PKT_CNT_INT_ENB (0x0000000000001150ull)
-#define CVMX_SLI_PKT_CTL (0x0000000000001220ull)
-#define CVMX_SLI_PKT_DATA_OUT_ES (0x00000000000010B0ull)
-#define CVMX_SLI_PKT_DATA_OUT_NS (0x00000000000010A0ull)
-#define CVMX_SLI_PKT_DATA_OUT_ROR (0x0000000000001090ull)
-#define CVMX_SLI_PKT_DPADDR (0x0000000000001080ull)
-#define CVMX_SLI_PKT_INPUT_CONTROL (0x0000000000001170ull)
-#define CVMX_SLI_PKT_INSTR_ENB (0x0000000000001000ull)
-#define CVMX_SLI_PKT_INSTR_RD_SIZE (0x00000000000011A0ull)
-#define CVMX_SLI_PKT_INSTR_SIZE (0x0000000000001020ull)
-#define CVMX_SLI_PKT_INT_LEVELS (0x0000000000001120ull)
-#define CVMX_SLI_PKT_IN_BP (0x0000000000001210ull)
-#define CVMX_SLI_PKT_IN_DONEX_CNTS(offset) (0x0000000000002000ull + ((offset) & 31) * 16)
-#define CVMX_SLI_PKT_IN_INSTR_COUNTS (0x0000000000001200ull)
-#define CVMX_SLI_PKT_IN_PCIE_PORT (0x00000000000011B0ull)
-#define CVMX_SLI_PKT_IPTR (0x0000000000001070ull)
-#define CVMX_SLI_PKT_OUTPUT_WMARK (0x0000000000001180ull)
-#define CVMX_SLI_PKT_OUT_BMODE (0x00000000000010D0ull)
-#define CVMX_SLI_PKT_OUT_BP_EN (0x0000000000001240ull)
-#define CVMX_SLI_PKT_OUT_ENB (0x0000000000001010ull)
-#define CVMX_SLI_PKT_PCIE_PORT (0x00000000000010E0ull)
-#define CVMX_SLI_PKT_PORT_IN_RST (0x00000000000011F0ull)
-#define CVMX_SLI_PKT_SLIST_ES (0x0000000000001050ull)
-#define CVMX_SLI_PKT_SLIST_NS (0x0000000000001040ull)
-#define CVMX_SLI_PKT_SLIST_ROR (0x0000000000001030ull)
-#define CVMX_SLI_PKT_TIME_INT (0x0000000000001140ull)
-#define CVMX_SLI_PKT_TIME_INT_ENB (0x0000000000001160ull)
-#define CVMX_SLI_PORTX_PKIND(offset) (0x0000000000000800ull + ((offset) & 31) * 16)
-#define CVMX_SLI_S2M_PORTX_CTL(offset) (0x0000000000003D80ull + ((offset) & 3) * 16)
-#define CVMX_SLI_SCRATCH_1 (0x00000000000003C0ull)
-#define CVMX_SLI_SCRATCH_2 (0x00000000000003D0ull)
-#define CVMX_SLI_STATE1 (0x0000000000000620ull)
-#define CVMX_SLI_STATE2 (0x0000000000000630ull)
-#define CVMX_SLI_STATE3 (0x0000000000000640ull)
-#define CVMX_SLI_TX_PIPE (0x0000000000001230ull)
-#define CVMX_SLI_WINDOW_CTL (0x00000000000002E0ull)
-#define CVMX_SLI_WIN_RD_ADDR (0x0000000000000010ull)
-#define CVMX_SLI_WIN_RD_DATA (0x0000000000000040ull)
-#define CVMX_SLI_WIN_WR_ADDR (0x0000000000000000ull)
-#define CVMX_SLI_WIN_WR_DATA (0x0000000000000020ull)
-#define CVMX_SLI_WIN_WR_MASK (0x0000000000000030ull)
+#include <uapi/asm/bitfield.h>
+
+#define CVMX_SLI_PCIE_MSI_RCV CVMX_SLI_PCIE_MSI_RCV_FUNC()
+static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
+{
+ switch (cvmx_get_octeon_family()) {
+ case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN70XX & OCTEON_FAMILY_MASK:
+ return 0x0000000000003CB0ull;
+ case OCTEON_CNF75XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN73XX & OCTEON_FAMILY_MASK:
+ case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
+ return 0x0000000000003CB0ull;
+ default:
+ return 0x0000000000023CB0ull;
+ }
+}
-union cvmx_sli_bist_status {
- uint64_t u64;
- struct cvmx_sli_bist_status_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ncb_req:1;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p1_o:1;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t reserved_19_24:6;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t reserved_6_8:3;
- uint64_t dsi1_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi0_0:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
-#else
- uint64_t ncb_cmd:1;
- uint64_t msi:1;
- uint64_t dsi0_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi1_1:1;
- uint64_t reserved_6_8:3;
- uint64_t p2n1_p1:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_c0:1;
- uint64_t reserved_19_24:6;
- uint64_t cpl_p1:1;
- uint64_t cpl_p0:1;
- uint64_t n2p1_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p0_c:1;
- uint64_t ncb_req:1;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_bist_status_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t reserved_27_28:2;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t reserved_19_24:6;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t reserved_6_8:3;
- uint64_t dsi1_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi0_0:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
-#else
- uint64_t ncb_cmd:1;
- uint64_t msi:1;
- uint64_t dsi0_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi1_1:1;
- uint64_t reserved_6_8:3;
- uint64_t p2n1_p1:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_c0:1;
- uint64_t reserved_19_24:6;
- uint64_t cpl_p1:1;
- uint64_t cpl_p0:1;
- uint64_t reserved_27_28:2;
- uint64_t n2p0_o:1;
- uint64_t n2p0_c:1;
- uint64_t reserved_31_63:33;
-#endif
- } cn61xx;
- struct cvmx_sli_bist_status_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t n2p0_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p1_o:1;
- uint64_t cpl_p0:1;
- uint64_t cpl_p1:1;
- uint64_t reserved_19_24:6;
- uint64_t p2n0_c0:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_p1:1;
- uint64_t reserved_6_8:3;
- uint64_t dsi1_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi0_0:1;
- uint64_t msi:1;
- uint64_t ncb_cmd:1;
-#else
- uint64_t ncb_cmd:1;
- uint64_t msi:1;
- uint64_t dsi0_0:1;
- uint64_t dsi0_1:1;
- uint64_t dsi1_0:1;
- uint64_t dsi1_1:1;
- uint64_t reserved_6_8:3;
- uint64_t p2n1_p1:1;
- uint64_t p2n1_p0:1;
- uint64_t p2n1_n:1;
- uint64_t p2n1_c1:1;
- uint64_t p2n1_c0:1;
- uint64_t p2n0_p1:1;
- uint64_t p2n0_p0:1;
- uint64_t p2n0_n:1;
- uint64_t p2n0_c1:1;
- uint64_t p2n0_c0:1;
- uint64_t reserved_19_24:6;
- uint64_t cpl_p1:1;
- uint64_t cpl_p0:1;
- uint64_t n2p1_o:1;
- uint64_t n2p1_c:1;
- uint64_t n2p0_o:1;
- uint64_t n2p0_c:1;
- uint64_t reserved_31_63:33;
-#endif
- } cn63xx;
- struct cvmx_sli_bist_status_cn63xx cn63xxp1;
- struct cvmx_sli_bist_status_cn61xx cn66xx;
- struct cvmx_sli_bist_status_s cn68xx;
- struct cvmx_sli_bist_status_s cn68xxp1;
- struct cvmx_sli_bist_status_cn61xx cnf71xx;
-};
union cvmx_sli_ctl_portx {
uint64_t u64;
struct cvmx_sli_ctl_portx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_22_63:42;
- uint64_t intd:1;
- uint64_t intc:1;
- uint64_t intb:1;
- uint64_t inta:1;
- uint64_t dis_port:1;
- uint64_t waitl_com:1;
- uint64_t intd_map:2;
- uint64_t intc_map:2;
- uint64_t intb_map:2;
- uint64_t inta_map:2;
- uint64_t ctlp_ro:1;
- uint64_t reserved_6_6:1;
- uint64_t ptlp_ro:1;
- uint64_t reserved_1_4:4;
- uint64_t wait_com:1;
-#else
- uint64_t wait_com:1;
- uint64_t reserved_1_4:4;
- uint64_t ptlp_ro:1;
- uint64_t reserved_6_6:1;
- uint64_t ctlp_ro:1;
- uint64_t inta_map:2;
- uint64_t intb_map:2;
- uint64_t intc_map:2;
- uint64_t intd_map:2;
- uint64_t waitl_com:1;
- uint64_t dis_port:1;
- uint64_t inta:1;
- uint64_t intb:1;
- uint64_t intc:1;
- uint64_t intd:1;
- uint64_t reserved_22_63:42;
-#endif
- } s;
- struct cvmx_sli_ctl_portx_s cn61xx;
- struct cvmx_sli_ctl_portx_s cn63xx;
- struct cvmx_sli_ctl_portx_s cn63xxp1;
- struct cvmx_sli_ctl_portx_s cn66xx;
- struct cvmx_sli_ctl_portx_s cn68xx;
- struct cvmx_sli_ctl_portx_s cn68xxp1;
- struct cvmx_sli_ctl_portx_s cnf71xx;
-};
-
-union cvmx_sli_ctl_status {
- uint64_t u64;
- struct cvmx_sli_ctl_status_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t p1_ntags:6;
- uint64_t p0_ntags:6;
- uint64_t chip_rev:8;
-#else
- uint64_t chip_rev:8;
- uint64_t p0_ntags:6;
- uint64_t p1_ntags:6;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_sli_ctl_status_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t p0_ntags:6;
- uint64_t chip_rev:8;
-#else
- uint64_t chip_rev:8;
- uint64_t p0_ntags:6;
- uint64_t reserved_14_63:50;
-#endif
- } cn61xx;
- struct cvmx_sli_ctl_status_s cn63xx;
- struct cvmx_sli_ctl_status_s cn63xxp1;
- struct cvmx_sli_ctl_status_cn61xx cn66xx;
- struct cvmx_sli_ctl_status_s cn68xx;
- struct cvmx_sli_ctl_status_s cn68xxp1;
- struct cvmx_sli_ctl_status_cn61xx cnf71xx;
-};
-
-union cvmx_sli_data_out_cnt {
- uint64_t u64;
- struct cvmx_sli_data_out_cnt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_44_63:20;
- uint64_t p1_ucnt:16;
- uint64_t p1_fcnt:6;
- uint64_t p0_ucnt:16;
- uint64_t p0_fcnt:6;
-#else
- uint64_t p0_fcnt:6;
- uint64_t p0_ucnt:16;
- uint64_t p1_fcnt:6;
- uint64_t p1_ucnt:16;
- uint64_t reserved_44_63:20;
-#endif
- } s;
- struct cvmx_sli_data_out_cnt_s cn61xx;
- struct cvmx_sli_data_out_cnt_s cn63xx;
- struct cvmx_sli_data_out_cnt_s cn63xxp1;
- struct cvmx_sli_data_out_cnt_s cn66xx;
- struct cvmx_sli_data_out_cnt_s cn68xx;
- struct cvmx_sli_data_out_cnt_s cn68xxp1;
- struct cvmx_sli_data_out_cnt_s cnf71xx;
-};
-
-union cvmx_sli_dbg_data {
- uint64_t u64;
- struct cvmx_sli_dbg_data_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t dsel_ext:1;
- uint64_t data:17;
-#else
- uint64_t data:17;
- uint64_t dsel_ext:1;
- uint64_t reserved_18_63:46;
-#endif
- } s;
- struct cvmx_sli_dbg_data_s cn61xx;
- struct cvmx_sli_dbg_data_s cn63xx;
- struct cvmx_sli_dbg_data_s cn63xxp1;
- struct cvmx_sli_dbg_data_s cn66xx;
- struct cvmx_sli_dbg_data_s cn68xx;
- struct cvmx_sli_dbg_data_s cn68xxp1;
- struct cvmx_sli_dbg_data_s cnf71xx;
-};
-
-union cvmx_sli_dbg_select {
- uint64_t u64;
- struct cvmx_sli_dbg_select_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_33_63:31;
- uint64_t adbg_sel:1;
- uint64_t dbg_sel:32;
-#else
- uint64_t dbg_sel:32;
- uint64_t adbg_sel:1;
- uint64_t reserved_33_63:31;
-#endif
- } s;
- struct cvmx_sli_dbg_select_s cn61xx;
- struct cvmx_sli_dbg_select_s cn63xx;
- struct cvmx_sli_dbg_select_s cn63xxp1;
- struct cvmx_sli_dbg_select_s cn66xx;
- struct cvmx_sli_dbg_select_s cn68xx;
- struct cvmx_sli_dbg_select_s cn68xxp1;
- struct cvmx_sli_dbg_select_s cnf71xx;
-};
-
-union cvmx_sli_dmax_cnt {
- uint64_t u64;
- struct cvmx_sli_dmax_cnt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_dmax_cnt_s cn61xx;
- struct cvmx_sli_dmax_cnt_s cn63xx;
- struct cvmx_sli_dmax_cnt_s cn63xxp1;
- struct cvmx_sli_dmax_cnt_s cn66xx;
- struct cvmx_sli_dmax_cnt_s cn68xx;
- struct cvmx_sli_dmax_cnt_s cn68xxp1;
- struct cvmx_sli_dmax_cnt_s cnf71xx;
-};
-
-union cvmx_sli_dmax_int_level {
- uint64_t u64;
- struct cvmx_sli_dmax_int_level_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t time:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t time:32;
-#endif
- } s;
- struct cvmx_sli_dmax_int_level_s cn61xx;
- struct cvmx_sli_dmax_int_level_s cn63xx;
- struct cvmx_sli_dmax_int_level_s cn63xxp1;
- struct cvmx_sli_dmax_int_level_s cn66xx;
- struct cvmx_sli_dmax_int_level_s cn68xx;
- struct cvmx_sli_dmax_int_level_s cn68xxp1;
- struct cvmx_sli_dmax_int_level_s cnf71xx;
-};
-
-union cvmx_sli_dmax_tim {
- uint64_t u64;
- struct cvmx_sli_dmax_tim_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t tim:32;
-#else
- uint64_t tim:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_dmax_tim_s cn61xx;
- struct cvmx_sli_dmax_tim_s cn63xx;
- struct cvmx_sli_dmax_tim_s cn63xxp1;
- struct cvmx_sli_dmax_tim_s cn66xx;
- struct cvmx_sli_dmax_tim_s cn68xx;
- struct cvmx_sli_dmax_tim_s cn68xxp1;
- struct cvmx_sli_dmax_tim_s cnf71xx;
-};
-
-union cvmx_sli_int_enb_ciu {
- uint64_t u64;
- struct cvmx_sli_int_enb_ciu_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t reserved_18_19:2;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_19:2;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_sli_int_enb_ciu_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t reserved_18_19:2;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_19:2;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn61xx;
- struct cvmx_sli_int_enb_ciu_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_18_31:14;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_31:14;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn63xx;
- struct cvmx_sli_int_enb_ciu_cn63xx cn63xxp1;
- struct cvmx_sli_int_enb_ciu_cn61xx cn66xx;
- struct cvmx_sli_int_enb_ciu_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_18_31:14;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t reserved_18_31:14;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn68xx;
- struct cvmx_sli_int_enb_ciu_cn68xx cn68xxp1;
- struct cvmx_sli_int_enb_ciu_cn61xx cnf71xx;
-};
-
-union cvmx_sli_int_enb_portx {
- uint64_t u64;
- struct cvmx_sli_int_enb_portx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_sli_int_enb_portx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn61xx;
- struct cvmx_sli_int_enb_portx_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn63xx;
- struct cvmx_sli_int_enb_portx_cn63xx cn63xxp1;
- struct cvmx_sli_int_enb_portx_cn61xx cn66xx;
- struct cvmx_sli_int_enb_portx_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn68xx;
- struct cvmx_sli_int_enb_portx_cn68xx cn68xxp1;
- struct cvmx_sli_int_enb_portx_cn61xx cnf71xx;
-};
-
-union cvmx_sli_int_sum {
- uint64_t u64;
- struct cvmx_sli_int_sum_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } s;
- struct cvmx_sli_int_sum_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t sprt3_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_28_31:4;
- uint64_t m3_un_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_up_b0:1;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t m2_up_b0:1;
- uint64_t m2_up_wi:1;
- uint64_t m2_un_b0:1;
- uint64_t m2_un_wi:1;
- uint64_t m3_up_b0:1;
- uint64_t m3_up_wi:1;
- uint64_t m3_un_b0:1;
- uint64_t m3_un_wi:1;
- uint64_t reserved_28_31:4;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t sprt2_err:1;
- uint64_t sprt3_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn61xx;
- struct cvmx_sli_int_sum_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_61_63:3;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t pin_bp:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t pin_bp:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t reserved_61_63:3;
-#endif
- } cn63xx;
- struct cvmx_sli_int_sum_cn63xx cn63xxp1;
- struct cvmx_sli_int_sum_cn61xx cn66xx;
- struct cvmx_sli_int_sum_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_62_63:2;
- uint64_t pipe_err:1;
- uint64_t ill_pad:1;
- uint64_t reserved_58_59:2;
- uint64_t sprt1_err:1;
- uint64_t sprt0_err:1;
- uint64_t pins_err:1;
- uint64_t pop_err:1;
- uint64_t pdi_err:1;
- uint64_t pgl_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pout_err:1;
- uint64_t psldbof:1;
- uint64_t pidbof:1;
- uint64_t reserved_38_47:10;
- uint64_t dtime:2;
- uint64_t dcnt:2;
- uint64_t dmafi:2;
- uint64_t reserved_20_31:12;
- uint64_t mac1_int:1;
- uint64_t mac0_int:1;
- uint64_t mio_int1:1;
- uint64_t mio_int0:1;
- uint64_t m1_un_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_up_b0:1;
- uint64_t reserved_6_7:2;
- uint64_t ptime:1;
- uint64_t pcnt:1;
- uint64_t iob2big:1;
- uint64_t bar0_to:1;
- uint64_t reserved_1_1:1;
- uint64_t rml_to:1;
-#else
- uint64_t rml_to:1;
- uint64_t reserved_1_1:1;
- uint64_t bar0_to:1;
- uint64_t iob2big:1;
- uint64_t pcnt:1;
- uint64_t ptime:1;
- uint64_t reserved_6_7:2;
- uint64_t m0_up_b0:1;
- uint64_t m0_up_wi:1;
- uint64_t m0_un_b0:1;
- uint64_t m0_un_wi:1;
- uint64_t m1_up_b0:1;
- uint64_t m1_up_wi:1;
- uint64_t m1_un_b0:1;
- uint64_t m1_un_wi:1;
- uint64_t mio_int0:1;
- uint64_t mio_int1:1;
- uint64_t mac0_int:1;
- uint64_t mac1_int:1;
- uint64_t reserved_20_31:12;
- uint64_t dmafi:2;
- uint64_t dcnt:2;
- uint64_t dtime:2;
- uint64_t reserved_38_47:10;
- uint64_t pidbof:1;
- uint64_t psldbof:1;
- uint64_t pout_err:1;
- uint64_t reserved_51_51:1;
- uint64_t pgl_err:1;
- uint64_t pdi_err:1;
- uint64_t pop_err:1;
- uint64_t pins_err:1;
- uint64_t sprt0_err:1;
- uint64_t sprt1_err:1;
- uint64_t reserved_58_59:2;
- uint64_t ill_pad:1;
- uint64_t pipe_err:1;
- uint64_t reserved_62_63:2;
-#endif
- } cn68xx;
- struct cvmx_sli_int_sum_cn68xx cn68xxp1;
- struct cvmx_sli_int_sum_cn61xx cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata0 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_22_63:42,
+ __BITFIELD_FIELD(uint64_t intd:1,
+ __BITFIELD_FIELD(uint64_t intc:1,
+ __BITFIELD_FIELD(uint64_t intb:1,
+ __BITFIELD_FIELD(uint64_t inta:1,
+ __BITFIELD_FIELD(uint64_t dis_port:1,
+ __BITFIELD_FIELD(uint64_t waitl_com:1,
+ __BITFIELD_FIELD(uint64_t intd_map:2,
+ __BITFIELD_FIELD(uint64_t intc_map:2,
+ __BITFIELD_FIELD(uint64_t intb_map:2,
+ __BITFIELD_FIELD(uint64_t inta_map:2,
+ __BITFIELD_FIELD(uint64_t ctlp_ro:1,
+ __BITFIELD_FIELD(uint64_t reserved_6_6:1,
+ __BITFIELD_FIELD(uint64_t ptlp_ro:1,
+ __BITFIELD_FIELD(uint64_t reserved_1_4:4,
+ __BITFIELD_FIELD(uint64_t wait_com:1,
+ ;))))))))))))))))
} s;
- struct cvmx_sli_last_win_rdata0_s cn61xx;
- struct cvmx_sli_last_win_rdata0_s cn63xx;
- struct cvmx_sli_last_win_rdata0_s cn63xxp1;
- struct cvmx_sli_last_win_rdata0_s cn66xx;
- struct cvmx_sli_last_win_rdata0_s cn68xx;
- struct cvmx_sli_last_win_rdata0_s cn68xxp1;
- struct cvmx_sli_last_win_rdata0_s cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata1 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_last_win_rdata1_s cn61xx;
- struct cvmx_sli_last_win_rdata1_s cn63xx;
- struct cvmx_sli_last_win_rdata1_s cn63xxp1;
- struct cvmx_sli_last_win_rdata1_s cn66xx;
- struct cvmx_sli_last_win_rdata1_s cn68xx;
- struct cvmx_sli_last_win_rdata1_s cn68xxp1;
- struct cvmx_sli_last_win_rdata1_s cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata2 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_last_win_rdata2_s cn61xx;
- struct cvmx_sli_last_win_rdata2_s cn66xx;
- struct cvmx_sli_last_win_rdata2_s cnf71xx;
-};
-
-union cvmx_sli_last_win_rdata3 {
- uint64_t u64;
- struct cvmx_sli_last_win_rdata3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_last_win_rdata3_s cn61xx;
- struct cvmx_sli_last_win_rdata3_s cn66xx;
- struct cvmx_sli_last_win_rdata3_s cnf71xx;
-};
-
-union cvmx_sli_mac_credit_cnt {
- uint64_t u64;
- struct cvmx_sli_mac_credit_cnt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t p1_c_d:1;
- uint64_t p1_n_d:1;
- uint64_t p1_p_d:1;
- uint64_t p0_c_d:1;
- uint64_t p0_n_d:1;
- uint64_t p0_p_d:1;
- uint64_t p1_ccnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_pcnt:8;
-#else
- uint64_t p0_pcnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_ccnt:8;
- uint64_t p0_p_d:1;
- uint64_t p0_n_d:1;
- uint64_t p0_c_d:1;
- uint64_t p1_p_d:1;
- uint64_t p1_n_d:1;
- uint64_t p1_c_d:1;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_mac_credit_cnt_s cn61xx;
- struct cvmx_sli_mac_credit_cnt_s cn63xx;
- struct cvmx_sli_mac_credit_cnt_cn63xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t p1_ccnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_pcnt:8;
-#else
- uint64_t p0_pcnt:8;
- uint64_t p0_ncnt:8;
- uint64_t p0_ccnt:8;
- uint64_t p1_pcnt:8;
- uint64_t p1_ncnt:8;
- uint64_t p1_ccnt:8;
- uint64_t reserved_48_63:16;
-#endif
- } cn63xxp1;
- struct cvmx_sli_mac_credit_cnt_s cn66xx;
- struct cvmx_sli_mac_credit_cnt_s cn68xx;
- struct cvmx_sli_mac_credit_cnt_s cn68xxp1;
- struct cvmx_sli_mac_credit_cnt_s cnf71xx;
-};
-
-union cvmx_sli_mac_credit_cnt2 {
- uint64_t u64;
- struct cvmx_sli_mac_credit_cnt2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t p3_c_d:1;
- uint64_t p3_n_d:1;
- uint64_t p3_p_d:1;
- uint64_t p2_c_d:1;
- uint64_t p2_n_d:1;
- uint64_t p2_p_d:1;
- uint64_t p3_ccnt:8;
- uint64_t p3_ncnt:8;
- uint64_t p3_pcnt:8;
- uint64_t p2_ccnt:8;
- uint64_t p2_ncnt:8;
- uint64_t p2_pcnt:8;
-#else
- uint64_t p2_pcnt:8;
- uint64_t p2_ncnt:8;
- uint64_t p2_ccnt:8;
- uint64_t p3_pcnt:8;
- uint64_t p3_ncnt:8;
- uint64_t p3_ccnt:8;
- uint64_t p2_p_d:1;
- uint64_t p2_n_d:1;
- uint64_t p2_c_d:1;
- uint64_t p3_p_d:1;
- uint64_t p3_n_d:1;
- uint64_t p3_c_d:1;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_mac_credit_cnt2_s cn61xx;
- struct cvmx_sli_mac_credit_cnt2_s cn66xx;
- struct cvmx_sli_mac_credit_cnt2_s cnf71xx;
-};
-
-union cvmx_sli_mac_number {
- uint64_t u64;
- struct cvmx_sli_mac_number_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t a_mode:1;
- uint64_t num:8;
-#else
- uint64_t num:8;
- uint64_t a_mode:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_sli_mac_number_s cn61xx;
- struct cvmx_sli_mac_number_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t num:8;
-#else
- uint64_t num:8;
- uint64_t reserved_8_63:56;
-#endif
- } cn63xx;
- struct cvmx_sli_mac_number_s cn66xx;
- struct cvmx_sli_mac_number_cn63xx cn68xx;
- struct cvmx_sli_mac_number_cn63xx cn68xxp1;
- struct cvmx_sli_mac_number_s cnf71xx;
};
union cvmx_sli_mem_access_ctl {
uint64_t u64;
struct cvmx_sli_mem_access_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t max_word:4;
- uint64_t timer:10;
-#else
- uint64_t timer:10;
- uint64_t max_word:4;
- uint64_t reserved_14_63:50;
-#endif
- } s;
- struct cvmx_sli_mem_access_ctl_s cn61xx;
- struct cvmx_sli_mem_access_ctl_s cn63xx;
- struct cvmx_sli_mem_access_ctl_s cn63xxp1;
- struct cvmx_sli_mem_access_ctl_s cn66xx;
- struct cvmx_sli_mem_access_ctl_s cn68xx;
- struct cvmx_sli_mem_access_ctl_s cn68xxp1;
- struct cvmx_sli_mem_access_ctl_s cnf71xx;
-};
-
-union cvmx_sli_mem_access_subidx {
- uint64_t u64;
- struct cvmx_sli_mem_access_subidx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t zero:1;
- uint64_t port:3;
- uint64_t nmerge:1;
- uint64_t esr:2;
- uint64_t esw:2;
- uint64_t wtype:2;
- uint64_t rtype:2;
- uint64_t reserved_0_29:30;
-#else
- uint64_t reserved_0_29:30;
- uint64_t rtype:2;
- uint64_t wtype:2;
- uint64_t esw:2;
- uint64_t esr:2;
- uint64_t nmerge:1;
- uint64_t port:3;
- uint64_t zero:1;
- uint64_t reserved_43_63:21;
-#endif
- } s;
- struct cvmx_sli_mem_access_subidx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t zero:1;
- uint64_t port:3;
- uint64_t nmerge:1;
- uint64_t esr:2;
- uint64_t esw:2;
- uint64_t wtype:2;
- uint64_t rtype:2;
- uint64_t ba:30;
-#else
- uint64_t ba:30;
- uint64_t rtype:2;
- uint64_t wtype:2;
- uint64_t esw:2;
- uint64_t esr:2;
- uint64_t nmerge:1;
- uint64_t port:3;
- uint64_t zero:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn61xx;
- struct cvmx_sli_mem_access_subidx_cn61xx cn63xx;
- struct cvmx_sli_mem_access_subidx_cn61xx cn63xxp1;
- struct cvmx_sli_mem_access_subidx_cn61xx cn66xx;
- struct cvmx_sli_mem_access_subidx_cn68xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_43_63:21;
- uint64_t zero:1;
- uint64_t port:3;
- uint64_t nmerge:1;
- uint64_t esr:2;
- uint64_t esw:2;
- uint64_t wtype:2;
- uint64_t rtype:2;
- uint64_t ba:28;
- uint64_t reserved_0_1:2;
-#else
- uint64_t reserved_0_1:2;
- uint64_t ba:28;
- uint64_t rtype:2;
- uint64_t wtype:2;
- uint64_t esw:2;
- uint64_t esr:2;
- uint64_t nmerge:1;
- uint64_t port:3;
- uint64_t zero:1;
- uint64_t reserved_43_63:21;
-#endif
- } cn68xx;
- struct cvmx_sli_mem_access_subidx_cn68xx cn68xxp1;
- struct cvmx_sli_mem_access_subidx_cn61xx cnf71xx;
-};
-
-union cvmx_sli_msi_enb0 {
- uint64_t u64;
- struct cvmx_sli_msi_enb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb0_s cn61xx;
- struct cvmx_sli_msi_enb0_s cn63xx;
- struct cvmx_sli_msi_enb0_s cn63xxp1;
- struct cvmx_sli_msi_enb0_s cn66xx;
- struct cvmx_sli_msi_enb0_s cn68xx;
- struct cvmx_sli_msi_enb0_s cn68xxp1;
- struct cvmx_sli_msi_enb0_s cnf71xx;
-};
-
-union cvmx_sli_msi_enb1 {
- uint64_t u64;
- struct cvmx_sli_msi_enb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb1_s cn61xx;
- struct cvmx_sli_msi_enb1_s cn63xx;
- struct cvmx_sli_msi_enb1_s cn63xxp1;
- struct cvmx_sli_msi_enb1_s cn66xx;
- struct cvmx_sli_msi_enb1_s cn68xx;
- struct cvmx_sli_msi_enb1_s cn68xxp1;
- struct cvmx_sli_msi_enb1_s cnf71xx;
-};
-
-union cvmx_sli_msi_enb2 {
- uint64_t u64;
- struct cvmx_sli_msi_enb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb2_s cn61xx;
- struct cvmx_sli_msi_enb2_s cn63xx;
- struct cvmx_sli_msi_enb2_s cn63xxp1;
- struct cvmx_sli_msi_enb2_s cn66xx;
- struct cvmx_sli_msi_enb2_s cn68xx;
- struct cvmx_sli_msi_enb2_s cn68xxp1;
- struct cvmx_sli_msi_enb2_s cnf71xx;
-};
-
-union cvmx_sli_msi_enb3 {
- uint64_t u64;
- struct cvmx_sli_msi_enb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t enb:64;
-#else
- uint64_t enb:64;
-#endif
- } s;
- struct cvmx_sli_msi_enb3_s cn61xx;
- struct cvmx_sli_msi_enb3_s cn63xx;
- struct cvmx_sli_msi_enb3_s cn63xxp1;
- struct cvmx_sli_msi_enb3_s cn66xx;
- struct cvmx_sli_msi_enb3_s cn68xx;
- struct cvmx_sli_msi_enb3_s cn68xxp1;
- struct cvmx_sli_msi_enb3_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv0 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv0_s cn61xx;
- struct cvmx_sli_msi_rcv0_s cn63xx;
- struct cvmx_sli_msi_rcv0_s cn63xxp1;
- struct cvmx_sli_msi_rcv0_s cn66xx;
- struct cvmx_sli_msi_rcv0_s cn68xx;
- struct cvmx_sli_msi_rcv0_s cn68xxp1;
- struct cvmx_sli_msi_rcv0_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv1 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv1_s cn61xx;
- struct cvmx_sli_msi_rcv1_s cn63xx;
- struct cvmx_sli_msi_rcv1_s cn63xxp1;
- struct cvmx_sli_msi_rcv1_s cn66xx;
- struct cvmx_sli_msi_rcv1_s cn68xx;
- struct cvmx_sli_msi_rcv1_s cn68xxp1;
- struct cvmx_sli_msi_rcv1_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv2 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv2_s cn61xx;
- struct cvmx_sli_msi_rcv2_s cn63xx;
- struct cvmx_sli_msi_rcv2_s cn63xxp1;
- struct cvmx_sli_msi_rcv2_s cn66xx;
- struct cvmx_sli_msi_rcv2_s cn68xx;
- struct cvmx_sli_msi_rcv2_s cn68xxp1;
- struct cvmx_sli_msi_rcv2_s cnf71xx;
-};
-
-union cvmx_sli_msi_rcv3 {
- uint64_t u64;
- struct cvmx_sli_msi_rcv3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t intr:64;
-#else
- uint64_t intr:64;
-#endif
- } s;
- struct cvmx_sli_msi_rcv3_s cn61xx;
- struct cvmx_sli_msi_rcv3_s cn63xx;
- struct cvmx_sli_msi_rcv3_s cn63xxp1;
- struct cvmx_sli_msi_rcv3_s cn66xx;
- struct cvmx_sli_msi_rcv3_s cn68xx;
- struct cvmx_sli_msi_rcv3_s cn68xxp1;
- struct cvmx_sli_msi_rcv3_s cnf71xx;
-};
-
-union cvmx_sli_msi_rd_map {
- uint64_t u64;
- struct cvmx_sli_msi_rd_map_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t rd_int:8;
- uint64_t msi_int:8;
-#else
- uint64_t msi_int:8;
- uint64_t rd_int:8;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_sli_msi_rd_map_s cn61xx;
- struct cvmx_sli_msi_rd_map_s cn63xx;
- struct cvmx_sli_msi_rd_map_s cn63xxp1;
- struct cvmx_sli_msi_rd_map_s cn66xx;
- struct cvmx_sli_msi_rd_map_s cn68xx;
- struct cvmx_sli_msi_rd_map_s cn68xxp1;
- struct cvmx_sli_msi_rd_map_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb0 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb0_s cn61xx;
- struct cvmx_sli_msi_w1c_enb0_s cn63xx;
- struct cvmx_sli_msi_w1c_enb0_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb0_s cn66xx;
- struct cvmx_sli_msi_w1c_enb0_s cn68xx;
- struct cvmx_sli_msi_w1c_enb0_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb0_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb1 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb1_s cn61xx;
- struct cvmx_sli_msi_w1c_enb1_s cn63xx;
- struct cvmx_sli_msi_w1c_enb1_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb1_s cn66xx;
- struct cvmx_sli_msi_w1c_enb1_s cn68xx;
- struct cvmx_sli_msi_w1c_enb1_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb1_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb2 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb2_s cn61xx;
- struct cvmx_sli_msi_w1c_enb2_s cn63xx;
- struct cvmx_sli_msi_w1c_enb2_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb2_s cn66xx;
- struct cvmx_sli_msi_w1c_enb2_s cn68xx;
- struct cvmx_sli_msi_w1c_enb2_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb2_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1c_enb3 {
- uint64_t u64;
- struct cvmx_sli_msi_w1c_enb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t clr:64;
-#else
- uint64_t clr:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1c_enb3_s cn61xx;
- struct cvmx_sli_msi_w1c_enb3_s cn63xx;
- struct cvmx_sli_msi_w1c_enb3_s cn63xxp1;
- struct cvmx_sli_msi_w1c_enb3_s cn66xx;
- struct cvmx_sli_msi_w1c_enb3_s cn68xx;
- struct cvmx_sli_msi_w1c_enb3_s cn68xxp1;
- struct cvmx_sli_msi_w1c_enb3_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb0 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb0_s cn61xx;
- struct cvmx_sli_msi_w1s_enb0_s cn63xx;
- struct cvmx_sli_msi_w1s_enb0_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb0_s cn66xx;
- struct cvmx_sli_msi_w1s_enb0_s cn68xx;
- struct cvmx_sli_msi_w1s_enb0_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb0_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb1 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb1_s cn61xx;
- struct cvmx_sli_msi_w1s_enb1_s cn63xx;
- struct cvmx_sli_msi_w1s_enb1_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb1_s cn66xx;
- struct cvmx_sli_msi_w1s_enb1_s cn68xx;
- struct cvmx_sli_msi_w1s_enb1_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb1_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb2 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb2_s cn61xx;
- struct cvmx_sli_msi_w1s_enb2_s cn63xx;
- struct cvmx_sli_msi_w1s_enb2_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb2_s cn66xx;
- struct cvmx_sli_msi_w1s_enb2_s cn68xx;
- struct cvmx_sli_msi_w1s_enb2_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb2_s cnf71xx;
-};
-
-union cvmx_sli_msi_w1s_enb3 {
- uint64_t u64;
- struct cvmx_sli_msi_w1s_enb3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t set:64;
-#else
- uint64_t set:64;
-#endif
- } s;
- struct cvmx_sli_msi_w1s_enb3_s cn61xx;
- struct cvmx_sli_msi_w1s_enb3_s cn63xx;
- struct cvmx_sli_msi_w1s_enb3_s cn63xxp1;
- struct cvmx_sli_msi_w1s_enb3_s cn66xx;
- struct cvmx_sli_msi_w1s_enb3_s cn68xx;
- struct cvmx_sli_msi_w1s_enb3_s cn68xxp1;
- struct cvmx_sli_msi_w1s_enb3_s cnf71xx;
-};
-
-union cvmx_sli_msi_wr_map {
- uint64_t u64;
- struct cvmx_sli_msi_wr_map_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t ciu_int:8;
- uint64_t msi_int:8;
-#else
- uint64_t msi_int:8;
- uint64_t ciu_int:8;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_sli_msi_wr_map_s cn61xx;
- struct cvmx_sli_msi_wr_map_s cn63xx;
- struct cvmx_sli_msi_wr_map_s cn63xxp1;
- struct cvmx_sli_msi_wr_map_s cn66xx;
- struct cvmx_sli_msi_wr_map_s cn68xx;
- struct cvmx_sli_msi_wr_map_s cn68xxp1;
- struct cvmx_sli_msi_wr_map_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t intr:8;
-#else
- uint64_t intr:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv_b1 {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_b1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t intr:8;
- uint64_t reserved_0_7:8;
-#else
- uint64_t reserved_0_7:8;
- uint64_t intr:8;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_b1_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_b1_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv_b2 {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_b2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t intr:8;
- uint64_t reserved_0_15:16;
-#else
- uint64_t reserved_0_15:16;
- uint64_t intr:8;
- uint64_t reserved_24_63:40;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_b2_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_b2_s cnf71xx;
-};
-
-union cvmx_sli_pcie_msi_rcv_b3 {
- uint64_t u64;
- struct cvmx_sli_pcie_msi_rcv_b3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t intr:8;
- uint64_t reserved_0_23:24;
-#else
- uint64_t reserved_0_23:24;
- uint64_t intr:8;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn61xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn63xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn63xxp1;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn66xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn68xx;
- struct cvmx_sli_pcie_msi_rcv_b3_s cn68xxp1;
- struct cvmx_sli_pcie_msi_rcv_b3_s cnf71xx;
-};
-
-union cvmx_sli_pktx_cnts {
- uint64_t u64;
- struct cvmx_sli_pktx_cnts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t timer:22;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t timer:22;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_pktx_cnts_s cn61xx;
- struct cvmx_sli_pktx_cnts_s cn63xx;
- struct cvmx_sli_pktx_cnts_s cn63xxp1;
- struct cvmx_sli_pktx_cnts_s cn66xx;
- struct cvmx_sli_pktx_cnts_s cn68xx;
- struct cvmx_sli_pktx_cnts_s cn68xxp1;
- struct cvmx_sli_pktx_cnts_s cnf71xx;
-};
-
-union cvmx_sli_pktx_in_bp {
- uint64_t u64;
- struct cvmx_sli_pktx_in_bp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wmark:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t wmark:32;
-#endif
- } s;
- struct cvmx_sli_pktx_in_bp_s cn61xx;
- struct cvmx_sli_pktx_in_bp_s cn63xx;
- struct cvmx_sli_pktx_in_bp_s cn63xxp1;
- struct cvmx_sli_pktx_in_bp_s cn66xx;
- struct cvmx_sli_pktx_in_bp_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_baddr {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_baddr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t addr:61;
- uint64_t reserved_0_2:3;
-#else
- uint64_t reserved_0_2:3;
- uint64_t addr:61;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_14_63:50,
+ __BITFIELD_FIELD(uint64_t max_word:4,
+ __BITFIELD_FIELD(uint64_t timer:10,
+ ;)))
} s;
- struct cvmx_sli_pktx_instr_baddr_s cn61xx;
- struct cvmx_sli_pktx_instr_baddr_s cn63xx;
- struct cvmx_sli_pktx_instr_baddr_s cn63xxp1;
- struct cvmx_sli_pktx_instr_baddr_s cn66xx;
- struct cvmx_sli_pktx_instr_baddr_s cn68xx;
- struct cvmx_sli_pktx_instr_baddr_s cn68xxp1;
- struct cvmx_sli_pktx_instr_baddr_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_baoff_dbell {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_baoff_dbell_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t aoff:32;
- uint64_t dbell:32;
-#else
- uint64_t dbell:32;
- uint64_t aoff:32;
-#endif
- } s;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn61xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn63xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn63xxp1;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn66xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn68xx;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cn68xxp1;
- struct cvmx_sli_pktx_instr_baoff_dbell_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_fifo_rsize {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_fifo_rsize_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t max:9;
- uint64_t rrp:9;
- uint64_t wrp:9;
- uint64_t fcnt:5;
- uint64_t rsize:32;
-#else
- uint64_t rsize:32;
- uint64_t fcnt:5;
- uint64_t wrp:9;
- uint64_t rrp:9;
- uint64_t max:9;
-#endif
- } s;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn61xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn63xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn63xxp1;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn66xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn68xx;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cn68xxp1;
- struct cvmx_sli_pktx_instr_fifo_rsize_s cnf71xx;
-};
-
-union cvmx_sli_pktx_instr_header {
- uint64_t u64;
- struct cvmx_sli_pktx_instr_header_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_44_63:20;
- uint64_t pbp:1;
- uint64_t reserved_38_42:5;
- uint64_t rparmode:2;
- uint64_t reserved_35_35:1;
- uint64_t rskp_len:7;
- uint64_t rngrpext:2;
- uint64_t rnqos:1;
- uint64_t rngrp:1;
- uint64_t rntt:1;
- uint64_t rntag:1;
- uint64_t use_ihdr:1;
- uint64_t reserved_16_20:5;
- uint64_t par_mode:2;
- uint64_t reserved_13_13:1;
- uint64_t skp_len:7;
- uint64_t ngrpext:2;
- uint64_t nqos:1;
- uint64_t ngrp:1;
- uint64_t ntt:1;
- uint64_t ntag:1;
-#else
- uint64_t ntag:1;
- uint64_t ntt:1;
- uint64_t ngrp:1;
- uint64_t nqos:1;
- uint64_t ngrpext:2;
- uint64_t skp_len:7;
- uint64_t reserved_13_13:1;
- uint64_t par_mode:2;
- uint64_t reserved_16_20:5;
- uint64_t use_ihdr:1;
- uint64_t rntag:1;
- uint64_t rntt:1;
- uint64_t rngrp:1;
- uint64_t rnqos:1;
- uint64_t rngrpext:2;
- uint64_t rskp_len:7;
- uint64_t reserved_35_35:1;
- uint64_t rparmode:2;
- uint64_t reserved_38_42:5;
- uint64_t pbp:1;
- uint64_t reserved_44_63:20;
-#endif
- } s;
- struct cvmx_sli_pktx_instr_header_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_44_63:20;
- uint64_t pbp:1;
- uint64_t reserved_38_42:5;
- uint64_t rparmode:2;
- uint64_t reserved_35_35:1;
- uint64_t rskp_len:7;
- uint64_t reserved_26_27:2;
- uint64_t rnqos:1;
- uint64_t rngrp:1;
- uint64_t rntt:1;
- uint64_t rntag:1;
- uint64_t use_ihdr:1;
- uint64_t reserved_16_20:5;
- uint64_t par_mode:2;
- uint64_t reserved_13_13:1;
- uint64_t skp_len:7;
- uint64_t reserved_4_5:2;
- uint64_t nqos:1;
- uint64_t ngrp:1;
- uint64_t ntt:1;
- uint64_t ntag:1;
-#else
- uint64_t ntag:1;
- uint64_t ntt:1;
- uint64_t ngrp:1;
- uint64_t nqos:1;
- uint64_t reserved_4_5:2;
- uint64_t skp_len:7;
- uint64_t reserved_13_13:1;
- uint64_t par_mode:2;
- uint64_t reserved_16_20:5;
- uint64_t use_ihdr:1;
- uint64_t rntag:1;
- uint64_t rntt:1;
- uint64_t rngrp:1;
- uint64_t rnqos:1;
- uint64_t reserved_26_27:2;
- uint64_t rskp_len:7;
- uint64_t reserved_35_35:1;
- uint64_t rparmode:2;
- uint64_t reserved_38_42:5;
- uint64_t pbp:1;
- uint64_t reserved_44_63:20;
-#endif
- } cn61xx;
- struct cvmx_sli_pktx_instr_header_cn61xx cn63xx;
- struct cvmx_sli_pktx_instr_header_cn61xx cn63xxp1;
- struct cvmx_sli_pktx_instr_header_cn61xx cn66xx;
- struct cvmx_sli_pktx_instr_header_s cn68xx;
- struct cvmx_sli_pktx_instr_header_cn61xx cn68xxp1;
- struct cvmx_sli_pktx_instr_header_cn61xx cnf71xx;
-};
-
-union cvmx_sli_pktx_out_size {
- uint64_t u64;
- struct cvmx_sli_pktx_out_size_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t isize:7;
- uint64_t bsize:16;
-#else
- uint64_t bsize:16;
- uint64_t isize:7;
- uint64_t reserved_23_63:41;
-#endif
- } s;
- struct cvmx_sli_pktx_out_size_s cn61xx;
- struct cvmx_sli_pktx_out_size_s cn63xx;
- struct cvmx_sli_pktx_out_size_s cn63xxp1;
- struct cvmx_sli_pktx_out_size_s cn66xx;
- struct cvmx_sli_pktx_out_size_s cn68xx;
- struct cvmx_sli_pktx_out_size_s cn68xxp1;
- struct cvmx_sli_pktx_out_size_s cnf71xx;
-};
-
-union cvmx_sli_pktx_slist_baddr {
- uint64_t u64;
- struct cvmx_sli_pktx_slist_baddr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t addr:60;
- uint64_t reserved_0_3:4;
-#else
- uint64_t reserved_0_3:4;
- uint64_t addr:60;
-#endif
- } s;
- struct cvmx_sli_pktx_slist_baddr_s cn61xx;
- struct cvmx_sli_pktx_slist_baddr_s cn63xx;
- struct cvmx_sli_pktx_slist_baddr_s cn63xxp1;
- struct cvmx_sli_pktx_slist_baddr_s cn66xx;
- struct cvmx_sli_pktx_slist_baddr_s cn68xx;
- struct cvmx_sli_pktx_slist_baddr_s cn68xxp1;
- struct cvmx_sli_pktx_slist_baddr_s cnf71xx;
-};
-
-union cvmx_sli_pktx_slist_baoff_dbell {
- uint64_t u64;
- struct cvmx_sli_pktx_slist_baoff_dbell_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t aoff:32;
- uint64_t dbell:32;
-#else
- uint64_t dbell:32;
- uint64_t aoff:32;
-#endif
- } s;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn61xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn63xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn63xxp1;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn66xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn68xx;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cn68xxp1;
- struct cvmx_sli_pktx_slist_baoff_dbell_s cnf71xx;
-};
-
-union cvmx_sli_pktx_slist_fifo_rsize {
- uint64_t u64;
- struct cvmx_sli_pktx_slist_fifo_rsize_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t rsize:32;
-#else
- uint64_t rsize:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn61xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn63xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn63xxp1;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn66xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn68xx;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cn68xxp1;
- struct cvmx_sli_pktx_slist_fifo_rsize_s cnf71xx;
-};
-
-union cvmx_sli_pkt_cnt_int {
- uint64_t u64;
- struct cvmx_sli_pkt_cnt_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_cnt_int_s cn61xx;
- struct cvmx_sli_pkt_cnt_int_s cn63xx;
- struct cvmx_sli_pkt_cnt_int_s cn63xxp1;
- struct cvmx_sli_pkt_cnt_int_s cn66xx;
- struct cvmx_sli_pkt_cnt_int_s cn68xx;
- struct cvmx_sli_pkt_cnt_int_s cn68xxp1;
- struct cvmx_sli_pkt_cnt_int_s cnf71xx;
-};
-
-union cvmx_sli_pkt_cnt_int_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_cnt_int_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_cnt_int_enb_s cn61xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn63xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn63xxp1;
- struct cvmx_sli_pkt_cnt_int_enb_s cn66xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn68xx;
- struct cvmx_sli_pkt_cnt_int_enb_s cn68xxp1;
- struct cvmx_sli_pkt_cnt_int_enb_s cnf71xx;
-};
-
-union cvmx_sli_pkt_ctl {
- uint64_t u64;
- struct cvmx_sli_pkt_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t ring_en:1;
- uint64_t pkt_bp:4;
-#else
- uint64_t pkt_bp:4;
- uint64_t ring_en:1;
- uint64_t reserved_5_63:59;
-#endif
- } s;
- struct cvmx_sli_pkt_ctl_s cn61xx;
- struct cvmx_sli_pkt_ctl_s cn63xx;
- struct cvmx_sli_pkt_ctl_s cn63xxp1;
- struct cvmx_sli_pkt_ctl_s cn66xx;
- struct cvmx_sli_pkt_ctl_s cn68xx;
- struct cvmx_sli_pkt_ctl_s cn68xxp1;
- struct cvmx_sli_pkt_ctl_s cnf71xx;
-};
-
-union cvmx_sli_pkt_data_out_es {
- uint64_t u64;
- struct cvmx_sli_pkt_data_out_es_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t es:64;
-#else
- uint64_t es:64;
-#endif
- } s;
- struct cvmx_sli_pkt_data_out_es_s cn61xx;
- struct cvmx_sli_pkt_data_out_es_s cn63xx;
- struct cvmx_sli_pkt_data_out_es_s cn63xxp1;
- struct cvmx_sli_pkt_data_out_es_s cn66xx;
- struct cvmx_sli_pkt_data_out_es_s cn68xx;
- struct cvmx_sli_pkt_data_out_es_s cn68xxp1;
- struct cvmx_sli_pkt_data_out_es_s cnf71xx;
-};
-
-union cvmx_sli_pkt_data_out_ns {
- uint64_t u64;
- struct cvmx_sli_pkt_data_out_ns_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t nsr:32;
-#else
- uint64_t nsr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_data_out_ns_s cn61xx;
- struct cvmx_sli_pkt_data_out_ns_s cn63xx;
- struct cvmx_sli_pkt_data_out_ns_s cn63xxp1;
- struct cvmx_sli_pkt_data_out_ns_s cn66xx;
- struct cvmx_sli_pkt_data_out_ns_s cn68xx;
- struct cvmx_sli_pkt_data_out_ns_s cn68xxp1;
- struct cvmx_sli_pkt_data_out_ns_s cnf71xx;
-};
-
-union cvmx_sli_pkt_data_out_ror {
- uint64_t u64;
- struct cvmx_sli_pkt_data_out_ror_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ror:32;
-#else
- uint64_t ror:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_data_out_ror_s cn61xx;
- struct cvmx_sli_pkt_data_out_ror_s cn63xx;
- struct cvmx_sli_pkt_data_out_ror_s cn63xxp1;
- struct cvmx_sli_pkt_data_out_ror_s cn66xx;
- struct cvmx_sli_pkt_data_out_ror_s cn68xx;
- struct cvmx_sli_pkt_data_out_ror_s cn68xxp1;
- struct cvmx_sli_pkt_data_out_ror_s cnf71xx;
-};
-
-union cvmx_sli_pkt_dpaddr {
- uint64_t u64;
- struct cvmx_sli_pkt_dpaddr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t dptr:32;
-#else
- uint64_t dptr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_dpaddr_s cn61xx;
- struct cvmx_sli_pkt_dpaddr_s cn63xx;
- struct cvmx_sli_pkt_dpaddr_s cn63xxp1;
- struct cvmx_sli_pkt_dpaddr_s cn66xx;
- struct cvmx_sli_pkt_dpaddr_s cn68xx;
- struct cvmx_sli_pkt_dpaddr_s cn68xxp1;
- struct cvmx_sli_pkt_dpaddr_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_bp {
- uint64_t u64;
- struct cvmx_sli_pkt_in_bp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bp:32;
-#else
- uint64_t bp:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_in_bp_s cn61xx;
- struct cvmx_sli_pkt_in_bp_s cn63xx;
- struct cvmx_sli_pkt_in_bp_s cn63xxp1;
- struct cvmx_sli_pkt_in_bp_s cn66xx;
- struct cvmx_sli_pkt_in_bp_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_donex_cnts {
- uint64_t u64;
- struct cvmx_sli_pkt_in_donex_cnts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_in_donex_cnts_s cn61xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn63xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn63xxp1;
- struct cvmx_sli_pkt_in_donex_cnts_s cn66xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn68xx;
- struct cvmx_sli_pkt_in_donex_cnts_s cn68xxp1;
- struct cvmx_sli_pkt_in_donex_cnts_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_instr_counts {
- uint64_t u64;
- struct cvmx_sli_pkt_in_instr_counts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wr_cnt:32;
- uint64_t rd_cnt:32;
-#else
- uint64_t rd_cnt:32;
- uint64_t wr_cnt:32;
-#endif
- } s;
- struct cvmx_sli_pkt_in_instr_counts_s cn61xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn63xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn63xxp1;
- struct cvmx_sli_pkt_in_instr_counts_s cn66xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn68xx;
- struct cvmx_sli_pkt_in_instr_counts_s cn68xxp1;
- struct cvmx_sli_pkt_in_instr_counts_s cnf71xx;
-};
-
-union cvmx_sli_pkt_in_pcie_port {
- uint64_t u64;
- struct cvmx_sli_pkt_in_pcie_port_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t pp:64;
-#else
- uint64_t pp:64;
-#endif
- } s;
- struct cvmx_sli_pkt_in_pcie_port_s cn61xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn63xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn63xxp1;
- struct cvmx_sli_pkt_in_pcie_port_s cn66xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn68xx;
- struct cvmx_sli_pkt_in_pcie_port_s cn68xxp1;
- struct cvmx_sli_pkt_in_pcie_port_s cnf71xx;
-};
-
-union cvmx_sli_pkt_input_control {
- uint64_t u64;
- struct cvmx_sli_pkt_input_control_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t prd_erst:1;
- uint64_t prd_rds:7;
- uint64_t gii_erst:1;
- uint64_t gii_rds:7;
- uint64_t reserved_41_47:7;
- uint64_t prc_idle:1;
- uint64_t reserved_24_39:16;
- uint64_t pin_rst:1;
- uint64_t pkt_rr:1;
- uint64_t pbp_dhi:13;
- uint64_t d_nsr:1;
- uint64_t d_esr:2;
- uint64_t d_ror:1;
- uint64_t use_csr:1;
- uint64_t nsr:1;
- uint64_t esr:2;
- uint64_t ror:1;
-#else
- uint64_t ror:1;
- uint64_t esr:2;
- uint64_t nsr:1;
- uint64_t use_csr:1;
- uint64_t d_ror:1;
- uint64_t d_esr:2;
- uint64_t d_nsr:1;
- uint64_t pbp_dhi:13;
- uint64_t pkt_rr:1;
- uint64_t pin_rst:1;
- uint64_t reserved_24_39:16;
- uint64_t prc_idle:1;
- uint64_t reserved_41_47:7;
- uint64_t gii_rds:7;
- uint64_t gii_erst:1;
- uint64_t prd_rds:7;
- uint64_t prd_erst:1;
-#endif
- } s;
- struct cvmx_sli_pkt_input_control_s cn61xx;
- struct cvmx_sli_pkt_input_control_cn63xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t pkt_rr:1;
- uint64_t pbp_dhi:13;
- uint64_t d_nsr:1;
- uint64_t d_esr:2;
- uint64_t d_ror:1;
- uint64_t use_csr:1;
- uint64_t nsr:1;
- uint64_t esr:2;
- uint64_t ror:1;
-#else
- uint64_t ror:1;
- uint64_t esr:2;
- uint64_t nsr:1;
- uint64_t use_csr:1;
- uint64_t d_ror:1;
- uint64_t d_esr:2;
- uint64_t d_nsr:1;
- uint64_t pbp_dhi:13;
- uint64_t pkt_rr:1;
- uint64_t reserved_23_63:41;
-#endif
- } cn63xx;
- struct cvmx_sli_pkt_input_control_cn63xx cn63xxp1;
- struct cvmx_sli_pkt_input_control_s cn66xx;
- struct cvmx_sli_pkt_input_control_s cn68xx;
- struct cvmx_sli_pkt_input_control_s cn68xxp1;
- struct cvmx_sli_pkt_input_control_s cnf71xx;
-};
-
-union cvmx_sli_pkt_instr_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_instr_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t enb:32;
-#else
- uint64_t enb:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_instr_enb_s cn61xx;
- struct cvmx_sli_pkt_instr_enb_s cn63xx;
- struct cvmx_sli_pkt_instr_enb_s cn63xxp1;
- struct cvmx_sli_pkt_instr_enb_s cn66xx;
- struct cvmx_sli_pkt_instr_enb_s cn68xx;
- struct cvmx_sli_pkt_instr_enb_s cn68xxp1;
- struct cvmx_sli_pkt_instr_enb_s cnf71xx;
-};
-
-union cvmx_sli_pkt_instr_rd_size {
- uint64_t u64;
- struct cvmx_sli_pkt_instr_rd_size_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rdsize:64;
-#else
- uint64_t rdsize:64;
-#endif
- } s;
- struct cvmx_sli_pkt_instr_rd_size_s cn61xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn63xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn63xxp1;
- struct cvmx_sli_pkt_instr_rd_size_s cn66xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn68xx;
- struct cvmx_sli_pkt_instr_rd_size_s cn68xxp1;
- struct cvmx_sli_pkt_instr_rd_size_s cnf71xx;
-};
-
-union cvmx_sli_pkt_instr_size {
- uint64_t u64;
- struct cvmx_sli_pkt_instr_size_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t is_64b:32;
-#else
- uint64_t is_64b:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_instr_size_s cn61xx;
- struct cvmx_sli_pkt_instr_size_s cn63xx;
- struct cvmx_sli_pkt_instr_size_s cn63xxp1;
- struct cvmx_sli_pkt_instr_size_s cn66xx;
- struct cvmx_sli_pkt_instr_size_s cn68xx;
- struct cvmx_sli_pkt_instr_size_s cn68xxp1;
- struct cvmx_sli_pkt_instr_size_s cnf71xx;
-};
-
-union cvmx_sli_pkt_int_levels {
- uint64_t u64;
- struct cvmx_sli_pkt_int_levels_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t time:22;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t time:22;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_sli_pkt_int_levels_s cn61xx;
- struct cvmx_sli_pkt_int_levels_s cn63xx;
- struct cvmx_sli_pkt_int_levels_s cn63xxp1;
- struct cvmx_sli_pkt_int_levels_s cn66xx;
- struct cvmx_sli_pkt_int_levels_s cn68xx;
- struct cvmx_sli_pkt_int_levels_s cn68xxp1;
- struct cvmx_sli_pkt_int_levels_s cnf71xx;
-};
-
-union cvmx_sli_pkt_iptr {
- uint64_t u64;
- struct cvmx_sli_pkt_iptr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t iptr:32;
-#else
- uint64_t iptr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_iptr_s cn61xx;
- struct cvmx_sli_pkt_iptr_s cn63xx;
- struct cvmx_sli_pkt_iptr_s cn63xxp1;
- struct cvmx_sli_pkt_iptr_s cn66xx;
- struct cvmx_sli_pkt_iptr_s cn68xx;
- struct cvmx_sli_pkt_iptr_s cn68xxp1;
- struct cvmx_sli_pkt_iptr_s cnf71xx;
-};
-
-union cvmx_sli_pkt_out_bmode {
- uint64_t u64;
- struct cvmx_sli_pkt_out_bmode_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bmode:32;
-#else
- uint64_t bmode:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_out_bmode_s cn61xx;
- struct cvmx_sli_pkt_out_bmode_s cn63xx;
- struct cvmx_sli_pkt_out_bmode_s cn63xxp1;
- struct cvmx_sli_pkt_out_bmode_s cn66xx;
- struct cvmx_sli_pkt_out_bmode_s cn68xx;
- struct cvmx_sli_pkt_out_bmode_s cn68xxp1;
- struct cvmx_sli_pkt_out_bmode_s cnf71xx;
-};
-
-union cvmx_sli_pkt_out_bp_en {
- uint64_t u64;
- struct cvmx_sli_pkt_out_bp_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t bp_en:32;
-#else
- uint64_t bp_en:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_out_bp_en_s cn68xx;
- struct cvmx_sli_pkt_out_bp_en_s cn68xxp1;
-};
-
-union cvmx_sli_pkt_out_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_out_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t enb:32;
-#else
- uint64_t enb:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_out_enb_s cn61xx;
- struct cvmx_sli_pkt_out_enb_s cn63xx;
- struct cvmx_sli_pkt_out_enb_s cn63xxp1;
- struct cvmx_sli_pkt_out_enb_s cn66xx;
- struct cvmx_sli_pkt_out_enb_s cn68xx;
- struct cvmx_sli_pkt_out_enb_s cn68xxp1;
- struct cvmx_sli_pkt_out_enb_s cnf71xx;
-};
-
-union cvmx_sli_pkt_output_wmark {
- uint64_t u64;
- struct cvmx_sli_pkt_output_wmark_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wmark:32;
-#else
- uint64_t wmark:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_output_wmark_s cn61xx;
- struct cvmx_sli_pkt_output_wmark_s cn63xx;
- struct cvmx_sli_pkt_output_wmark_s cn63xxp1;
- struct cvmx_sli_pkt_output_wmark_s cn66xx;
- struct cvmx_sli_pkt_output_wmark_s cn68xx;
- struct cvmx_sli_pkt_output_wmark_s cn68xxp1;
- struct cvmx_sli_pkt_output_wmark_s cnf71xx;
-};
-
-union cvmx_sli_pkt_pcie_port {
- uint64_t u64;
- struct cvmx_sli_pkt_pcie_port_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t pp:64;
-#else
- uint64_t pp:64;
-#endif
- } s;
- struct cvmx_sli_pkt_pcie_port_s cn61xx;
- struct cvmx_sli_pkt_pcie_port_s cn63xx;
- struct cvmx_sli_pkt_pcie_port_s cn63xxp1;
- struct cvmx_sli_pkt_pcie_port_s cn66xx;
- struct cvmx_sli_pkt_pcie_port_s cn68xx;
- struct cvmx_sli_pkt_pcie_port_s cn68xxp1;
- struct cvmx_sli_pkt_pcie_port_s cnf71xx;
-};
-
-union cvmx_sli_pkt_port_in_rst {
- uint64_t u64;
- struct cvmx_sli_pkt_port_in_rst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t in_rst:32;
- uint64_t out_rst:32;
-#else
- uint64_t out_rst:32;
- uint64_t in_rst:32;
-#endif
- } s;
- struct cvmx_sli_pkt_port_in_rst_s cn61xx;
- struct cvmx_sli_pkt_port_in_rst_s cn63xx;
- struct cvmx_sli_pkt_port_in_rst_s cn63xxp1;
- struct cvmx_sli_pkt_port_in_rst_s cn66xx;
- struct cvmx_sli_pkt_port_in_rst_s cn68xx;
- struct cvmx_sli_pkt_port_in_rst_s cn68xxp1;
- struct cvmx_sli_pkt_port_in_rst_s cnf71xx;
-};
-
-union cvmx_sli_pkt_slist_es {
- uint64_t u64;
- struct cvmx_sli_pkt_slist_es_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t es:64;
-#else
- uint64_t es:64;
-#endif
- } s;
- struct cvmx_sli_pkt_slist_es_s cn61xx;
- struct cvmx_sli_pkt_slist_es_s cn63xx;
- struct cvmx_sli_pkt_slist_es_s cn63xxp1;
- struct cvmx_sli_pkt_slist_es_s cn66xx;
- struct cvmx_sli_pkt_slist_es_s cn68xx;
- struct cvmx_sli_pkt_slist_es_s cn68xxp1;
- struct cvmx_sli_pkt_slist_es_s cnf71xx;
-};
-
-union cvmx_sli_pkt_slist_ns {
- uint64_t u64;
- struct cvmx_sli_pkt_slist_ns_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t nsr:32;
-#else
- uint64_t nsr:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_slist_ns_s cn61xx;
- struct cvmx_sli_pkt_slist_ns_s cn63xx;
- struct cvmx_sli_pkt_slist_ns_s cn63xxp1;
- struct cvmx_sli_pkt_slist_ns_s cn66xx;
- struct cvmx_sli_pkt_slist_ns_s cn68xx;
- struct cvmx_sli_pkt_slist_ns_s cn68xxp1;
- struct cvmx_sli_pkt_slist_ns_s cnf71xx;
-};
-
-union cvmx_sli_pkt_slist_ror {
- uint64_t u64;
- struct cvmx_sli_pkt_slist_ror_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t ror:32;
-#else
- uint64_t ror:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_slist_ror_s cn61xx;
- struct cvmx_sli_pkt_slist_ror_s cn63xx;
- struct cvmx_sli_pkt_slist_ror_s cn63xxp1;
- struct cvmx_sli_pkt_slist_ror_s cn66xx;
- struct cvmx_sli_pkt_slist_ror_s cn68xx;
- struct cvmx_sli_pkt_slist_ror_s cn68xxp1;
- struct cvmx_sli_pkt_slist_ror_s cnf71xx;
-};
-
-union cvmx_sli_pkt_time_int {
- uint64_t u64;
- struct cvmx_sli_pkt_time_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_time_int_s cn61xx;
- struct cvmx_sli_pkt_time_int_s cn63xx;
- struct cvmx_sli_pkt_time_int_s cn63xxp1;
- struct cvmx_sli_pkt_time_int_s cn66xx;
- struct cvmx_sli_pkt_time_int_s cn68xx;
- struct cvmx_sli_pkt_time_int_s cn68xxp1;
- struct cvmx_sli_pkt_time_int_s cnf71xx;
-};
-
-union cvmx_sli_pkt_time_int_enb {
- uint64_t u64;
- struct cvmx_sli_pkt_time_int_enb_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t port:32;
-#else
- uint64_t port:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_sli_pkt_time_int_enb_s cn61xx;
- struct cvmx_sli_pkt_time_int_enb_s cn63xx;
- struct cvmx_sli_pkt_time_int_enb_s cn63xxp1;
- struct cvmx_sli_pkt_time_int_enb_s cn66xx;
- struct cvmx_sli_pkt_time_int_enb_s cn68xx;
- struct cvmx_sli_pkt_time_int_enb_s cn68xxp1;
- struct cvmx_sli_pkt_time_int_enb_s cnf71xx;
-};
-
-union cvmx_sli_portx_pkind {
- uint64_t u64;
- struct cvmx_sli_portx_pkind_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_25_63:39;
- uint64_t rpk_enb:1;
- uint64_t reserved_22_23:2;
- uint64_t pkindr:6;
- uint64_t reserved_14_15:2;
- uint64_t bpkind:6;
- uint64_t reserved_6_7:2;
- uint64_t pkind:6;
-#else
- uint64_t pkind:6;
- uint64_t reserved_6_7:2;
- uint64_t bpkind:6;
- uint64_t reserved_14_15:2;
- uint64_t pkindr:6;
- uint64_t reserved_22_23:2;
- uint64_t rpk_enb:1;
- uint64_t reserved_25_63:39;
-#endif
- } s;
- struct cvmx_sli_portx_pkind_s cn68xx;
- struct cvmx_sli_portx_pkind_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_14_63:50;
- uint64_t bpkind:6;
- uint64_t reserved_6_7:2;
- uint64_t pkind:6;
-#else
- uint64_t pkind:6;
- uint64_t reserved_6_7:2;
- uint64_t bpkind:6;
- uint64_t reserved_14_63:50;
-#endif
- } cn68xxp1;
};
union cvmx_sli_s2m_portx_ctl {
uint64_t u64;
struct cvmx_sli_s2m_portx_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t wind_d:1;
- uint64_t bar0_d:1;
- uint64_t mrrs:3;
-#else
- uint64_t mrrs:3;
- uint64_t bar0_d:1;
- uint64_t wind_d:1;
- uint64_t reserved_5_63:59;
-#endif
- } s;
- struct cvmx_sli_s2m_portx_ctl_s cn61xx;
- struct cvmx_sli_s2m_portx_ctl_s cn63xx;
- struct cvmx_sli_s2m_portx_ctl_s cn63xxp1;
- struct cvmx_sli_s2m_portx_ctl_s cn66xx;
- struct cvmx_sli_s2m_portx_ctl_s cn68xx;
- struct cvmx_sli_s2m_portx_ctl_s cn68xxp1;
- struct cvmx_sli_s2m_portx_ctl_s cnf71xx;
-};
-
-union cvmx_sli_scratch_1 {
- uint64_t u64;
- struct cvmx_sli_scratch_1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_scratch_1_s cn61xx;
- struct cvmx_sli_scratch_1_s cn63xx;
- struct cvmx_sli_scratch_1_s cn63xxp1;
- struct cvmx_sli_scratch_1_s cn66xx;
- struct cvmx_sli_scratch_1_s cn68xx;
- struct cvmx_sli_scratch_1_s cn68xxp1;
- struct cvmx_sli_scratch_1_s cnf71xx;
-};
-
-union cvmx_sli_scratch_2 {
- uint64_t u64;
- struct cvmx_sli_scratch_2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t data:64;
-#else
- uint64_t data:64;
-#endif
- } s;
- struct cvmx_sli_scratch_2_s cn61xx;
- struct cvmx_sli_scratch_2_s cn63xx;
- struct cvmx_sli_scratch_2_s cn63xxp1;
- struct cvmx_sli_scratch_2_s cn66xx;
- struct cvmx_sli_scratch_2_s cn68xx;
- struct cvmx_sli_scratch_2_s cn68xxp1;
- struct cvmx_sli_scratch_2_s cnf71xx;
-};
-
-union cvmx_sli_state1 {
- uint64_t u64;
- struct cvmx_sli_state1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t cpl1:12;
- uint64_t cpl0:12;
- uint64_t arb:1;
- uint64_t csr:39;
-#else
- uint64_t csr:39;
- uint64_t arb:1;
- uint64_t cpl0:12;
- uint64_t cpl1:12;
-#endif
- } s;
- struct cvmx_sli_state1_s cn61xx;
- struct cvmx_sli_state1_s cn63xx;
- struct cvmx_sli_state1_s cn63xxp1;
- struct cvmx_sli_state1_s cn66xx;
- struct cvmx_sli_state1_s cn68xx;
- struct cvmx_sli_state1_s cn68xxp1;
- struct cvmx_sli_state1_s cnf71xx;
-};
-
-union cvmx_sli_state2 {
- uint64_t u64;
- struct cvmx_sli_state2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t nnp1:8;
- uint64_t reserved_47_47:1;
- uint64_t rac:1;
- uint64_t csm1:15;
- uint64_t csm0:15;
- uint64_t nnp0:8;
- uint64_t nnd:8;
-#else
- uint64_t nnd:8;
- uint64_t nnp0:8;
- uint64_t csm0:15;
- uint64_t csm1:15;
- uint64_t rac:1;
- uint64_t reserved_47_47:1;
- uint64_t nnp1:8;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_sli_state2_s cn61xx;
- struct cvmx_sli_state2_s cn63xx;
- struct cvmx_sli_state2_s cn63xxp1;
- struct cvmx_sli_state2_s cn66xx;
- struct cvmx_sli_state2_s cn68xx;
- struct cvmx_sli_state2_s cn68xxp1;
- struct cvmx_sli_state2_s cnf71xx;
-};
-
-union cvmx_sli_state3 {
- uint64_t u64;
- struct cvmx_sli_state3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t psm1:15;
- uint64_t psm0:15;
- uint64_t nsm1:13;
- uint64_t nsm0:13;
-#else
- uint64_t nsm0:13;
- uint64_t nsm1:13;
- uint64_t psm0:15;
- uint64_t psm1:15;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_sli_state3_s cn61xx;
- struct cvmx_sli_state3_s cn63xx;
- struct cvmx_sli_state3_s cn63xxp1;
- struct cvmx_sli_state3_s cn66xx;
- struct cvmx_sli_state3_s cn68xx;
- struct cvmx_sli_state3_s cn68xxp1;
- struct cvmx_sli_state3_s cnf71xx;
-};
-
-union cvmx_sli_tx_pipe {
- uint64_t u64;
- struct cvmx_sli_tx_pipe_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_24_63:40;
- uint64_t nump:8;
- uint64_t reserved_7_15:9;
- uint64_t base:7;
-#else
- uint64_t base:7;
- uint64_t reserved_7_15:9;
- uint64_t nump:8;
- uint64_t reserved_24_63:40;
-#endif
+ __BITFIELD_FIELD(uint64_t reserved_5_63:59,
+ __BITFIELD_FIELD(uint64_t wind_d:1,
+ __BITFIELD_FIELD(uint64_t bar0_d:1,
+ __BITFIELD_FIELD(uint64_t mrrs:3,
+ ;))))
} s;
- struct cvmx_sli_tx_pipe_s cn68xx;
- struct cvmx_sli_tx_pipe_s cn68xxp1;
};
-union cvmx_sli_win_rd_addr {
- uint64_t u64;
- struct cvmx_sli_win_rd_addr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_51_63:13;
- uint64_t ld_cmd:2;
- uint64_t iobit:1;
- uint64_t rd_addr:48;
-#else
- uint64_t rd_addr:48;
- uint64_t iobit:1;
- uint64_t ld_cmd:2;
- uint64_t reserved_51_63:13;
-#endif
- } s;
- struct cvmx_sli_win_rd_addr_s cn61xx;
- struct cvmx_sli_win_rd_addr_s cn63xx;
- struct cvmx_sli_win_rd_addr_s cn63xxp1;
- struct cvmx_sli_win_rd_addr_s cn66xx;
- struct cvmx_sli_win_rd_addr_s cn68xx;
- struct cvmx_sli_win_rd_addr_s cn68xxp1;
- struct cvmx_sli_win_rd_addr_s cnf71xx;
-};
-
-union cvmx_sli_win_rd_data {
- uint64_t u64;
- struct cvmx_sli_win_rd_data_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rd_data:64;
-#else
- uint64_t rd_data:64;
-#endif
- } s;
- struct cvmx_sli_win_rd_data_s cn61xx;
- struct cvmx_sli_win_rd_data_s cn63xx;
- struct cvmx_sli_win_rd_data_s cn63xxp1;
- struct cvmx_sli_win_rd_data_s cn66xx;
- struct cvmx_sli_win_rd_data_s cn68xx;
- struct cvmx_sli_win_rd_data_s cn68xxp1;
- struct cvmx_sli_win_rd_data_s cnf71xx;
-};
-
-union cvmx_sli_win_wr_addr {
- uint64_t u64;
- struct cvmx_sli_win_wr_addr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t iobit:1;
- uint64_t wr_addr:45;
- uint64_t reserved_0_2:3;
-#else
- uint64_t reserved_0_2:3;
- uint64_t wr_addr:45;
- uint64_t iobit:1;
- uint64_t reserved_49_63:15;
-#endif
- } s;
- struct cvmx_sli_win_wr_addr_s cn61xx;
- struct cvmx_sli_win_wr_addr_s cn63xx;
- struct cvmx_sli_win_wr_addr_s cn63xxp1;
- struct cvmx_sli_win_wr_addr_s cn66xx;
- struct cvmx_sli_win_wr_addr_s cn68xx;
- struct cvmx_sli_win_wr_addr_s cn68xxp1;
- struct cvmx_sli_win_wr_addr_s cnf71xx;
-};
-
-union cvmx_sli_win_wr_data {
- uint64_t u64;
- struct cvmx_sli_win_wr_data_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t wr_data:64;
-#else
- uint64_t wr_data:64;
-#endif
- } s;
- struct cvmx_sli_win_wr_data_s cn61xx;
- struct cvmx_sli_win_wr_data_s cn63xx;
- struct cvmx_sli_win_wr_data_s cn63xxp1;
- struct cvmx_sli_win_wr_data_s cn66xx;
- struct cvmx_sli_win_wr_data_s cn68xx;
- struct cvmx_sli_win_wr_data_s cn68xxp1;
- struct cvmx_sli_win_wr_data_s cnf71xx;
-};
-
-union cvmx_sli_win_wr_mask {
- uint64_t u64;
- struct cvmx_sli_win_wr_mask_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t wr_mask:8;
-#else
- uint64_t wr_mask:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_sli_win_wr_mask_s cn61xx;
- struct cvmx_sli_win_wr_mask_s cn63xx;
- struct cvmx_sli_win_wr_mask_s cn63xxp1;
- struct cvmx_sli_win_wr_mask_s cn66xx;
- struct cvmx_sli_win_wr_mask_s cn68xx;
- struct cvmx_sli_win_wr_mask_s cn68xxp1;
- struct cvmx_sli_win_wr_mask_s cnf71xx;
-};
-
-union cvmx_sli_window_ctl {
+union cvmx_sli_mem_access_subidx {
uint64_t u64;
- struct cvmx_sli_window_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t time:32;
-#else
- uint64_t time:32;
- uint64_t reserved_32_63:32;
-#endif
+ struct cvmx_sli_mem_access_subidx_s {
+ __BITFIELD_FIELD(uint64_t reserved_43_63:21,
+ __BITFIELD_FIELD(uint64_t zero:1,
+ __BITFIELD_FIELD(uint64_t port:3,
+ __BITFIELD_FIELD(uint64_t nmerge:1,
+ __BITFIELD_FIELD(uint64_t esr:2,
+ __BITFIELD_FIELD(uint64_t esw:2,
+ __BITFIELD_FIELD(uint64_t wtype:2,
+ __BITFIELD_FIELD(uint64_t rtype:2,
+ __BITFIELD_FIELD(uint64_t ba:30,
+ ;)))))))))
} s;
- struct cvmx_sli_window_ctl_s cn61xx;
- struct cvmx_sli_window_ctl_s cn63xx;
- struct cvmx_sli_window_ctl_s cn63xxp1;
- struct cvmx_sli_window_ctl_s cn66xx;
- struct cvmx_sli_window_ctl_s cn68xx;
- struct cvmx_sli_window_ctl_s cn68xxp1;
- struct cvmx_sli_window_ctl_s cnf71xx;
+ struct cvmx_sli_mem_access_subidx_cn68xx {
+ __BITFIELD_FIELD(uint64_t reserved_43_63:21,
+ __BITFIELD_FIELD(uint64_t zero:1,
+ __BITFIELD_FIELD(uint64_t port:3,
+ __BITFIELD_FIELD(uint64_t nmerge:1,
+ __BITFIELD_FIELD(uint64_t esr:2,
+ __BITFIELD_FIELD(uint64_t esw:2,
+ __BITFIELD_FIELD(uint64_t wtype:2,
+ __BITFIELD_FIELD(uint64_t rtype:2,
+ __BITFIELD_FIELD(uint64_t ba:28,
+ __BITFIELD_FIELD(uint64_t reserved_0_1:2,
+ ;))))))))))
+ } cn68xx;
};
#endif
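For reference, the hunks above replace the open-coded #ifdef __BIG_ENDIAN_BITFIELD pairs with the __BITFIELD_FIELD helper from arch/mips/include/uapi/asm/bitfield.h, which declares each field once in most-significant-first order and lets the preprocessor reverse the list on little-endian builds. A minimal sketch of the pattern (the union and field names below are illustrative, not taken from the patch):

/* Sketch of the __BITFIELD_FIELD idea; illustrative names only. */
#include <stdint.h>

#ifdef __MIPSEB__			/* big endian: emit fields as written */
#define __BITFIELD_FIELD(field, more)	\
	field;				\
	more
#else					/* little endian: emit trailing fields first */
#define __BITFIELD_FIELD(field, more)	\
	more				\
	field;
#endif

union example_cnt {
	uint64_t u64;
	struct {
		/* declared MSB-first; reversed automatically on LE */
		__BITFIELD_FIELD(uint64_t reserved_32_63:32,
		__BITFIELD_FIELD(uint64_t cnt:32,
		;))
	} s;
};

On a big-endian build this expands to reserved_32_63 followed by cnt; on little-endian the order flips, which is exactly what the deleted #ifdef blocks spelled out by hand.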
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 2530e8731c8a..9742202f2a32 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2017 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -62,7 +62,6 @@ enum cvmx_mips_space {
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-l2c-defs.h>
-#include <asm/octeon/cvmx-l2d-defs.h>
#include <asm/octeon/cvmx-l2t-defs.h>
#include <asm/octeon/cvmx-led-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index a8705f6c8180..a1bdb1ea5234 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -110,6 +110,32 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pud_t *pud;
+
+ pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER);
+ if (pud)
+ pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
+ return pud;
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ free_pages((unsigned long)pud, PUD_ORDER);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+ set_pgd(pgd, __pgd((unsigned long)pud));
+}
+
+#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
+
+#endif /* __PAGETABLE_PUD_FOLDED */
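A note on why pud_alloc_one() seeds the fresh table with invalid_pmd_table rather than zero-filling it: the MIPS software table walk has no per-level presence check on the refill fast path, so an empty entry must point at a valid dummy next-level table. A hedged sketch of the resulting chain (not code from the patch):

/*
 * Empty-entry chain, assuming the usual MIPS invalid tables:
 *
 *   pgd entry -> invalid_pud_table
 *   pud entry -> invalid_pmd_table
 *   pmd entry -> invalid_pte_table -> invalid PTE, faults cleanly
 *
 * This is why pud_init() takes invalid_pmd_table as its fill value.
 */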
+
#define check_pgt_cache() do { } while (0)
extern void pagetable_init(void);
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6f94bed571c4..74afe8c76bdd 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -19,6 +19,10 @@
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
+
extern int temp_tlb_entry;
/*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#define VMALLOC_START MAP_BASE
-#define PKMAP_BASE (0xfe000000UL)
+#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 130a2a6c1531..67fe6dc5211c 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -20,7 +20,7 @@
#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
-#else
+#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
#include <asm-generic/pgtable-nopud.h>
#endif
@@ -54,9 +54,18 @@
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
+# ifdef __PAGETABLE_PUD_FOLDED
+# define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+# endif
+#endif
-#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif
+
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -79,8 +88,13 @@
* of virtual address space.
*/
#ifdef CONFIG_PAGE_SIZE_4KB
-#define PGD_ORDER 1
-#define PUD_ORDER aieeee_attempt_to_allocate_pud
+# ifdef CONFIG_MIPS_VA_BITS_48
+# define PGD_ORDER 0
+# define PUD_ORDER 0
+# else
+# define PGD_ORDER 1
+# define PUD_ORDER aieeee_attempt_to_allocate_pud
+# endif
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
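The new orders work out as follows; a worked example assuming 4K pages (PAGE_SHIFT = 12) and 8-byte table entries, so each level contributes PAGE_SHIFT + ORDER - 3 = 9 index bits:

/*
 * Worked example (4K pages, CONFIG_MIPS_VA_BITS_48; a sketch of the
 * macro arithmetic in this file, not new definitions):
 *
 *   PMD_SHIFT   = PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3) = 12 + 9 = 21
 *   PUD_SHIFT   = PMD_SHIFT  + (PAGE_SHIFT + PMD_ORDER - 3) = 21 + 9 = 30
 *   PGDIR_SHIFT = PUD_SHIFT  + (PAGE_SHIFT + PUD_ORDER - 3) = 30 + 9 = 39
 *
 * A single-page PGD (PGD_ORDER = 0) indexes 9 more bits, so the four
 * levels plus the 12-bit page offset cover 39 + 9 = 48 bits of VA.
 */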
@@ -118,6 +132,9 @@
#endif
#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#ifndef __PAGETABLE_PUD_FOLDED
+#define PTRS_PER_PUD ((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
@@ -134,7 +151,7 @@
#define VMALLOC_START (MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END \
(MAP_BASE + \
- min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
(1UL << cpu_vmbits)) - (1UL << 32))
#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
@@ -150,12 +167,72 @@
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_ERROR(e) \
+ printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
+#ifndef __PAGETABLE_PUD_FOLDED
+/*
+ * For 4-level pagetables we define these ourselves; for 3-level the
+ * definitions are below, and for 2-level they are supplied by
+ * <asm-generic/pgtable-nopmd.h>.

+ */
+typedef struct { unsigned long pud; } pud_t;
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+
+extern pud_t invalid_pud_table[PTRS_PER_PUD];
+
+/*
+ * Empty pgd entries point to the invalid_pud_table.
+ */
+static inline int pgd_none(pgd_t pgd)
+{
+ return pgd_val(pgd) == (unsigned long)invalid_pud_table;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+ if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
+ return 1;
+
+ return 0;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+ return pgd_val(pgd) != (unsigned long)invalid_pud_table;
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+ pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
+}
+
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+ return pgd_val(pgd);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
+{
+ *pgd = pgdval;
+}
+
+#endif
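With these helpers in place, the new level slots into the usual software table walk between pgd and pmd. A hedged sketch of a lookup using the generic kernel helper names (illustrative only, not part of the patch):

/* Illustrative 4-level walk; helper names are the generic kernel ones. */
static pte_t *walk_example(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);		/* the level added here */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}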
#ifndef __PAGETABLE_PMD_FOLDED
/*
@@ -281,6 +358,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
* Initialize a new pgd / pmd table with invalid pointers.
*/
extern void pgd_init(unsigned long page);
+extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);
/*
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index e9a9e2ade1d2..3748f4d120a5 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -21,77 +21,46 @@
#define UASM_EXPORT_SYMBOL(sym)
#endif
-#define _UASM_ISA_CLASSIC 0
-#define _UASM_ISA_MICROMIPS 1
-
-#ifndef UASM_ISA
-#ifdef CONFIG_CPU_MICROMIPS
-#define UASM_ISA _UASM_ISA_MICROMIPS
-#else
-#define UASM_ISA _UASM_ISA_CLASSIC
-#endif
-#endif
-
-#if (UASM_ISA == _UASM_ISA_CLASSIC)
-#ifdef CONFIG_CPU_MICROMIPS
-#define ISAOPC(op) CL_uasm_i##op
-#define ISAFUNC(x) CL_##x
-#else
-#define ISAOPC(op) uasm_i##op
-#define ISAFUNC(x) x
-#endif
-#elif (UASM_ISA == _UASM_ISA_MICROMIPS)
-#ifdef CONFIG_CPU_MICROMIPS
-#define ISAOPC(op) uasm_i##op
-#define ISAFUNC(x) x
-#else
-#define ISAOPC(op) MM_uasm_i##op
-#define ISAFUNC(x) MM_##x
-#endif
-#else
-#error Unsupported micro-assembler ISA!!!
-#endif
-
#define Ip_u1u2u3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u2u1(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
+void uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_s3s1s2(op) \
-void ISAOPC(op)(u32 **buf, int a, int b, int c)
+void uasm_i##op(u32 **buf, int a, int b, int c)
#define Ip_u2u1s3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2u1msbu3(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
unsigned int d)
#define Ip_u1u2(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u2u1(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+void uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \
-void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
+void uasm_i##op(u32 **buf, unsigned int a, signed int b)
-#define Ip_u1(op) void ISAOPC(op)(u32 **buf, unsigned int a)
+#define Ip_u1(op) void uasm_i##op(u32 **buf, unsigned int a)
-#define Ip_0(op) void ISAOPC(op)(u32 **buf)
+#define Ip_0(op) void uasm_i##op(u32 **buf)
Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
@@ -138,6 +107,7 @@ Ip_u2s3u1(_lb);
Ip_u2s3u1(_ld);
Ip_u3u1u2(_ldx);
Ip_u2s3u1(_lh);
+Ip_u2s3u1(_lhu);
Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld);
Ip_u1s2(_lui);
@@ -190,20 +160,20 @@ struct uasm_label {
int lab;
};
-void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
+void uasm_build_label(struct uasm_label **lab, u32 *addr,
int lid);
#ifdef CONFIG_64BIT
-int ISAFUNC(uasm_in_compat_space_p)(long addr);
+int uasm_in_compat_space_p(long addr);
#endif
-int ISAFUNC(uasm_rel_hi)(long val);
-int ISAFUNC(uasm_rel_lo)(long val);
-void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
-void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
+int uasm_rel_hi(long val);
+int uasm_rel_lo(long val);
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \
-static inline void ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
+static inline void uasm_l##lb(struct uasm_label **lab, u32 *addr) \
{ \
- ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
+ uasm_build_label(lab, addr, label##lb); \
}
/* convenience macros for instructions */
@@ -255,27 +225,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- ISAOPC(_drotr)(p, a1, a2, a3);
+ uasm_i_drotr(p, a1, a2, a3);
else
- ISAOPC(_drotr32)(p, a1, a2, a3 - 32);
+ uasm_i_drotr32(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- ISAOPC(_dsll)(p, a1, a2, a3);
+ uasm_i_dsll(p, a1, a2, a3);
else
- ISAOPC(_dsll32)(p, a1, a2, a3 - 32);
+ uasm_i_dsll32(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
- ISAOPC(_dsrl)(p, a1, a2, a3);
+ uasm_i_dsrl(p, a1, a2, a3);
else
- ISAOPC(_dsrl32)(p, a1, a2, a3 - 32);
+ uasm_i_dsrl32(p, a1, a2, a3 - 32);
}
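The *_safe variants above exist because MIPS64 shift instructions encode only a 5-bit shift amount; amounts of 32-63 need the dedicated *32 forms. A small hedged usage sketch (register numbers are arbitrary):

/* Usage sketch: one call covers the full 0-63 shift range. */
u32 insn_buf[4];
u32 *p = insn_buf;

uasm_i_dsrl_safe(&p, 5, 5, 3);	/* emits dsrl   $5, $5, 3 */
uasm_i_dsrl_safe(&p, 5, 5, 34);	/* emits dsrl32 $5, $5, 2 */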
/* Handle relocations. */
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index f2cf41461146..a0266feba9e6 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -2,40 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += ipcbuf.h
-
-header-y += auxvec.h
-header-y += bitfield.h
-header-y += bitsperlong.h
-header-y += break.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += inst.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += sgidefs.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += sysmips.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index b11facd11c9d..f702a459a830 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
}
/* Compact branch: BNEZC || JIALC */
- if (insn.i_format.rs)
+ if (!insn.i_format.rs) {
+ /* JIALC: set $31/ra */
regs->regs[31] = epc + 4;
+ }
regs->cp0_epc += 8;
break;
#endif
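The fix above hinges on how the shared opcode is decoded: rs != 0 encodes BNEZC (a compact branch with no link), while rs == 0 encodes JIALC (jump-and-link, with the target register in rt). The old test was inverted and clobbered $ra on BNEZC. Illustrative instances:

/*
 * Illustrative encodings for the shared compact-branch opcode:
 *
 *   bnezc $2, target    rs = 2 (!= 0): branch only, $31 untouched
 *   jialc $4, 16        rs = 0:        $31 = epc + 4, then jump
 */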
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 3382892544f0..1aba27786bd5 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -34,6 +34,7 @@
/* Hardware capabilities */
unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
/*
* Get the FPU Implementation/Revision.
@@ -1955,6 +1956,12 @@ void cpu_probe(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
+ /*
+ * Set a default elf platform; the CPU probe may later
+ * overwrite it with a more precise value
+ */
+ set_elf_platform(cpu, "mips");
+
c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 8d83fc2a96b7..38a302919e6b 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
+#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -119,6 +120,7 @@ work_pending:
andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
+ TRACE_IRQS_OFF
jal schedule
local_irq_disable # make sure need_resched and
@@ -155,6 +157,7 @@ syscall_exit_work:
beqz t0, work_pending # trace bit set?
local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
+ TRACE_IRQS_ON
move a0, sp
jal syscall_trace_leave
b resume_userspace
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 30a3b75e88eb..9d9b8fbae202 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)
#endif
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
- if (ip >= (unsigned long)_stext &&
- ip <= (unsigned long)_etext)
- return 1;
- return 0;
-}
-
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
* If ip is in kernel space, no long call, otherwise, long call is
* needed.
*/
- new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+ new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned int new;
unsigned long ip = rec->ip;
- new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+ new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
#ifdef CONFIG_64BIT
return ftrace_modify_code(ip, new);
#else
- return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+ return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
INSN_NOP : insn_la_mcount[1]);
#endif
}
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
* instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
* kernel, move after the instruction "move ra, at"(offset is 16)
*/
- ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+ ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
/*
* search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
* entries configured through the tracing/set_graph_function interface.
*/
- insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
/* Only trace if the calling function expects to */
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index cf052204eb0a..d1bb506adc10 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
beq t0, t1, dtb_found
#endif
li t1, -2
- beq a0, t1, dtb_found
move t2, a1
+ beq a0, t1, dtb_found
li t2, 0
dtb_found:
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 550e7d03090a..ae64c8f56a8c 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1096,10 +1096,20 @@ repeat:
}
break;
- case beql_op:
- case bnel_op:
case blezl_op:
case bgtzl_op:
+ /*
+ * For BLEZL and BGTZL the rt field must be set to 0; if it
+ * is not, this may instead be the encoding of a MIPS R6
+ * instruction, so return to CPU execution in that case
+ */
+ if (MIPSInst_RT(inst)) {
+ err = SIGILL;
+ break;
+ }
+ /* fall through */
+ case beql_op:
+ case bnel_op:
if (delay_slot(regs)) {
err = SIGILL;
break;
@@ -2329,6 +2339,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
__this_cpu_write((mipsr2bremustats).bgezl, 0);
__this_cpu_write((mipsr2bremustats).bltzll, 0);
__this_cpu_write((mipsr2bremustats).bgezll, 0);
+ __this_cpu_write((mipsr2bremustats).bltzall, 0);
+ __this_cpu_write((mipsr2bremustats).bgezall, 0);
__this_cpu_write((mipsr2bremustats).bltzal, 0);
__this_cpu_write((mipsr2bremustats).bgezal, 0);
__this_cpu_write((mipsr2bremustats).beql, 0);
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 313a88b2973f..f3e301f95aef 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
break;
case CPU_P5600:
case CPU_P6600:
- case CPU_I6400:
/* 8-bit event numbers */
raw_id = config & 0x1ff;
base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
raw_event.range = P;
#endif
break;
+ case CPU_I6400:
+ /* 8-bit event numbers */
+ base_id = config & 0xff;
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ break;
case CPU_1004K:
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5f928c34c148..d99416094ba9 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
- unsigned dlinesz = cpu_data[cpu].dcache.linesz;
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
}
if (!per_cpu(ready_count, core)) {
- core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+ core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
- per_cpu(ready_count_alloc, core) = core_rc;
-
- /* Ensure ready_count is aligned to a cacheline boundary */
- core_rc += dlinesz - 1;
- core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
per_cpu(ready_count, core) = core_rc;
}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b68e10fc453d..5351e1f3950d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -114,13 +114,12 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* Copy architecture-specific thread state
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+ unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
- p->set_child_tid = p->clear_child_tid = NULL;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
@@ -176,7 +175,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
if (clone_flags & CLONE_SETTLS)
- ti->tp_value = regs->regs[7];
+ ti->tp_value = tls;
return 0;
}
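For context, the old code recovered the TLS pointer from the syscall register file (regs->regs[7], i.e. $a3); with copy_thread_tls() it arrives as an explicit parameter, so kernel-internal clone paths that never populated user registers get the right value. A hedged sketch of the flow:

/*
 * Sketch of the CLONE_SETTLS flow after this change (the call chain
 * shown is the generic one, for illustration):
 *
 *   clone(flags | CLONE_SETTLS, ..., tls)
 *     -> copy_process()
 *       -> copy_thread_tls(flags, usp, arg, p, tls)
 *            ti->tp_value = tls;   // no read of regs->regs[7]
 */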
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 758577861523..7b386d54fd65 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -25,12 +25,6 @@
/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
#undef fp
-/*
- * Offset to the current process status flags, the first 32 bytes of the
- * stack are not used.
- */
-#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
-
#ifndef USE_ALTERNATE_RESUME_IMPL
/*
* task_struct *resume(task_struct *prev, task_struct *next,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 795b4aaf8927..36954ddd0b9f 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -8,6 +8,7 @@
* option) any later version.
*/
+#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
@@ -408,7 +409,6 @@ static int cps_cpu_disable(void)
return 0;
}
-static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
CPU_DEATH_HALT,
@@ -443,7 +443,7 @@ void play_dead(void)
}
/* This CPU has chosen its way out */
- complete(&cpu_death_chosen);
+ (void)cpu_report_death();
if (cpu_death == CPU_DEATH_HALT) {
vpe_id = cpu_vpe_id(&cpu_data[cpu]);
@@ -492,8 +492,7 @@ static void cps_cpu_die(unsigned int cpu)
int err;
/* Wait for the cpu to choose its way out */
- if (!wait_for_completion_timeout(&cpu_death_chosen,
- msecs_to_jiffies(5000))) {
+ if (!cpu_wait_death(cpu, 5)) {
pr_err("CPU%u: didn't offline\n", cpu);
return;
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index e398cbc3d776..ed6b4df583ea 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -83,6 +83,8 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
if (tc != 0)
smvp_copy_vpe_config();
+ cpu_data[ncpu].vpe_id = tc;
+
return ncpu;
}
@@ -114,49 +116,6 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
write_tc_c0_tchalt(TCHALT_H);
}
-static void vsmp_send_ipi_single(int cpu, unsigned int action)
-{
- int i;
- unsigned long flags;
- int vpflags;
-
-#ifdef CONFIG_MIPS_GIC
- if (gic_present) {
- mips_smp_send_ipi_single(cpu, action);
- return;
- }
-#endif
- local_irq_save(flags);
-
- vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
-
- switch (action) {
- case SMP_CALL_FUNCTION:
- i = C_SW1;
- break;
-
- case SMP_RESCHEDULE_YOURSELF:
- default:
- i = C_SW0;
- break;
- }
-
- /* 1:1 mapping of vpe and tc... */
- settc(cpu);
- write_vpe_c0_cause(read_vpe_c0_cause() | i);
- evpe(vpflags);
-
- local_irq_restore(flags);
-}
-
-static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
- unsigned int i;
-
- for_each_cpu(i, mask)
- vsmp_send_ipi_single(i, action);
-}
-
static void vsmp_init_secondary(void)
{
#ifdef CONFIG_MIPS_GIC
@@ -281,8 +240,8 @@ static void __init vsmp_prepare_cpus(unsigned int max_cpus)
}
struct plat_smp_ops vsmp_smp_ops = {
- .send_ipi_single = vsmp_send_ipi_single,
- .send_ipi_mask = vsmp_send_ipi_mask,
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
.init_secondary = vsmp_init_secondary,
.smp_finish = vsmp_smp_finish,
.boot_secondary = vsmp_boot_secondary,
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6e71130549ea..aba1afb64b62 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -261,16 +261,20 @@ int mips_smp_ipi_allocate(const struct cpumask *mask)
ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
/*
- * There are systems which only use IPI domains some of the time,
- * depending upon configuration we don't know until runtime. An
- * example is Malta where we may compile in support for GIC & the
- * MT ASE, but run on a system which has multiple VPEs in a single
- * core and doesn't include a GIC. Until all IPI implementations
- * have been converted to use IPI domains the best we can do here
- * is to return & hope some other code sets up the IPIs.
+ * There are systems which use IPI IRQ domains, but only have one
+ * registered when some runtime condition is met. For example a Malta
+ * kernel may include support for GIC & CPU interrupt controller IPI
+ * IRQ domains, but if run on a system with no GIC & no MT ASE then
+ * neither will be supported or registered.
+ *
+ * We only have a problem if we're actually using multiple CPUs, so fail
+ * loudly in that case. Otherwise simply return and skip IPI setup, since
+ * we're running with only a single CPU.
*/
- if (!ipidomain)
+ if (!ipidomain) {
+ BUG_ON(num_present_cpus() > 1);
return 0;
+ }
virq = irq_reserve_ipi(ipidomain, mask);
BUG_ON(!virq);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9681b5877140..38dfa27730ff 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
+
+ regs.cp0_status = KSU_KERNEL;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 7c6336dd2638..7cd92166a0b9 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
bool user, bool kernel)
{
- int idx_user, idx_kernel;
+ /*
+ * Initialize idx_user and idx_kernel to work around a bogus
+ * maybe-uninitialized warning when using GCC 6.
+ */
+ int idx_user = 0, idx_kernel = 0;
unsigned long flags, old_entryhi;
local_irq_save(flags);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 0ddf3698b85d..33728b7af426 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -274,47 +274,6 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
}
-#ifdef CONFIG_MIPS_MT_SMP
-void __init arch_init_ipiirq(int irq, struct irqaction *action)
-{
- setup_irq(irq, action);
- irq_set_handler(irq, handle_percpu_irq);
-}
-
-static void ltq_sw0_irqdispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ltq_sw1_irqdispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
- scheduler_ipi();
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- generic_smp_call_function_interrupt();
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_call"
-};
-#endif
-
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -402,17 +361,6 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
&irq_domain_ops, 0);
-#if defined(CONFIG_MIPS_MT_SMP)
- if (cpu_has_vint) {
- pr_info("Setting up IPI vectored interrupts\n");
- set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
- set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
- }
- arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
- &irq_resched);
- arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
-#endif
-
#ifndef CONFIG_MIPS_MT_SMP
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index a298ac93edcc..f12fde10c8ad 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -439,6 +439,8 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
unsigned int fcr31;
unsigned int bit = 0;
+ unsigned int bit0;
+ union fpureg *fpr;
switch (insn.i_format.opcode) {
case spec_op:
@@ -706,14 +708,14 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
((insn.i_format.rs == bc1eqz_op) ||
(insn.i_format.rs == bc1nez_op))) {
bit = 0;
+ fpr = &current->thread.fpu.fpr[insn.i_format.rt];
+ bit0 = get_fpr32(fpr, 0) & 0x1;
switch (insn.i_format.rs) {
case bc1eqz_op:
- if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
- bit = 1;
+ bit = bit0 == 0;
break;
case bc1nez_op:
- if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
- bit = 1;
+ bit = bit0 != 0;
break;
}
if (bit)
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 4a2d03c72959..caa62f20a888 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
return ieee754dp_nanxcpt(z);
case IEEE754_CLASS_DNORM:
DPDNORMZ;
- /* QNAN is handled separately below */
+ /* QNAN and ZERO cases are handled separately below */
}
switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
}
assert(rm & (DP_HIDDEN_BIT << 3));
+ if (zc == IEEE754_CLASS_ZERO)
+ return ieee754dp_format(rs, re, rm);
+
/* And now the addition */
assert(zm & DP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index a8cd8b4f235e..c91d5e5d9b5f 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -54,7 +54,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
return ieee754sp_nanxcpt(z);
case IEEE754_CLASS_DNORM:
SPDNORMZ;
- /* QNAN is handled separately below */
+ /* QNAN and ZERO cases are handled separately below */
}
switch (CLPAIR(xc, yc)) {
@@ -203,6 +203,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
}
assert(rm & (SP_HIDDEN_BIT << 3));
+ if (zc == IEEE754_CLASS_ZERO)
+ return ieee754sp_format(rs, re, rm);
+
/* And now the addition */
assert(zm & SP_HIDDEN_BIT);
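The early return added above matters because the tail of the maddf implementation assumes z contributes a normal addend: the assert(zm & SP_HIDDEN_BIT) that follows can never hold for a zero z, so the correctly rounded product must be handed back before that path. Illustrative values:

/*
 * Case the new early return covers (illustrative values):
 *
 *   maddf.s  fd, fs, ft   with  fd = 0.0f, fs = 3.0f, ft = 2.0f
 *
 * must produce 6.0f; the early return yields the rounded product
 * directly instead of entering the addition path, whose hidden-bit
 * assertion a zero z cannot satisfy.
 */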
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index fe8df14b6169..e08598c70b3e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -68,12 +68,25 @@ static inline struct page *dma_addr_to_page(struct device *dev,
* systems and only the R10000 and R12000 are used in such systems, the
* SGI IP28 Indigo² rsp. SGI IP32 aka O2.
*/
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
- return !plat_device_is_coherent(dev) &&
- (boot_cpu_type() == CPU_R10000 ||
- boot_cpu_type() == CPU_R12000 ||
- boot_cpu_type() == CPU_BMIPS5000);
+ if (plat_device_is_coherent(dev))
+ return false;
+
+ switch (boot_cpu_type()) {
+ case CPU_R10000:
+ case CPU_R12000:
+ case CPU_BMIPS5000:
+ return true;
+
+ default:
+ /*
+ * Presence of MAARs suggests that the CPU supports
+ * speculatively prefetching data, and therefore requires
+ * the post-DMA flush/invalidate.
+ */
+ return cpu_has_maar;
+ }
}
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 3bef306cdfdb..4f8f5bf46977 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -267,19 +267,19 @@ do_sigbus:
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
- else
+
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
- printk("do_page_fault() #3: sending SIGBUS to %s for "
- "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
- tsk->comm,
- write ? "write access to" : "read access from",
- field, address,
- field, (unsigned long) regs->cp0_epc,
- field, (unsigned long) regs->regs[31]);
+ printk("do_page_fault() #3: sending SIGBUS to %s for "
+ "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+ tsk->comm,
+ write ? "write access to" : "read access from",
+ field, address,
+ field, (unsigned long) regs->cp0_epc,
+ field, (unsigned long) regs->regs[31]);
#endif
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
tsk->thread.cp0_badvaddr = address;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3ca20283b31e..8ce2983a7015 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -537,6 +537,9 @@ unsigned long pgd_current[NR_CPUS];
* it in the linker script.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 64dd8bdd92c3..28adeabe851f 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
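vm_start_gap() is the mm helper from the larger-stack-guard-gap work; unlike vma->vm_start it subtracts the guard gap below a downward-growing stack VMA, so a mapping hint abutting the stack is now rejected. Sketch of the check:

/*
 * Sketch (illustrative layout, stack grows down):
 *
 *   addr        addr+len       vm_start_gap(vma)      vma->vm_start
 *    |---- requested ----| ...  |----- guard gap -----|== stack ==
 *
 * The hint is accepted only if addr + len <= vm_start_gap(vma),
 * i.e. the mapping may no longer end inside the guard gap.
 */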
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911ba748..b19a3c506b1e 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
/*
* Fixed mappings:
*/
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
#ifdef CONFIG_HIGHMEM
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
pud = pud_offset(pgd, vaddr);
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 0ae7b28b4db5..6fd6e96fdebb 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -19,10 +19,12 @@ void pgd_init(unsigned long page)
unsigned long *p, *end;
unsigned long entry;
-#ifdef __PAGETABLE_PMD_FOLDED
- entry = (unsigned long)invalid_pte_table;
-#else
+#if !defined(__PAGETABLE_PUD_FOLDED)
+ entry = (unsigned long)invalid_pud_table;
+#elif !defined(__PAGETABLE_PMD_FOLDED)
entry = (unsigned long)invalid_pmd_table;
+#else
+ entry = (unsigned long)invalid_pte_table;
#endif
p = (unsigned long *) page;
@@ -64,6 +66,28 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
EXPORT_SYMBOL_GPL(pmd_init);
#endif
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_init(unsigned long addr, unsigned long pagetable)
+{
+ unsigned long *p, *end;
+
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PUD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+#endif
+
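pud_init() above follows the same pattern as the existing pgd_init()/pmd_init(): fill every slot of the new table with the next level's invalid table, unrolled eight entries per loop iteration. A note on the indexing, since it is easy to misread:

/*
 * Indexing note for the unrolled fill: p[0]..p[4] store entries 0-4,
 * then p += 8 advances the cursor, and p[-3]..p[-1] store entries
 * 5-7, so all eight slots per iteration are written exactly once.
 */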
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
@@ -87,6 +111,9 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init((unsigned long)invalid_pud_table, (unsigned long)invalid_pmd_table);
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
#endif
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4f642e07c2b1..ed1c5297547a 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -865,6 +865,13 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PUD_FOLDED
+ uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+ uasm_i_ld(p, ptr, 0, ptr); /* get pud pointer */
+ uasm_i_dsrl_safe(p, tmp, tmp, PUD_SHIFT - 3); /* get pud offset in bytes */
+ uasm_i_andi(p, tmp, tmp, (PTRS_PER_PUD - 1) << 3);
+ uasm_i_daddu(p, ptr, ptr, tmp); /* add in pud offset */
+#endif
#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
@@ -1184,6 +1191,21 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
}
+#ifndef __PAGETABLE_PUD_FOLDED
+ /* get pud offset in bytes */
+ uasm_i_dsrl_safe(p, scratch, tmp, PUD_SHIFT - 3);
+ uasm_i_andi(p, scratch, scratch, (PTRS_PER_PUD - 1) << 3);
+
+ if (use_lwx_insns()) {
+ UASM_i_LWX(p, ptr, scratch, ptr);
+ } else {
+ uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
+ UASM_i_LW(p, ptr, 0, ptr);
+ }
+ /* ptr contains a pointer to PMD entry */
+ /* tmp contains the address */
+#endif
+
#ifndef __PAGETABLE_PMD_FOLDED
/* get pmd offset in bytes */
uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 763d3f1edb8a..2277499fe6ae 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -103,6 +103,7 @@ static struct insn insn_table[] = {
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
{ insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lhu, M(lhu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
#ifndef CONFIG_CPU_MIPSR6
{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index a82970442b8a..730363b59bac 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -61,7 +61,7 @@ enum opcode {
insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl,
insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
- insn_xori, insn_yield, insn_lddir, insn_ldpte,
+ insn_xori, insn_yield, insn_lddir, insn_ldpte, insn_lhu,
};
struct insn {
@@ -297,6 +297,7 @@ I_u1(_jr)
I_u2s3u1(_lb)
I_u2s3u1(_ld)
I_u2s3u1(_lh)
+I_u2s3u1(_lhu)
I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
@@ -349,7 +350,7 @@ I_u2u1u3(_lddir)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
-void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
+void uasm_i_pref(u32 **buf, unsigned int a, signed int b,
unsigned int c)
{
if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
@@ -361,26 +362,26 @@ void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
else
build_insn(buf, insn_pref, c, a, b);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref));
+UASM_EXPORT_SYMBOL(uasm_i_pref);
#else
I_u2s3u1(_pref)
#endif
/* Handle labels. */
-void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
+void uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
(*lab)->addr = addr;
(*lab)->lab = lid;
(*lab)++;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
+UASM_EXPORT_SYMBOL(uasm_build_label);
-int ISAFUNC(uasm_in_compat_space_p)(long addr)
+int uasm_in_compat_space_p(long addr)
{
/* Is this address in 32bit compat space? */
return addr == (int)addr;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
+UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);
static int uasm_rel_highest(long val)
{
@@ -400,64 +401,64 @@ static int uasm_rel_higher(long val)
#endif
}
-int ISAFUNC(uasm_rel_hi)(long val)
+int uasm_rel_hi(long val)
{
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
+UASM_EXPORT_SYMBOL(uasm_rel_hi);
-int ISAFUNC(uasm_rel_lo)(long val)
+int uasm_rel_lo(long val)
{
return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
+UASM_EXPORT_SYMBOL(uasm_rel_lo);
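
uasm_rel_hi()/uasm_rel_lo() split an address so that a sign-extending 16-bit immediate sequence reconstructs it: hi is rounded up whenever lo comes out negative. A self-contained check of that identity (the two function bodies are copied from the code above; main() is only a demonstration harness):

```c
#include <stdio.h>

static int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
	long addr = 0x1234fedcL;

	/* lui loads rel_hi << 16; addiu sign-extends rel_lo; the sum
	 * must give back the original 32-bit address. */
	printf("%lx\n", ((long)rel_hi(addr) << 16) + rel_lo(addr));
	return 0;
}
```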
-void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
+void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
- if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
- ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
+ if (!uasm_in_compat_space_p(addr)) {
+ uasm_i_lui(buf, rs, uasm_rel_highest(addr));
if (uasm_rel_higher(addr))
- ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr));
- if (ISAFUNC(uasm_rel_hi(addr))) {
- ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
- ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
- ISAFUNC(uasm_rel_hi)(addr));
- ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16);
+ uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
+ if (uasm_rel_hi(addr)) {
+ uasm_i_dsll(buf, rs, rs, 16);
+ uasm_i_daddiu(buf, rs, rs,
+ uasm_rel_hi(addr));
+ uasm_i_dsll(buf, rs, rs, 16);
} else
- ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0);
+ uasm_i_dsll32(buf, rs, rs, 0);
} else
- ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr)));
+ uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
-UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
+UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);
-void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
+void UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
- ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
- if (ISAFUNC(uasm_rel_lo(addr))) {
- if (!ISAFUNC(uasm_in_compat_space_p)(addr))
- ISAFUNC(uasm_i_daddiu)(buf, rs, rs,
- ISAFUNC(uasm_rel_lo(addr)));
+ UASM_i_LA_mostly(buf, rs, addr);
+ if (uasm_rel_lo(addr)) {
+ if (!uasm_in_compat_space_p(addr))
+ uasm_i_daddiu(buf, rs, rs,
+ uasm_rel_lo(addr));
else
- ISAFUNC(uasm_i_addiu)(buf, rs, rs,
- ISAFUNC(uasm_rel_lo(addr)));
+ uasm_i_addiu(buf, rs, rs,
+ uasm_rel_lo(addr));
}
}
-UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
+UASM_EXPORT_SYMBOL(UASM_i_LA);
/* Handle relocations. */
-void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
+void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
(*rel)->addr = addr;
(*rel)->type = R_MIPS_PC16;
(*rel)->lab = lid;
(*rel)++;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
+UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);
static inline void __resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab);
-void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
+void uasm_resolve_relocs(struct uasm_reloc *rel,
struct uasm_label *lab)
{
struct uasm_label *l;
@@ -467,39 +468,39 @@ void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
if (rel->lab == l->lab)
__resolve_relocs(rel, l);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
+UASM_EXPORT_SYMBOL(uasm_resolve_relocs);
-void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
+void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end,
long off)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++)
if (rel->addr >= first && rel->addr < end)
rel->addr += off;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
+UASM_EXPORT_SYMBOL(uasm_move_relocs);
-void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
+void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end,
long off)
{
for (; lab->lab != UASM_LABEL_INVALID; lab++)
if (lab->addr >= first && lab->addr < end)
lab->addr += off;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
+UASM_EXPORT_SYMBOL(uasm_move_labels);
-void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
+void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
u32 *first, u32 *end, u32 *target)
{
long off = (long)(target - first);
memcpy(target, first, (end - first) * sizeof(u32));
- ISAFUNC(uasm_move_relocs(rel, first, end, off));
- ISAFUNC(uasm_move_labels(lab, first, end, off));
+ uasm_move_relocs(rel, first, end, off);
+ uasm_move_labels(lab, first, end, off);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
+UASM_EXPORT_SYMBOL(uasm_copy_handler);
-int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
+int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr
@@ -510,92 +511,92 @@ int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
return 0;
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
+UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);
/* Convenience functions for labeled branches. */
-void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bltz)(p, reg, 0);
+ uasm_i_bltz(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
+UASM_EXPORT_SYMBOL(uasm_il_bltz);
-void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
+void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_b)(p, 0);
+ uasm_i_b(p, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
+UASM_EXPORT_SYMBOL(uasm_il_b);
-void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1,
+void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
unsigned int r2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_beq)(p, r1, r2, 0);
+ uasm_i_beq(p, r1, r2, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq));
+UASM_EXPORT_SYMBOL(uasm_il_beq);
-void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_beqz)(p, reg, 0);
+ uasm_i_beqz(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
+UASM_EXPORT_SYMBOL(uasm_il_beqz);
-void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_beqzl)(p, reg, 0);
+ uasm_i_beqzl(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
+UASM_EXPORT_SYMBOL(uasm_il_beqzl);
-void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
+ uasm_i_bne(p, reg1, reg2, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
+UASM_EXPORT_SYMBOL(uasm_il_bne);
-void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bnez)(p, reg, 0);
+ uasm_i_bnez(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
+UASM_EXPORT_SYMBOL(uasm_il_bnez);
-void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bgezl)(p, reg, 0);
+ uasm_i_bgezl(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
+UASM_EXPORT_SYMBOL(uasm_il_bgezl);
-void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg,
int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bgez)(p, reg, 0);
+ uasm_i_bgez(p, reg, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
+UASM_EXPORT_SYMBOL(uasm_il_bgez);
-void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
+ uasm_i_bbit0(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
+UASM_EXPORT_SYMBOL(uasm_il_bbit0);
-void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
+void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
unsigned int bit, int lid)
{
uasm_r_mips_pc16(r, *p, lid);
- ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
+ uasm_i_bbit1(p, reg, bit, 0);
}
-UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1));
+UASM_EXPORT_SYMBOL(uasm_il_bbit1);
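
The uasm_il_* helpers pair a relocation record with a branch emitted at offset 0, to be patched once the label's address is known. A hedged usage sketch (kernel context assumed; buffer management and the register number are illustrative):

```c
/* Emit "branch to done if $2 == 0", then define the label and
 * resolve the recorded R_MIPS_PC16 relocation against it. */
enum { label_done = 1 };		/* 0 is UASM_LABEL_INVALID */

static struct uasm_label labels[4];	/* zero-terminated */
static struct uasm_reloc relocs[4];

static void emit_example(u32 **p)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	uasm_il_beqz(p, &r, 2, label_done);
	uasm_i_nop(p);			/* branch delay slot */
	uasm_build_label(&l, *p, label_done);
	uasm_resolve_relocs(relocs, labels);
}
```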
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 54f56d5a96c4..b0f9b188e833 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -145,56 +145,6 @@ static irqreturn_t corehi_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_MIPS_MT_SMP
-
-#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */
-#define C_RESCHED C_SW0
-#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for resched */
-#define C_CALL C_SW1
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-static void ipi_resched_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
- do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
- if (aprp_hook)
- aprp_hook();
-#endif
-
- scheduler_ipi();
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
- generic_smp_call_function_interrupt();
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI_call"
-};
-#endif /* CONFIG_MIPS_MT_SMP */
-
static struct irqaction corehi_irqaction = {
.handler = corehi_handler,
.name = "CoreHi",
@@ -222,12 +172,6 @@ static msc_irqmap_t msc_eicirqmap[] __initdata = {
static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
-void __init arch_init_ipiirq(int irq, struct irqaction *action)
-{
- setup_irq(irq, action);
- irq_set_handler(irq, handle_percpu_irq);
-}
-
void __init arch_init_irq(void)
{
int corehi_irq;
@@ -273,30 +217,11 @@ void __init arch_init_irq(void)
if (gic_present) {
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
+ } else if (cpu_has_veic) {
+ set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
+ corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
} else {
-#if defined(CONFIG_MIPS_MT_SMP)
- /* set up ipi interrupts */
- if (cpu_has_veic) {
- set_vi_handler (MSC01E_INT_SW0, ipi_resched_dispatch);
- set_vi_handler (MSC01E_INT_SW1, ipi_call_dispatch);
- cpu_ipi_resched_irq = MSC01E_INT_SW0;
- cpu_ipi_call_irq = MSC01E_INT_SW1;
- } else {
- cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE +
- MIPS_CPU_IPI_RESCHED_IRQ;
- cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE +
- MIPS_CPU_IPI_CALL_IRQ;
- }
- arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched);
- arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
-#endif
- if (cpu_has_veic) {
- set_vi_handler(MSC01E_INT_COREHI,
- corehi_irqdispatch);
- corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
- } else {
- corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
- }
+ corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
}
setup_irq(corehi_irq, &corehi_irqaction);
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 49a2e2226fee..44b925005dd3 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -365,6 +365,12 @@ static inline void emit_half_load(unsigned int reg, unsigned int base,
emit_instr(ctx, lh, reg, offset, base);
}
+static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lhu, reg, offset, base);
+}
+
static inline void emit_mul(unsigned int dst, unsigned int src1,
unsigned int src2, struct jit_ctx *ctx)
{
@@ -526,7 +532,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
u32 sflags, tmp_flags;
/* Adjust the stack pointer */
- emit_stack_offset(-align_sp(offset), ctx);
+ if (offset)
+ emit_stack_offset(-align_sp(offset), ctx);
tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
/* sflags is essentially a bitmap */
@@ -578,7 +585,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx,
emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
/* Restore the sp and discard the scratch memory */
- emit_stack_offset(align_sp(offset), ctx);
+ if (offset)
+ emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
@@ -625,8 +633,14 @@ static void build_prologue(struct jit_ctx *ctx)
if (ctx->flags & SEEN_X)
emit_jit_reg_move(r_X, r_zero, ctx);
- /* Do not leak kernel data to userspace */
- if (bpf_needs_clear_a(&ctx->skf->insns[0]))
+ /*
+ * Do not leak kernel data to userspace: we only need to clear
+ * r_A if it is ever used. In fact, if it is never used, we
+ * will not save/restore it, so clearing it in this case would
+ * corrupt the state of the caller.
+ */
+ if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
+ (ctx->flags & SEEN_A))
emit_jit_reg_move(r_A, r_zero, ctx);
}
@@ -1112,6 +1126,8 @@ jmp_cmp:
break;
case BPF_ANC | SKF_AD_IFINDEX:
/* A = skb->dev->ifindex */
+ case BPF_ANC | SKF_AD_HATYPE:
+ /* A = skb->dev->type */
ctx->flags |= SEEN_SKB | SEEN_A;
off = offsetof(struct sk_buff, dev);
/* Load *dev pointer */
@@ -1120,10 +1136,15 @@ jmp_cmp:
emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
b_imm(prog->len, ctx), ctx);
emit_reg_move(r_ret, r_zero, ctx);
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
- ifindex) != 4);
- off = offsetof(struct net_device, ifindex);
- emit_load(r_A, r_s0, off, ctx);
+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+ off = offsetof(struct net_device, ifindex);
+ emit_load(r_A, r_s0, off, ctx);
+ } else { /* code == (BPF_ANC | SKF_AD_HATYPE) */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
+ off = offsetof(struct net_device, type);
+ emit_half_load_unsigned(r_A, r_s0, off, ctx);
+ }
break;
case BPF_ANC | SKF_AD_MARK:
ctx->flags |= SEEN_SKB | SEEN_A;
@@ -1143,7 +1164,7 @@ jmp_cmp:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
vlan_tci) != 2);
off = offsetof(struct sk_buff, vlan_tci);
- emit_half_load(r_s0, r_skb, off, ctx);
+ emit_half_load_unsigned(r_s0, r_skb, off, ctx);
if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
} else {
@@ -1170,7 +1191,7 @@ jmp_cmp:
BUILD_BUG_ON(offsetof(struct sk_buff,
queue_mapping) > 0xff);
off = offsetof(struct sk_buff, queue_mapping);
- emit_half_load(r_A, r_skb, off, ctx);
+ emit_half_load_unsigned(r_A, r_skb, off, ctx);
break;
default:
pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
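
The switch to emit_half_load_unsigned() matters because MIPS lh sign-extends: a u16 field such as vlan_tci with its top bit set would otherwise smear 0xffff into the upper register bits. A small standalone illustration:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vlan_tci = 0x9100;	/* high bit set */

	int32_t  lh_result  = (int16_t)vlan_tci; /* lh:  sign-extends */
	uint32_t lhu_result = vlan_tci;          /* lhu: zero-extends */

	printf("lh: %x, lhu: %x\n", (uint32_t)lh_result, lhu_result);
	/* prints "lh: ffff9100, lhu: 9100" */
	return 0;
}
```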
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index 5d2e0c8d29c0..88a2075305d1 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive)
is_offset_in_header(2, half)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
- .set reorder
- lh $r_A, 0(t1)
- .set noreorder
+ lhu $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
- wsbh t0, $r_A
- seh $r_A, t0
+ wsbh $r_A, $r_A
# else
- sll t0, $r_A, 24
- andi t1, $r_A, 0xff00
- sra t0, t0, 16
- srl t1, t1, 8
+ sll t0, $r_A, 8
+ srl t1, $r_A, 8
+ andi t0, t0, 0xff00
or $r_A, t0, t1
# endif
#endif
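
Because the value is now loaded with lhu (zero-extended), the pre-R2 fallback can swap the two bytes with plain shifts and a single mask, with no sign-extension cleanup afterwards. The equivalent C, as a sanity check of the three-instruction sequence:

```c
#include <stdint.h>

/* Mirrors: sll t0, a, 8; srl t1, a, 8; andi t0, t0, 0xff00; or */
static uint32_t swap16_after_lhu(uint32_t a)
{
	uint32_t t0 = (a << 8) & 0xff00;	/* low byte moved up */
	uint32_t t1 = a >> 8;			/* high byte down; upper bits already zero */

	return t0 | t1;
}
```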
@@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive)
is_offset_in_header(1, byte)
/* Offset within header boundaries */
PTR_ADDU t1, $r_skb_data, offset
- lb $r_A, 0(t1)
+ lbu $r_A, 0(t1)
jr $r_ra
move $r_ret, zero
END(sk_load_byte)
@@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive)
* (void *to) is returned in r_s0
*
*/
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define DS_OFFSET(SIZE) (4 * SZREG)
+#else
+#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
+#endif
#define bpf_slow_path_common(SIZE) \
/* Quick check. Are we within reasonable boundaries? */ \
LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
@@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive)
PTR_LA t0, skb_copy_bits; \
PTR_S $r_ra, (5 * SZREG)($r_sp); \
/* Assign low slot to a2 */ \
- move a2, $r_sp; \
+ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
jalr t0; \
/* Reset our destination slot (DS but it's ok) */ \
INT_S zero, (4 * SZREG)($r_sp); \
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 9f672ceb089b..ad3584dbc9d7 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -679,7 +679,7 @@ static void __cvmx_increment_ba(union cvmx_sli_mem_access_subidx *pmas)
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
pmas->cn68xx.ba++;
else
- pmas->cn63xx.ba++;
+ pmas->s.ba++;
}
/**
@@ -1351,7 +1351,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
mem_access_subid.cn68xx.ba = 0;
else
- mem_access_subid.cn63xx.ba = 0;
+ mem_access_subid.s.ba = 0;
/*
* Setup mem access 12-15 for port 0, 16-19 for port 1,
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index a05246cbf54c..2035aaec8514 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -36,6 +36,7 @@ unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
+EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 90e43782342b..aa7713adfa58 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -34,6 +34,7 @@ unsigned int soc_pass;
unsigned int soc_type;
EXPORT_SYMBOL(soc_type);
unsigned int periph_rev;
+EXPORT_SYMBOL_GPL(periph_rev);
unsigned int zbbus_mhz;
EXPORT_SYMBOL(zbbus_mhz);
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index 18e17abf7664..3ae479117b42 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -132,11 +132,6 @@ static inline void start_thread(struct pt_regs *regs,
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(task) ((task)->thread.uregs)
diff --git a/arch/mn10300/include/uapi/asm/Kbuild b/arch/mn10300/include/uapi/asm/Kbuild
index 040178cdb3eb..b15bf6bc0e94 100644
--- a/arch/mn10300/include/uapi/asm/Kbuild
+++ b/arch/mn10300/include/uapi/asm/Kbuild
@@ -1,34 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index c9fa42619c6a..89e8027e07fb 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -40,14 +40,6 @@
#include "internal.h"
/*
- * return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return ((unsigned long *) tsk->thread.sp)[3];
-}
-
-/*
* power off function, if any
*/
void (*pm_power_off)(void);
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 51a56c8b04b4..a72d5f0de692 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -6,6 +6,8 @@ config NIOS2
select GENERIC_CPU_DEVICES
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
select IRQ_DOMAIN
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index 2fd08cbfdddb..55105220370c 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -18,7 +18,6 @@ config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
select SERIAL_CORE_CONSOLE
- depends on SERIAL_ALTERA_JTAGUART_CONSOLE || SERIAL_ALTERA_UART_CONSOLE
help
Enable early printk on console
This is useful for kernel debugging when your machine crashes very
diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile
index e74afc12d516..8673a79dca9c 100644
--- a/arch/nios2/Makefile
+++ b/arch/nios2/Makefile
@@ -22,10 +22,15 @@ export MMU
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
+KBUILD_AFLAGS += -march=r$(CONFIG_NIOS2_ARCH_REVISION)
+
KBUILD_CFLAGS += -pipe -D__linux__ -D__ELF__
+KBUILD_CFLAGS += -march=r$(CONFIG_NIOS2_ARCH_REVISION)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MUL_SUPPORT),-mhw-mul,-mno-hw-mul)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_MULX_SUPPORT),-mhw-mulx,-mno-hw-mulx)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_HW_DIV_SUPPORT),-mhw-div,-mno-hw-div)
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_BMX_SUPPORT),-mbmx,-mno-bmx)
+KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_CDX_SUPPORT),-mcdx,-mno-cdx)
KBUILD_CFLAGS += $(if $(CONFIG_NIOS2_FPU_SUPPORT),-mcustom-fpu-cfg=60-1,)
KBUILD_CFLAGS += -fno-optimize-sibling-calls
diff --git a/arch/nios2/boot/.gitignore b/arch/nios2/boot/.gitignore
new file mode 100644
index 000000000000..109279ca5a4d
--- /dev/null
+++ b/arch/nios2/boot/.gitignore
@@ -0,0 +1,2 @@
+*.dtb
+vmImage
diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts
index f362b2224ee7..4bb4dc1b52e9 100644
--- a/arch/nios2/boot/dts/10m50_devboard.dts
+++ b/arch/nios2/boot/dts/10m50_devboard.dts
@@ -244,6 +244,7 @@
};
chosen {
- bootargs = "debug console=ttyS0,115200";
+ bootargs = "debug earlycon console=ttyS0,115200";
+ stdout-path = &a_16550_uart_0;
};
};
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 87e70f2b463f..727dbb333f60 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += bitsperlong.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
+generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index 52abba973dc2..55e383c173f7 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -46,7 +46,9 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void invalidate_dcache_range(unsigned long start, unsigned long end);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
#endif /* _ASM_NIOS2_CACHEFLUSH_H */
diff --git a/arch/nios2/include/asm/cmpxchg.h b/arch/nios2/include/asm/cmpxchg.h
deleted file mode 100644
index a7978f14d157..000000000000
--- a/arch/nios2/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2004 Microtronix Datacom Ltd.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_NIOS2_CMPXCHG_H
-#define _ASM_NIOS2_CMPXCHG_H
-
-#include <asm-generic/cmpxchg.h>
-
-#endif /* _ASM_NIOS2_CMPXCHG_H */
diff --git a/arch/nios2/include/asm/cpuinfo.h b/arch/nios2/include/asm/cpuinfo.h
index 348bb228fec9..dbdaf96f28d4 100644
--- a/arch/nios2/include/asm/cpuinfo.h
+++ b/arch/nios2/include/asm/cpuinfo.h
@@ -29,6 +29,8 @@ struct cpuinfo {
bool has_div;
bool has_mul;
bool has_mulx;
+ bool has_bmx;
+ bool has_cdx;
/* CPU caches */
u32 icache_line_size;
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 3bbbc3d798e5..4944e2e1d8b0 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -75,9 +75,6 @@ static inline void release_thread(struct task_struct *dead_task)
{
}
-/* Return saved PC of a blocked thread. */
-#define thread_saved_pc(tsk) ((tsk)->thread.kregs->ea)
-
extern unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(p) \
diff --git a/arch/nios2/include/asm/prom.h b/arch/nios2/include/asm/prom.h
deleted file mode 100644
index 75fffb42cfa5..000000000000
--- a/arch/nios2/include/asm/prom.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright Altera Corporation (C) <2015>. All rights reserved
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ASM_NIOS2_PROM_H__
-#define __ASM_NIOS2_PROM_H__
-
-extern unsigned long __init of_early_console(void);
-
-#endif
diff --git a/arch/nios2/include/asm/setup.h b/arch/nios2/include/asm/setup.h
index dcbf8cf1a344..ac9bff248e6d 100644
--- a/arch/nios2/include/asm/setup.h
+++ b/arch/nios2/include/asm/setup.h
@@ -30,8 +30,6 @@ extern char fast_handler_end[];
extern void pagetable_init(void);
-extern void setup_early_printk(void);
-
#endif/* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index 727bd9504899..dfa3c7cb30b4 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -42,6 +42,8 @@
# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
+
/*
* Zero Userspace
*/
@@ -81,8 +83,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
#define INLINE_COPY_TO_USER
extern long strncpy_from_user(char *__to, const char __user *__from,
- long __len);
-extern long strnlen_user(const char __user *s, long n);
+ long __len);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *s, long n);
/* Optimized macros */
#define __get_user_asm(val, insn, addr, err) \
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index e0bb972a50d7..374bd123329f 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,5 +1,5 @@
+# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += elf.h
-
+generic-y += setup.h
generic-y += ucontext.h
diff --git a/arch/nios2/kernel/.gitignore b/arch/nios2/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/nios2/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/nios2/kernel/Makefile b/arch/nios2/kernel/Makefile
index 1aae25703657..06d07432b38d 100644
--- a/arch/nios2/kernel/Makefile
+++ b/arch/nios2/kernel/Makefile
@@ -20,7 +20,6 @@ obj-y += syscall_table.o
obj-y += time.o
obj-y += traps.o
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_NIOS2_ALIGNMENT_TRAP) += misaligned.o
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 1cccc36877bc..93207718bb22 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -67,6 +67,8 @@ void __init setup_cpuinfo(void)
cpuinfo.has_div = of_property_read_bool(cpu, "altr,has-div");
cpuinfo.has_mul = of_property_read_bool(cpu, "altr,has-mul");
cpuinfo.has_mulx = of_property_read_bool(cpu, "altr,has-mulx");
+ cpuinfo.has_bmx = of_property_read_bool(cpu, "altr,has-bmx");
+ cpuinfo.has_cdx = of_property_read_bool(cpu, "altr,has-cdx");
cpuinfo.mmu = of_property_read_bool(cpu, "altr,has-mmu");
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
@@ -78,6 +80,12 @@ void __init setup_cpuinfo(void)
if (IS_ENABLED(CONFIG_NIOS2_HW_MULX_SUPPORT) && !cpuinfo.has_mulx)
err_cpu("MULX");
+ if (IS_ENABLED(CONFIG_NIOS2_BMX_SUPPORT) && !cpuinfo.has_bmx)
+ err_cpu("BMX");
+
+ if (IS_ENABLED(CONFIG_NIOS2_CDX_SUPPORT) && !cpuinfo.has_cdx)
+ err_cpu("CDX");
+
cpuinfo.tlb_num_ways = fcpu(cpu, "altr,tlb-num-ways");
if (!cpuinfo.tlb_num_ways)
panic("altr,tlb-num-ways can't be 0. Please check your hardware "
@@ -125,12 +133,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m,
"CPU:\t\tNios II/%s\n"
+ "REV:\t\t%i\n"
"MMU:\t\t%s\n"
"FPU:\t\tnone\n"
"Clocking:\t%u.%02u MHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpuinfo.cpu_impl,
+ CONFIG_NIOS2_ARCH_REVISION,
cpuinfo.mmu ? "present" : "none",
clockfreq / 1000000, (clockfreq / 100000) % 10,
(loops_per_jiffy * HZ) / 500000,
@@ -141,10 +151,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"HW:\n"
" MUL:\t\t%s\n"
" MULX:\t\t%s\n"
- " DIV:\t\t%s\n",
+ " DIV:\t\t%s\n"
+ " BMX:\t\t%s\n"
+ " CDX:\t\t%s\n",
cpuinfo.has_mul ? "yes" : "no",
cpuinfo.has_mulx ? "yes" : "no",
- cpuinfo.has_div ? "yes" : "no");
+ cpuinfo.has_div ? "yes" : "no",
+ cpuinfo.has_bmx ? "yes" : "no",
+ cpuinfo.has_cdx ? "yes" : "no");
seq_printf(m,
"Icache:\t\t%ukB, line length: %u\n",
diff --git a/arch/nios2/kernel/early_printk.c b/arch/nios2/kernel/early_printk.c
deleted file mode 100644
index c08e4c1486fc..000000000000
--- a/arch/nios2/kernel/early_printk.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Early printk for Nios2.
- *
- * Copyright (C) 2015, Altera Corporation
- * Copyright (C) 2010, Tobias Klauser <tklauser@distanz.ch>
- * Copyright (C) 2009, Wind River Systems Inc
- * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-
-#include <asm/prom.h>
-
-static unsigned long base_addr;
-
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE)
-
-#define ALTERA_JTAGUART_DATA_REG 0
-#define ALTERA_JTAGUART_CONTROL_REG 4
-#define ALTERA_JTAGUART_CONTROL_WSPACE_MSK 0xFFFF0000
-#define ALTERA_JTAGUART_CONTROL_AC_MSK 0x00000400
-
-#define JUART_GET_CR() \
- __builtin_ldwio((void *)(base_addr + ALTERA_JTAGUART_CONTROL_REG))
-#define JUART_SET_CR(v) \
- __builtin_stwio((void *)(base_addr + ALTERA_JTAGUART_CONTROL_REG), v)
-#define JUART_SET_TX(v) \
- __builtin_stwio((void *)(base_addr + ALTERA_JTAGUART_DATA_REG), v)
-
-static void early_console_write(struct console *con, const char *s, unsigned n)
-{
- unsigned long status;
-
- while (n-- && *s) {
- while (((status = JUART_GET_CR())
- & ALTERA_JTAGUART_CONTROL_WSPACE_MSK) == 0) {
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
- if ((status & ALTERA_JTAGUART_CONTROL_AC_MSK) == 0)
- return; /* no connection activity */
-#endif
- }
- JUART_SET_TX(*s);
- s++;
- }
-}
-
-#elif defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
-
-#define ALTERA_UART_TXDATA_REG 4
-#define ALTERA_UART_STATUS_REG 8
-#define ALTERA_UART_STATUS_TRDY 0x0040
-
-#define UART_GET_SR() \
- __builtin_ldwio((void *)(base_addr + ALTERA_UART_STATUS_REG))
-#define UART_SET_TX(v) \
- __builtin_stwio((void *)(base_addr + ALTERA_UART_TXDATA_REG), v)
-
-static void early_console_putc(char c)
-{
- while (!(UART_GET_SR() & ALTERA_UART_STATUS_TRDY))
- ;
-
- UART_SET_TX(c);
-}
-
-static void early_console_write(struct console *con, const char *s, unsigned n)
-{
- while (n-- && *s) {
- early_console_putc(*s);
- if (*s == '\n')
- early_console_putc('\r');
- s++;
- }
-}
-
-#else
-# error Neither SERIAL_ALTERA_JTAGUART_CONSOLE nor SERIAL_ALTERA_UART_CONSOLE \
-selected
-#endif
-
-static struct console early_console_prom = {
- .name = "early",
- .write = early_console_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1
-};
-
-void __init setup_early_printk(void)
-{
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE) || \
- defined(CONFIG_SERIAL_ALTERA_UART_CONSOLE)
- base_addr = of_early_console();
-#else
- base_addr = 0;
-#endif
-
- if (!base_addr)
- return;
-
-#if defined(CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE_BYPASS)
- /* Clear activity bit so BYPASS doesn't stall if we've used JTAG for
- * downloading the kernel. This might cause early data to be lost even
- * if the JTAG terminal is running.
- */
- JUART_SET_CR(JUART_GET_CR() | ALTERA_JTAGUART_CONTROL_AC_MSK);
-#endif
-
- early_console = &early_console_prom;
- register_console(early_console);
- pr_info("early_console initialized at 0x%08lx\n", base_addr);
-}
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
index f5b74ae69b5b..6c833a9d4eab 100644
--- a/arch/nios2/kernel/irq.c
+++ b/arch/nios2/kernel/irq.c
@@ -67,7 +67,7 @@ static int irq_map(struct irq_domain *h, unsigned int virq,
return 0;
}
-static struct irq_domain_ops irq_ops = {
+static const struct irq_domain_ops irq_ops = {
.map = irq_map,
.xlate = irq_domain_xlate_onecell,
};
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 3901b80d4420..6688576b3a47 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -30,7 +30,6 @@
#include <linux/of_fdt.h>
#include <linux/io.h>
-#include <asm/prom.h>
#include <asm/sections.h>
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
@@ -71,51 +70,3 @@ void __init early_init_devtree(void *params)
early_init_dt_scan(params);
}
-
-#ifdef CONFIG_EARLY_PRINTK
-static int __init early_init_dt_scan_serial(unsigned long node,
- const char *uname, int depth, void *data)
-{
- u64 *addr64 = (u64 *) data;
- const char *p;
-
- /* only consider serial nodes */
- if (strncmp(uname, "serial", 6) != 0)
- return 0;
-
- p = of_get_flat_dt_prop(node, "compatible", NULL);
- if (!p)
- return 0;
-
- /*
- * We found an altera_jtaguart but it wasn't configured for console, so
- * skip it.
- */
-#ifndef CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE
- if (strncmp(p, "altr,juart", 10) == 0)
- return 0;
-#endif
-
- /*
- * Same for altera_uart.
- */
-#ifndef CONFIG_SERIAL_ALTERA_UART_CONSOLE
- if (strncmp(p, "altr,uart", 9) == 0)
- return 0;
-#endif
-
- *addr64 = of_flat_dt_translate_address(node);
-
- return *addr64 == OF_BAD_ADDR ? 0 : 1;
-}
-
-unsigned long __init of_early_console(void)
-{
- u64 base = 0;
-
- if (of_scan_flat_dt(early_init_dt_scan_serial, &base))
- return (u32)ioremap(base, 32);
- else
- return 0;
-}
-#endif /* CONFIG_EARLY_PRINTK */
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index 6044d9be28b4..926a02b17b31 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -137,6 +137,8 @@ asmlinkage void __init nios2_boot_init(unsigned r4, unsigned r5, unsigned r6,
strncpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif
+
+ parse_early_param();
}
void __init setup_arch(char **cmdline_p)
@@ -145,10 +147,6 @@ void __init setup_arch(char **cmdline_p)
console_verbose();
-#ifdef CONFIG_EARLY_PRINTK
- setup_early_printk();
-#endif
-
memory_start = PAGE_ALIGN((unsigned long)__pa(_end));
memory_end = (unsigned long) CONFIG_NIOS2_MEM_BASE + memory_size;
diff --git a/arch/nios2/mm/uaccess.c b/arch/nios2/mm/uaccess.c
index 804983317766..34f10af8ea40 100644
--- a/arch/nios2/mm/uaccess.c
+++ b/arch/nios2/mm/uaccess.c
@@ -128,36 +128,3 @@ asm(
".word 12b,13b\n"
".previous\n");
EXPORT_SYMBOL(raw_copy_to_user);
-
-long strncpy_from_user(char *__to, const char __user *__from, long __len)
-{
- int l = strnlen_user(__from, __len);
- int is_zt = 1;
-
- if (l > __len) {
- is_zt = 0;
- l = __len;
- }
-
- if (l == 0 || copy_from_user(__to, __from, l))
- return -EFAULT;
-
- if (is_zt)
- l--;
- return l;
-}
-
-long strnlen_user(const char __user *s, long n)
-{
- long i;
-
- for (i = 0; i < n; i++) {
- char c;
-
- if (get_user(c, s + i) == -EFAULT)
- return 0;
- if (c == 0)
- return i + 1;
- }
- return n + 1;
-}
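
The removed routines are replaced by the GENERIC_STRNCPY_FROM_USER / GENERIC_STRNLEN_USER implementations selected in the Kconfig hunk above, which scan a word at a time. A hedged model of the strnlen_user() contract callers rely on (faults, which make the real function return 0, are not representable here):

```c
#include <stddef.h>

/* Returns the length including the NUL terminator, or a value
 * greater than n (here n + 1) when no NUL is found within n bytes. */
static long strnlen_user_model(const char *s, long n)
{
	for (long i = 0; i < n; i++)
		if (s[i] == '\0')
			return i + 1;
	return n + 1;
}
```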
diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform
index d3e5df9fb36b..74c1aaf588b8 100644
--- a/arch/nios2/platform/Kconfig.platform
+++ b/arch/nios2/platform/Kconfig.platform
@@ -52,6 +52,14 @@ config NIOS2_DTB_SOURCE
comment "Nios II instructions"
+config NIOS2_ARCH_REVISION
+ int "Select Nios II architecture revision"
+ range 1 2
+ default 1
+ help
+ Select between Nios II R1 and Nios II R2. The architectures
+ are binary incompatible. Default is R1.
+
config NIOS2_HW_MUL_SUPPORT
bool "Enable MUL instruction"
default n
@@ -73,6 +81,24 @@ config NIOS2_HW_DIV_SUPPORT
Set to true if you configured the Nios II to include the DIV
instruction. Enables the -mhw-div compiler flag.
+config NIOS2_BMX_SUPPORT
+ bool "Enable BMX instructions"
+ depends on NIOS2_ARCH_REVISION = 2
+ default n
+ help
+ Set to true if you configured the Nios II R2 to include
+ the BMX Bit Manipulation Extension instructions. Enables
+ the -mbmx compiler flag.
+
+config NIOS2_CDX_SUPPORT
+ bool "Enable CDX instructions"
+ depends on NIOS2_ARCH_REVISION = 2
+ default n
+ help
+ Set to true if you configured the Nios II R2 to include
+ the CDX Code Density Extension instructions. Enables
+ the -mcdx compiler flag.
+
config NIOS2_FPU_SUPPORT
bool "Custom floating point instr support"
default n
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index df8e2f7bc7dd..fdbcf0bf44a4 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y += ucontext.h
-
generic-y += auxvec.h
generic-y += barrier.h
generic-y += bitsperlong.h
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index a908e6c30a00..396d8f306c21 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -84,11 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
#define init_stack (init_thread_union.stack)
#define cpu_relax() barrier()
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 80761eb82b5f..b15bf6bc0e94 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,10 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += byteorder.h
-header-y += elf.h
-header-y += kvm_para.h
-header-y += param.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += unistd.h
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index f8da545854f9..f9b77003f113 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -110,11 +110,6 @@ void show_regs(struct pt_regs *regs)
show_registers(regs);
}
-unsigned long thread_saved_pc(struct task_struct *t)
-{
- return (unsigned long)user_regs(t->stack)->pc;
-}
-
void release_thread(struct task_struct *dead_task)
{
}
@@ -167,8 +162,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
top_of_kernel_stack = sp;
- p->set_child_tid = p->clear_child_tid = NULL;
-
/* Locate userspace context on stack... */
sp -= STACK_FRAME_OVERHEAD; /* redzone */
sp -= sizeof(struct pt_regs);
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index ea4e6ae091d0..b3b66c3d6f3c 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -165,12 +165,7 @@ struct thread_struct {
.flags = 0 \
}
-/*
- * Return saved PC of a blocked thread. This is used by ps mostly.
- */
-
struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *t);
void show_trace(struct task_struct *task, unsigned long *stack);
/*
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index 348356c99514..3971c60a7e7f 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -2,31 +2,3 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += resource.h
-
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += pdc.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 4516a5b53f38..b64d7d21646e 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -239,11 +239,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
-unsigned long thread_saved_pc(struct task_struct *t)
-{
- return t->thread.regs.kpc;
-}
-
unsigned long
get_wchan(struct task_struct *p)
{
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index e5288638a1d9..378a754ca186 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
unsigned long task_size = TASK_SIZE;
int do_color_align, last_mmap;
struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+ vma = find_vma_prev(mm, addr, &prev);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
const unsigned long flags)
{
- struct vm_area_struct *vma;
+ struct vm_area_struct *vma, *prev;
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
int do_color_align, last_mmap;
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
else
addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
+
+ vma = find_vma_prev(mm, addr, &prev);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)) &&
+ (!prev || addr >= vm_end_gap(prev)))
goto found_addr;
}
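
Both hint paths now validate the requested address against guard-gap-adjusted VMA bounds rather than vm_start itself. A minimal kernel-context sketch of the fit test (the helper name is hypothetical; vm_start_gap()/vm_end_gap() account for the stack guard gap):

```c
/* The hint is usable only if it leaves the guard gap of the next
 * VMA and of the previous VMA intact. */
static int hint_fits(unsigned long addr, unsigned long len,
		     unsigned long task_size,
		     struct vm_area_struct *vma,
		     struct vm_area_struct *prev)
{
	return task_size - len >= addr &&
	       (!vma || addr + len <= vm_start_gap(vma)) &&
	       (!prev || addr >= vm_end_gap(prev));
}
```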
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d8834e8bfb05..6189238e69f8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -146,6 +146,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
@@ -183,7 +184,7 @@ config PPC
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_RCU_GUP
+ select HAVE_GENERIC_GUP
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
@@ -1198,11 +1199,6 @@ source "arch/powerpc/Kconfig.debug"
source "security/Kconfig"
-config KEYS_COMPAT
- bool
- depends on COMPAT && KEYS
- default y
-
source "crypto/Kconfig"
config PPC_LIB_RHEAP
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index 3c22d64b2de9..eccfcc88afae 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -7,7 +7,7 @@
PHONY := __archpost
__archpost:
-include include/config/auto.conf
+-include include/config/auto.conf
include scripts/Kbuild.include
quiet_cmd_relocs_check = CHKREL $@
diff --git a/arch/powerpc/boot/dts/include/dt-bindings b/arch/powerpc/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/powerpc/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b671ca..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
#define H_PTE_INDEX_SIZE 9
#define H_PMD_INDEX_SIZE 7
#define H_PUD_INDEX_SIZE 9
-#define H_PGD_INDEX_SIZE 12
+#define H_PGD_INDEX_SIZE 9
#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 214219dff87c..9732837aaae8 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -2,9 +2,9 @@
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define H_PTE_INDEX_SIZE 8
-#define H_PMD_INDEX_SIZE 5
-#define H_PUD_INDEX_SIZE 5
-#define H_PGD_INDEX_SIZE 15
+#define H_PMD_INDEX_SIZE 10
+#define H_PUD_INDEX_SIZE 7
+#define H_PGD_INDEX_SIZE 8
/*
* 64k aligned address free up few of the lower bits of RPN for us
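
The index sizes fix the total virtual address space: PAGE_SHIFT plus the sum of the per-level index sizes. A quick arithmetic check of the new geometries in the two hunks above (my computation, assuming 12-bit and 16-bit page shifts for the 4K and 64K configurations):

```c
#include <stdio.h>

int main(void)
{
	int va_4k  = 12 + 9 + 7 + 9 + 9;	/* shift + PTE+PMD+PUD+PGD: 46 bits = 64TB */
	int va_64k = 16 + 8 + 10 + 7 + 8;	/* 49 bits = 512TB */

	printf("4K: %d bits, 64K: %d bits\n", va_4k, va_64k);
	return 0;
}
```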
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f2c562a0a427..0151af6c2a50 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -104,7 +104,7 @@
"1: "PPC_TLNEI" %4,0\n" \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_TAINT(TAINT_WARN)), \
+ "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
"i" (sizeof(struct bug_entry)), \
"r" (__ret_warn_on)); \
} \
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 8ee4211ca0c6..14ad37865000 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -560,6 +560,8 @@ typedef struct risc_timer_pram {
#define CPM_PIN_SECONDARY 2
#define CPM_PIN_GPIO 4
#define CPM_PIN_OPENDRAIN 8
+#define CPM_PIN_FALLEDGE 16
+#define CPM_PIN_ANYEDGE 0
enum cpm_port {
CPM_PORTA,
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 6e834caa3720..0d1df02bf99d 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_POWERPC_CPUFEATURES_H
-#define __ASM_POWERPC_CPUFEATURES_H
+#ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H
+#define __ASM_POWERPC_CPU_HAS_FEATURE_H
#ifndef __ASSEMBLY__
@@ -52,4 +52,4 @@ static inline bool cpu_has_feature(unsigned long feature)
#endif
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_POWERPC_CPUFEATURE_H */
+#endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1f6847b107e4..d02ad93bf708 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -118,7 +118,9 @@ extern struct cpu_spec *cur_cpu_spec;
extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+extern void set_cur_cpu_spec(struct cpu_spec *s);
extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
+extern void identify_cpu_name(unsigned int pvr);
extern void do_feature_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
@@ -212,7 +214,6 @@ enum {
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000)
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
#ifndef __ASSEMBLY__
@@ -461,7 +462,7 @@ enum {
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
- CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
new file mode 100644
index 000000000000..7a34fc11bf63
--- /dev/null
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_POWERPC_DT_CPU_FTRS_H
+#define __ASM_POWERPC_DT_CPU_FTRS_H
+
+/*
+ * Copyright 2017, IBM Corporation
+ * cpufeatures is the new way to discover CPU features via the /cpus/features
+ * device tree node. This supersedes PVR-based discovery ("cputable") and the
+ * older device tree feature advertisement.
+ */
+
+#include <linux/types.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+#include <uapi/asm/cputable.h>
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+bool dt_cpu_ftrs_init(void *fdt);
+void dt_cpu_ftrs_scan(void);
+bool dt_cpu_ftrs_in_use(void);
+#else
+static inline bool dt_cpu_ftrs_init(void *fdt) { return false; }
+static inline void dt_cpu_ftrs_scan(void) { }
+static inline bool dt_cpu_ftrs_in_use(void) { return false; }
+#endif
+
+#endif /* __ASM_POWERPC_DT_CPU_FTRS_H */
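
The header follows the usual kernel pattern of static-inline stubs when the feature is compiled out, so call sites need no #ifdefs. A hedged illustration of how a caller can stay unconditional (the call site shown is hypothetical):

```c
/* Hypothetical early-boot caller: compiles identically whether or
 * not CONFIG_PPC_DT_CPU_FTRS is set, because the stubs above turn
 * these calls into constants. */
static void early_cpu_setup(void *fdt)
{
	if (dt_cpu_ftrs_init(fdt))
		return;		/* new-style features found and applied */

	/* otherwise fall back to PVR-based cputable identification */
}
```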
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea3..8814a7249ceb 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
#ifdef CONFIG_KPROBES_ON_FTRACE
extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0593d9479f74..b148496ffe36 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -111,6 +111,8 @@ struct kvmppc_host_state {
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
void __iomem *xics_phys;
+ void __iomem *xive_tima_phys;
+ void __iomem *xive_tima_virt;
u32 saved_xirr;
u64 dabr;
u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 77c60826d145..9c51ac4b8f36 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -210,6 +210,12 @@ struct kvmppc_spapr_tce_table {
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
+extern struct kvm_device_ops kvm_xics_ops;
+
+/* XIVE components, defined in book3s_xive.c */
+struct kvmppc_xive;
+struct kvmppc_xive_vcpu;
+extern struct kvm_device_ops kvm_xive_ops;
struct kvmppc_passthru_irqmap;
@@ -298,6 +304,7 @@ struct kvm_arch {
#endif
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
+ struct kvmppc_xive *xive;
struct kvmppc_passthru_irqmap *pimap;
#endif
struct kvmppc_ops *kvm_ops;
@@ -427,7 +434,7 @@ struct kvmppc_passthru_irqmap {
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
-#define KVMPPC_IRQ_XICS 2
+#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */
#define MMIO_HPTE_CACHE_SIZE 4
@@ -454,6 +461,21 @@ struct mmio_hpte_cache {
struct openpic;
+/* W0 and W1 of a XIVE thread management context */
+union xive_tma_w01 {
+ struct {
+ u8 nsr;
+ u8 cppr;
+ u8 ipb;
+ u8 lsmfb;
+ u8 ack;
+ u8 inc;
+ u8 age;
+ u8 pipr;
+ };
+ __be64 w01;
+};
+
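+
+The union lets real-mode code move W0/W1 of the XIVE thread context as one big-endian doubleword while still naming the individual byte fields. A hedged standalone model (field names follow the structure above; the type name here is a stand-in):
+
+```c
+#include <stdint.h>
+
+union tma_w01_model {
+	struct {
+		uint8_t nsr, cppr, ipb, lsmfb, ack, inc, age, pipr;
+	};
+	uint64_t w01;	/* whole doubleword, big-endian in the real layout */
+};
+```
+
+Saving or restoring the state is then a single 64-bit load or store of w01 instead of eight byte accesses.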
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -714,6 +736,10 @@ struct kvm_vcpu_arch {
struct openpic *mpic; /* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
struct kvmppc_icp *icp; /* XICS presentation controller */
+ struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
+ __be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */
+ u32 xive_pushed; /* Is the VP pushed on the physical CPU? */
+ union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 76e940a3c145..e0d88c38602b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -240,6 +240,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
@@ -428,6 +429,14 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
}
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{
+ paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
+ paca[cpu].kvm_hstate.xive_tima_virt = virt_addr;
+}
+
static inline u32 kvmppc_get_xics_latch(void)
{
u32 xirr;
@@ -458,6 +467,11 @@ static inline void __init kvm_cma_reserve(void)
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{}
+
static inline u32 kvmppc_get_xics_latch(void)
{
return 0;
@@ -508,6 +522,10 @@ extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
struct kvmppc_irq_map *irq_map,
struct kvmppc_passthru_irqmap *pimap,
bool *again);
+
+extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -525,6 +543,60 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
#endif
+#ifdef CONFIG_KVM_XIVE
+/*
+ * Below the first "xive" is the "eXternal Interrupt Virtualization Engine"
+ * i.e. the new P9 interrupt controller, while the second "xive" is the legacy
+ * "eXternal Interrupt Vector Entry" which is the configuration of an
+ * interrupt on the "xics" interrupt controller on P8 and earlier. Those
+ * two function consume or produce a legacy "XIVE" state from the
+ * new "XIVE" interrupt controller.
+ */
+extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
+extern void kvmppc_xive_init_module(void);
+extern void kvmppc_xive_exit_module(void);
+
+extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc);
+extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc);
+extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+
+extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+#else
+static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority) { return -1; }
+static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority) { return -1; }
+static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
+static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
+static inline void kvmppc_xive_init_module(void) { }
+static inline void kvmppc_xive_exit_module(void) { }
+
+static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc) { return -ENODEV; }
+static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc) { return -ENODEV; }
+static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
+static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
+
+static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status) { return -ENODEV; }
+#endif /* CONFIG_KVM_XIVE */
+
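Both backends sit behind CONFIG_KVM_XICS, so callers select the XICS or XIVE variant at runtime rather than at build time. A hedged sketch of that dispatch using the existing xive_enabled() test; the wrapper name is invented for illustration:

/* Sketch only: route a source configuration request to the native-XIVE
 * backend when the host interrupt controller is XIVE, otherwise to the
 * legacy XICS emulation. */
static int example_set_source(struct kvm *kvm, u32 irq, u32 server,
			      u32 priority)
{
	if (xive_enabled())
		return kvmppc_xive_set_xive(kvm, irq, server, priority);
	return kvmppc_xics_set_xive(kvm, irq, server, priority);
}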
/*
* Prototypes for functions called only from assembler code.
* Having prototypes reduces sparse errors.
@@ -562,6 +634,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 53885512b8d3..6c0132c7212f 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -14,6 +14,10 @@
#include <asm-generic/module.h>
+#ifdef CC_USING_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC "mprofile-kernel"
+#endif
+
#ifndef __powerpc64__
/*
* Thanks to Paul M for explaining this.
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 2a32483c7b6c..8da5d4c1cab2 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
+ pfn_valid(virt_to_pfn(kaddr)))
+#else
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#endif
/*
* On Book-E parts we need __va to parse the device tree and we can't
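To make the aliasing problem concrete, here is a hedged kernel-code sketch of what the stricter check changes; it is illustrative and not part of the patch:

/* On hash, __pa() on a vmalloc address can alias into the kernel linear
 * map, so pfn_valid() alone could wrongly return true. The added region
 * check rejects such addresses. */
static void example_region_check(void)
{
	void *v = vmalloc(PAGE_SIZE);
	void *k = kmalloc(64, GFP_KERNEL);

	WARN_ON(virt_addr_valid(v));	/* vmalloc address: must be false */
	WARN_ON(!virt_addr_valid(k));	/* linear-map address: must be true */

	kfree(k);
	vfree(v);
}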
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a4b1d8d6b793..1189d04f3bd1 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
#define TASK_SIZE_128TB (0x0000800000000000UL)
#define TASK_SIZE_512TB (0x0002000000000000UL)
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now, 512TB is only supported with Book3S and a 64K Linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
/*
* Max value currently used:
*/
-#define TASK_SIZE_USER64 TASK_SIZE_512TB
+#define TASK_SIZE_USER64 TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB
#else
-#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
#endif
/*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,16 +148,15 @@ void release_thread(struct task_struct *);
 * with 128TB and conditionally enable up to 512TB
*/
#ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
- TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
+ TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#endif
#ifdef __powerpc64__
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32
#define STACK_TOP (is_32bit_task() ? \
@@ -374,12 +378,6 @@ struct thread_struct {
}
#endif
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk) \
- ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)
unsigned long get_wchan(struct task_struct *p);
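The visible effect of DEFAULT_MAP_WINDOW_USER64 is on the userspace side: with the Book3S 64K-page configuration, mmap() keeps returning addresses below 128TB unless the caller passes an address hint above that boundary. A hedged userspace illustration (the 256TB hint is just an example address):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* No hint: the kernel allocates inside the 128TB default window. */
	void *lo = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Hint above 128TB: opts this mapping in to the full 512TB space. */
	void *hi = mmap((void *)(1UL << 48), 4096, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("lo=%p hi=%p\n", lo, hi);
	return 0;
}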
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d4f653c9259a..7e50e47375d6 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1229,6 +1229,7 @@
#define PVR_POWER8E 0x004B
#define PVR_POWER8NVL 0x004C
#define PVR_POWER8 0x004D
+#define PVR_POWER9 0x004E
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..329771559cbb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+static inline int early_cpu_to_node(int cpu)
+{
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * Fall back to node 0 if nid is unset (it should only be unset due to bugs).
+ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+ */
+ return (nid < 0) ? 0 : nid;
+}
#else
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
static inline void dump_numa_cpu_topology(void) {}
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 5c0d8a8cdae5..41e88d3ce36b 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -267,13 +267,7 @@ do { \
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
-#ifndef __powerpc64__
-
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#else /* __powerpc64__ */
-
+#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 3cdbeaeac397..c23ff4389ca2 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -94,12 +94,13 @@ struct xive_q {
* store at 0 and some ESBs support doing a trigger via a
* separate trigger page.
*/
-#define XIVE_ESB_GET 0x800
-#define XIVE_ESB_SET_PQ_00 0xc00
-#define XIVE_ESB_SET_PQ_01 0xd00
-#define XIVE_ESB_SET_PQ_10 0xe00
-#define XIVE_ESB_SET_PQ_11 0xf00
-#define XIVE_ESB_MASK XIVE_ESB_SET_PQ_01
+#define XIVE_ESB_STORE_EOI 0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
+#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
@@ -136,11 +137,11 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order, bool can_escalate);
extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
-extern bool __xive_irq_trigger(struct xive_irq_data *xd);
-extern bool __xive_irq_retrigger(struct xive_irq_data *xd);
-extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd);
-
+extern void xive_native_sync_source(u32 hw_irq);
extern bool is_xive_irq(struct irq_chip *chip);
+extern int xive_native_enable_vp(u32 vp_id);
+extern int xive_native_disable_vp(u32 vp_id);
+extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
#else
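The new Load/Store annotations matter because each ESB offset encodes an operation, not just an address: a load from XIVE_ESB_GET returns the source's current P/Q state, while a store at XIVE_ESB_STORE_EOI performs an EOI. A minimal sketch of the load side, assuming the already-mapped eoi_mmio page in struct xive_irq_data:

/* Sketch only: fetch the P/Q bits of an interrupt source through its
 * ESB management page. */
static u8 example_esb_get(struct xive_irq_data *xd)
{
	u64 val = in_be64(xd->eoi_mmio + XIVE_ESB_GET);

	return val & (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q);
}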
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index dab3717e3ea0..b15bf6bc0e94 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,47 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += bootx.h
-header-y += byteorder.h
-header-y += cputable.h
-header-y += eeh.h
-header-y += elf.h
-header-y += epapr_hcalls.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += nvram.h
-header-y += opal-prd.h
-header-y += param.h
-header-y += perf_event.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ps3fb.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += spu_info.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += tm.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index f63c96cd3608..4d877144f377 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -46,5 +46,14 @@
#define PPC_FEATURE2_HTM_NOSC 0x01000000
#define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */
#define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */
+#define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */
+#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
+
+/*
+ * IMPORTANT!
+ * All future PPC_FEATURE definitions should be allocated in cooperation with
+ * OPAL / skiboot firmware, in accordance with the ibm,powerpc-cpu-features
+ * device tree binding.
+ */
#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
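Userspace discovers these bits through the auxiliary vector rather than by probing instructions. A hedged example using glibc's getauxval(); the fallback define mirrors the value added above:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef PPC_FEATURE2_DARN
#define PPC_FEATURE2_DARN	0x00200000
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("darn: %s\n", (hwcap2 & PPC_FEATURE2_DARN) ? "yes" : "no");
	return 0;
}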
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b9db46ae545b..e132902e1f14 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
+obj-$(CONFIG_PPC_DT_CPU_FTRS) += dt_cpu_ftrs.o
obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 439c257dec4a..709e23425317 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -634,6 +634,8 @@ int main(void)
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
+ HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
@@ -719,6 +721,14 @@ int main(void)
OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
#endif
+#ifdef CONFIG_KVM_XICS
+ DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu,
+ arch.xive_saved_state));
+ DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
+ arch.xive_cam_word));
+ DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e79b9daa873c..6f849832a669 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -23,7 +23,9 @@
#include <asm/mmu.h>
#include <asm/setup.h>
-struct cpu_spec* cur_cpu_spec = NULL;
+static struct cpu_spec the_cpu_spec __read_mostly;
+
+struct cpu_spec* cur_cpu_spec __read_mostly = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
/* The platform string corresponding to the real PVR */
@@ -122,7 +124,8 @@ extern void __restore_cpu_e6500(void);
#define COMMON_USER_POWER9 COMMON_USER_POWER8
#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \
PPC_FEATURE2_ARCH_3_00 | \
- PPC_FEATURE2_HAS_IEEE128)
+ PPC_FEATURE2_HAS_IEEE128 | \
+ PPC_FEATURE2_DARN)
#ifdef CONFIG_PPC_BOOK3E_64
#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
@@ -2179,7 +2182,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif /* CONFIG_E500 */
};
-static struct cpu_spec the_cpu_spec;
+void __init set_cur_cpu_spec(struct cpu_spec *s)
+{
+ struct cpu_spec *t = &the_cpu_spec;
+
+ t = PTRRELOC(t);
+ *t = *s;
+
+ *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
+}
static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
struct cpu_spec *s)
@@ -2266,6 +2277,29 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
return NULL;
}
+/*
+ * Used by cpufeatures to get the name for CPUs with a PVR table.
+ * If they don't have a PVR table, cpufeatures gets the name from the
+ * CPU device-tree node.
+ */
+void __init identify_cpu_name(unsigned int pvr)
+{
+ struct cpu_spec *s = cpu_specs;
+ struct cpu_spec *t = &the_cpu_spec;
+ int i;
+
+ s = PTRRELOC(s);
+ t = PTRRELOC(t);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_specs); i++, s++) {
+ if ((pvr & s->pvr_mask) == s->pvr_value) {
+ t->cpu_name = s->cpu_name;
+ return;
+ }
+ }
+}
+
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
new file mode 100644
index 000000000000..4c7656dc4e04
--- /dev/null
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -0,0 +1,1069 @@
+/*
+ * Copyright 2017, Nicholas Piggin, IBM Corporation
+ * Licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/libfdt.h>
+#include <linux/memblock.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/threads.h>
+
+#include <asm/cputable.h>
+#include <asm/dt_cpu_ftrs.h>
+#include <asm/mmu.h>
+#include <asm/oprofile_impl.h>
+#include <asm/prom.h>
+#include <asm/setup.h>
+
+
+/* Device-tree visible constants follow */
+#define ISA_V2_07B 2070
+#define ISA_V3_0B 3000
+
+#define USABLE_PR (1U << 0)
+#define USABLE_OS (1U << 1)
+#define USABLE_HV (1U << 2)
+
+#define HV_SUPPORT_HFSCR (1U << 0)
+#define OS_SUPPORT_FSCR (1U << 0)
+
+/* For parsing, we define all bits set as "NONE" case */
+#define HV_SUPPORT_NONE 0xffffffffU
+#define OS_SUPPORT_NONE 0xffffffffU
+
+struct dt_cpu_feature {
+ const char *name;
+ uint32_t isa;
+ uint32_t usable_privilege;
+ uint32_t hv_support;
+ uint32_t os_support;
+ uint32_t hfscr_bit_nr;
+ uint32_t fscr_bit_nr;
+ uint32_t hwcap_bit_nr;
+ /* fdt parsing */
+ unsigned long node;
+ int enabled;
+ int disabled;
+};
+
+#define CPU_FTRS_BASE \
+ (CPU_FTR_USE_TB | \
+ CPU_FTR_LWSYNC | \
+ CPU_FTR_FPU_UNAVAILABLE |\
+ CPU_FTR_NODSISRALIGN |\
+ CPU_FTR_NOEXECUTE |\
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_STCX_CHECKS_ADDRESS |\
+ CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_206 |\
+ CPU_FTR_ARCH_207S)
+
+#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)
+
+#define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \
+ PPC_FEATURE_ARCH_2_06 |\
+ PPC_FEATURE_ICACHE_SNOOP)
+#define COMMON_USER2_BASE (PPC_FEATURE2_ARCH_2_07 | \
+ PPC_FEATURE2_ISEL)
+/*
+ * Set up the base CPU
+ */
+
+extern void __flush_tlb_power8(unsigned int action);
+extern void __flush_tlb_power9(unsigned int action);
+extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
+
+static int hv_mode;
+
+static struct {
+ u64 lpcr;
+ u64 hfscr;
+ u64 fscr;
+} system_registers;
+
+static void (*init_pmu_registers)(void);
+
+static void cpufeatures_flush_tlb(void)
+{
+ unsigned long rb;
+ unsigned int i, num_sets;
+
+ /*
+ * This is a temporary measure to keep the TLB flush equivalent to
+ * what the cputable-based setup code does.
+ */
+ switch (PVR_VER(mfspr(SPRN_PVR))) {
+ case PVR_POWER8:
+ case PVR_POWER8E:
+ case PVR_POWER8NVL:
+ num_sets = POWER8_TLB_SETS;
+ break;
+ case PVR_POWER9:
+ num_sets = POWER9_TLB_SETS_HASH;
+ break;
+ default:
+ num_sets = 1;
+ pr_err("unknown CPU version for boot TLB flush\n");
+ break;
+ }
+
+ asm volatile("ptesync" : : : "memory");
+ rb = TLBIEL_INVAL_SET;
+ for (i = 0; i < num_sets; i++) {
+ asm volatile("tlbiel %0" : : "r" (rb));
+ rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+ }
+ asm volatile("ptesync" : : : "memory");
+}
+
+static void __restore_cpu_cpufeatures(void)
+{
+ /*
+ * LPCR is restored by the power on engine already. It can be changed
+ * after early init e.g., by radix enable, and we have no unified API
+ * for saving and restoring such SPRs.
+ *
+ * This ->restore hook should really be removed from idle and register
+ * restore moved directly into the idle restore code, because this code
+ * doesn't know how idle is implemented or what it needs restored here.
+ *
+ * The best we can do to accommodate secondary boot and idle restore
+ * for now is to OR the saved LPCR bits into the existing value.
+ */
+
+ mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
+ if (hv_mode) {
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_HFSCR, system_registers.hfscr);
+ }
+ mtspr(SPRN_FSCR, system_registers.fscr);
+
+ if (init_pmu_registers)
+ init_pmu_registers();
+
+ cpufeatures_flush_tlb();
+}
+
+static char dt_cpu_name[64];
+
+static struct cpu_spec __initdata base_cpu_spec = {
+ .cpu_name = NULL,
+ .cpu_features = CPU_FTRS_BASE,
+ .cpu_user_features = COMMON_USER_BASE,
+ .cpu_user_features2 = COMMON_USER2_BASE,
+ .mmu_features = 0,
+ .icache_bsize = 32, /* minimum block size, fixed by */
+ .dcache_bsize = 32, /* cache info init. */
+ .num_pmcs = 0,
+ .pmc_type = PPC_PMC_DEFAULT,
+ .oprofile_cpu_type = NULL,
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = NULL,
+ .cpu_restore = __restore_cpu_cpufeatures,
+ .flush_tlb = NULL,
+ .machine_check_early = NULL,
+ .platform = NULL,
+};
+
+static void __init cpufeatures_setup_cpu(void)
+{
+ set_cur_cpu_spec(&base_cpu_spec);
+
+ cur_cpu_spec->pvr_mask = -1;
+ cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);
+
+ /* Initialize the base environment -- clear FSCR/HFSCR. */
+ hv_mode = !!(mfmsr() & MSR_HV);
+ if (hv_mode) {
+ /* CPU_FTR_HVMODE is used early in PACA setup */
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+ mtspr(SPRN_HFSCR, 0);
+ }
+ mtspr(SPRN_FSCR, 0);
+
+ /*
+ * LPCR does not get cleared, to match behaviour with secondaries
+ * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
+ * could clear LPCR too.
+ */
+}
+
+static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
+{
+ if (f->hv_support == HV_SUPPORT_NONE) {
+ } else if (f->hv_support & HV_SUPPORT_HFSCR) {
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= 1UL << f->hfscr_bit_nr;
+ mtspr(SPRN_HFSCR, hfscr);
+ } else {
+ /* Does not have a known recipe */
+ return 0;
+ }
+
+ if (f->os_support == OS_SUPPORT_NONE) {
+ } else if (f->os_support & OS_SUPPORT_FSCR) {
+ u64 fscr = mfspr(SPRN_FSCR);
+ fscr |= 1UL << f->fscr_bit_nr;
+ mtspr(SPRN_FSCR, fscr);
+ } else {
+ /* Does not have a known recipe */
+ return 0;
+ }
+
+ if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
+ uint32_t word = f->hwcap_bit_nr / 32;
+ uint32_t bit = f->hwcap_bit_nr % 32;
+
+ if (word == 0)
+ cur_cpu_spec->cpu_user_features |= 1U << bit;
+ else if (word == 1)
+ cur_cpu_spec->cpu_user_features2 |= 1U << bit;
+ else
+ pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
+ }
+
+ return 1;
+}
+
+static int __init feat_enable(struct dt_cpu_feature *f)
+{
+ if (f->hv_support != HV_SUPPORT_NONE) {
+ if (f->hfscr_bit_nr != -1) {
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= 1UL << f->hfscr_bit_nr;
+ mtspr(SPRN_HFSCR, hfscr);
+ }
+ }
+
+ if (f->os_support != OS_SUPPORT_NONE) {
+ if (f->fscr_bit_nr != -1) {
+ u64 fscr = mfspr(SPRN_FSCR);
+ fscr |= 1UL << f->fscr_bit_nr;
+ mtspr(SPRN_FSCR, fscr);
+ }
+ }
+
+ if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
+ uint32_t word = f->hwcap_bit_nr / 32;
+ uint32_t bit = f->hwcap_bit_nr % 32;
+
+ if (word == 0)
+ cur_cpu_spec->cpu_user_features |= 1U << bit;
+ else if (word == 1)
+ cur_cpu_spec->cpu_user_features2 |= 1U << bit;
+ else
+ pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
+ }
+
+ return 1;
+}
+
+static int __init feat_disable(struct dt_cpu_feature *f)
+{
+ return 0;
+}
+
+static int __init feat_enable_hv(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ if (!hv_mode) {
+ pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
+ return 0;
+ }
+
+ mtspr(SPRN_LPID, 0);
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_LPES0; /* HV external interrupts */
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+
+ return 1;
+}
+
+static int __init feat_enable_le(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
+ return 1;
+}
+
+static int __init feat_enable_smt(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
+ return 1;
+}
+
+static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* Set PECE wakeup modes for ISA 207 */
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECE0;
+ lpcr |= LPCR_PECE1;
+ lpcr |= LPCR_PECE2;
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;
+
+ return 1;
+}
+
+static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* Set PECE wakeup modes for ISAv3.0B */
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECE0;
+ lpcr |= LPCR_PECE1;
+ lpcr |= LPCR_PECE2;
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_ISL;
+
+ /* VRMASD */
+ lpcr |= LPCR_VPM0;
+ lpcr &= ~LPCR_VPM1;
+ lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+}
+
+static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_ISL;
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+}
+
+static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_PPC_RADIX_MMU
+ cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_dscr(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ feat_enable(f);
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_DPFD;
+ lpcr |= (4UL << LPCR_DPFD_SH);
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static void hfscr_pmu_enable(void)
+{
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= PPC_BIT(60);
+ mtspr(SPRN_HFSCR, hfscr);
+}
+
+static void init_pmu_power8(void)
+{
+ if (hv_mode) {
+ mtspr(SPRN_MMCRC, 0);
+ mtspr(SPRN_MMCRH, 0);
+ }
+
+ mtspr(SPRN_MMCRA, 0);
+ mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR1, 0);
+ mtspr(SPRN_MMCR2, 0);
+ mtspr(SPRN_MMCRS, 0);
+}
+
+static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power8";
+ cur_cpu_spec->flush_tlb = __flush_tlb_power8;
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
+
+ return 1;
+}
+
+static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
+{
+ hfscr_pmu_enable();
+
+ init_pmu_power8();
+ init_pmu_registers = init_pmu_power8;
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
+ if (pvr_version_is(PVR_POWER8E))
+ cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;
+
+ cur_cpu_spec->num_pmcs = 6;
+ cur_cpu_spec->pmc_type = PPC_PMC_IBM;
+ cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";
+
+ return 1;
+}
+
+static void init_pmu_power9(void)
+{
+ if (hv_mode)
+ mtspr(SPRN_MMCRC, 0);
+
+ mtspr(SPRN_MMCRA, 0);
+ mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR1, 0);
+ mtspr(SPRN_MMCR2, 0);
+}
+
+static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power9";
+ cur_cpu_spec->flush_tlb = __flush_tlb_power9;
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
+
+ return 1;
+}
+
+static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
+{
+ hfscr_pmu_enable();
+
+ init_pmu_power9();
+ init_pmu_registers = init_pmu_power9;
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
+
+ cur_cpu_spec->num_pmcs = 6;
+ cur_cpu_spec->pmc_type = PPC_PMC_IBM;
+ cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";
+
+ return 1;
+}
+
+static int __init feat_enable_tm(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ feat_enable(f);
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_fp(struct dt_cpu_feature *f)
+{
+ feat_enable(f);
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;
+
+ return 1;
+}
+
+static int __init feat_enable_vector(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_ALTIVEC
+ feat_enable(f);
+ cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
+ cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_vsx(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_VSX
+ feat_enable(f);
+ cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_purr(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;
+
+ return 1;
+}
+
+static int __init feat_enable_ebb(struct dt_cpu_feature *f)
+{
+ /*
+ * PPC_FEATURE2_EBB is enabled in PMU init code because it has
+ * historically been related to the PMU facility. This may have
+ * to be decoupled if EBB becomes more generic. For now, follow
+ * existing convention.
+ */
+ f->hwcap_bit_nr = -1;
+ feat_enable(f);
+
+ return 1;
+}
+
+static int __init feat_enable_dbell(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* P9 has an HFSCR for privileged state */
+ feat_enable(f);
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_hvi(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /*
+ * POWER9 XIVE interrupts, including those in OPAL XICS compatibility
+ * mode, are always delivered as hypervisor virtualization interrupts
+ * (HVI) rather than EE.
+ *
+ * However, LPES0 is not set here: in the off chance that an EE does
+ * get delivered to the host somehow, the EE handler would not expect
+ * it to be delivered in LPES0 mode (e.g., using SRR[01]). That could
+ * happen if there is a bug in the interrupt controller code, or if
+ * the IC is misconfigured in systemsim.
+ */
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_HVICE; /* enable hvi interrupts */
+ lpcr |= LPCR_HEIC; /* disable ee interrupts when MSR_HV */
+ lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;
+
+ return 1;
+}
+
+struct dt_cpu_feature_match {
+ const char *name;
+ int (*enable)(struct dt_cpu_feature *f);
+ u64 cpu_ftr_bit_mask;
+};
+
+static struct dt_cpu_feature_match __initdata
+ dt_cpu_feature_match_table[] = {
+ {"hypervisor", feat_enable_hv, 0},
+ {"big-endian", feat_enable, 0},
+ {"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
+ {"smt", feat_enable_smt, 0},
+ {"interrupt-facilities", feat_enable, 0},
+ {"timer-facilities", feat_enable, 0},
+ {"timer-facilities-v3", feat_enable, 0},
+ {"debug-facilities", feat_enable, 0},
+ {"come-from-address-register", feat_enable, CPU_FTR_CFAR},
+ {"branch-tracing", feat_enable, 0},
+ {"floating-point", feat_enable_fp, 0},
+ {"vector", feat_enable_vector, 0},
+ {"vector-scalar", feat_enable_vsx, 0},
+ {"vector-scalar-v3", feat_enable, 0},
+ {"decimal-floating-point", feat_enable, 0},
+ {"decimal-integer", feat_enable, 0},
+ {"quadword-load-store", feat_enable, 0},
+ {"vector-crypto", feat_enable, 0},
+ {"mmu-hash", feat_enable_mmu_hash, 0},
+ {"mmu-radix", feat_enable_mmu_radix, 0},
+ {"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
+ {"virtual-page-class-key-protection", feat_enable, 0},
+ {"transactional-memory", feat_enable_tm, CPU_FTR_TM},
+ {"transactional-memory-v3", feat_enable_tm, 0},
+ {"idle-nap", feat_enable_idle_nap, 0},
+ {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
+ {"idle-stop", feat_enable_idle_stop, 0},
+ {"machine-check-power8", feat_enable_mce_power8, 0},
+ {"performance-monitor-power8", feat_enable_pmu_power8, 0},
+ {"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
+ {"event-based-branch", feat_enable_ebb, 0},
+ {"target-address-register", feat_enable, 0},
+ {"branch-history-rolling-buffer", feat_enable, 0},
+ {"control-register", feat_enable, CPU_FTR_CTRL},
+ {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
+ {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
+ {"processor-utilization-of-resources-register", feat_enable_purr, 0},
+ {"no-execute", feat_enable, 0},
+ {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
+ {"cache-inhibited-large-page", feat_enable_large_ci, 0},
+ {"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX},
+ {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
+ {"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
+ {"wait", feat_enable, 0},
+ {"atomic-memory-operations", feat_enable, 0},
+ {"branch-v3", feat_enable, 0},
+ {"copy-paste", feat_enable, 0},
+ {"decimal-floating-point-v3", feat_enable, 0},
+ {"decimal-integer-v3", feat_enable, 0},
+ {"fixed-point-v3", feat_enable, 0},
+ {"floating-point-v3", feat_enable, 0},
+ {"group-start-register", feat_enable, 0},
+ {"pc-relative-addressing", feat_enable, 0},
+ {"machine-check-power9", feat_enable_mce_power9, 0},
+ {"performance-monitor-power9", feat_enable_pmu_power9, 0},
+ {"event-based-branch-v3", feat_enable, 0},
+ {"random-number-generator", feat_enable, 0},
+ {"system-call-vectored", feat_disable, 0},
+ {"trace-interrupt-v3", feat_enable, 0},
+ {"vector-v3", feat_enable, 0},
+ {"vector-binary128", feat_enable, 0},
+ {"vector-binary16", feat_enable, 0},
+ {"wait-v3", feat_enable, 0},
+};
+
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+ if (!str)
+ return 0;
+
+ if (!strcmp(str, "off"))
+ using_dt_cpu_ftrs = false;
+ else if (!strcmp(str, "known"))
+ enable_unknown = false;
+ else
+ return 1;
+
+ return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
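For reference, the parser above accepts exactly two values on the kernel command line; anything else falls through:

dt_cpu_ftrs=off      # ignore the ibm,powerpc-cpu-features binding entirely
dt_cpu_ftrs=known    # use the binding, but don't enable unknown features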
+
+static void __init cpufeatures_setup_start(u32 isa)
+{
+ pr_info("setup for ISA %d\n", isa);
+
+ if (isa >= 3000) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
+ }
+}
+
+static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+{
+ const struct dt_cpu_feature_match *m;
+ bool known = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
+ m = &dt_cpu_feature_match_table[i];
+ if (!strcmp(f->name, m->name)) {
+ known = true;
+ if (m->enable(f))
+ break;
+
+ pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
+ f->name);
+ return false;
+ }
+ }
+
+ if (!known && enable_unknown) {
+ if (!feat_try_enable_unknown(f)) {
+ pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
+ f->name);
+ return false;
+ }
+ }
+
+ if (m->cpu_ftr_bit_mask)
+ cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
+
+ if (known)
+ pr_debug("enabling: %s\n", f->name);
+ else
+ pr_debug("enabling: %s (unknown)\n", f->name);
+
+ return true;
+}
+
+static __init void cpufeatures_cpu_quirks(void)
+{
+ int version = mfspr(SPRN_PVR);
+
+ /*
+ * Not all quirks can be derived from the cpufeatures device tree.
+ */
+ if ((version & 0xffffff00) == 0x004e0100)
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
+}
+
+static void __init cpufeatures_setup_finished(void)
+{
+ cpufeatures_cpu_quirks();
+
+ if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
+ pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+ }
+
+ system_registers.lpcr = mfspr(SPRN_LPCR);
+ system_registers.hfscr = mfspr(SPRN_HFSCR);
+ system_registers.fscr = mfspr(SPRN_FSCR);
+
+ cpufeatures_flush_tlb();
+
+ pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
+ cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
+}
+
+static int __init disabled_on_cmdline(void)
+{
+ unsigned long root, chosen;
+ const char *p;
+
+ root = of_get_flat_dt_root();
+ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+ if (chosen == -FDT_ERR_NOTFOUND)
+ return false;
+
+ p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+ if (!p)
+ return false;
+
+ if (strstr(p, "dt_cpu_ftrs=off"))
+ return true;
+
+ return false;
+}
+
+static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
+ && of_get_flat_dt_prop(node, "isa", NULL))
+ return 1;
+
+ return 0;
+}
+
+bool __init dt_cpu_ftrs_in_use(void)
+{
+ return using_dt_cpu_ftrs;
+}
+
+bool __init dt_cpu_ftrs_init(void *fdt)
+{
+ using_dt_cpu_ftrs = false;
+
+ /* Setup and verify the FDT, if it fails we just bail */
+ if (!early_init_dt_verify(fdt))
+ return false;
+
+ if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
+ return false;
+
+ if (disabled_on_cmdline())
+ return false;
+
+ cpufeatures_setup_cpu();
+
+ using_dt_cpu_ftrs = true;
+ return true;
+}
+
+static int nr_dt_cpu_features;
+static struct dt_cpu_feature *dt_cpu_features;
+
+static int __init process_cpufeatures_node(unsigned long node,
+ const char *uname, int i)
+{
+ const __be32 *prop;
+ struct dt_cpu_feature *f;
+ int len;
+
+ f = &dt_cpu_features[i];
+ memset(f, 0, sizeof(struct dt_cpu_feature));
+
+ f->node = node;
+
+ f->name = uname;
+
+ prop = of_get_flat_dt_prop(node, "isa", &len);
+ if (!prop) {
+ pr_warn("%s: missing isa property\n", uname);
+ return 0;
+ }
+ f->isa = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
+ if (!prop) {
+ pr_warn("%s: missing usable-privilege property", uname);
+ return 0;
+ }
+ f->usable_privilege = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "hv-support", &len);
+ if (prop)
+ f->hv_support = be32_to_cpup(prop);
+ else
+ f->hv_support = HV_SUPPORT_NONE;
+
+ prop = of_get_flat_dt_prop(node, "os-support", &len);
+ if (prop)
+ f->os_support = be32_to_cpup(prop);
+ else
+ f->os_support = OS_SUPPORT_NONE;
+
+ prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
+ if (prop)
+ f->hfscr_bit_nr = be32_to_cpup(prop);
+ else
+ f->hfscr_bit_nr = -1;
+ prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
+ if (prop)
+ f->fscr_bit_nr = be32_to_cpup(prop);
+ else
+ f->fscr_bit_nr = -1;
+ prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
+ if (prop)
+ f->hwcap_bit_nr = be32_to_cpup(prop);
+ else
+ f->hwcap_bit_nr = -1;
+
+ if (f->usable_privilege & USABLE_HV) {
+ if (!(mfmsr() & MSR_HV)) {
+ pr_warn("%s: HV feature passed to guest\n", uname);
+ return 0;
+ }
+
+ if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
+ pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
+ return 0;
+ }
+
+ if (f->hv_support == HV_SUPPORT_HFSCR) {
+ if (f->hfscr_bit_nr == -1) {
+ pr_warn("%s: missing hfscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+ } else {
+ if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
+ pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ if (f->usable_privilege & USABLE_OS) {
+ if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
+ pr_warn("%s: unwanted fscr_bit_nr\n", uname);
+ return 0;
+ }
+
+ if (f->os_support == OS_SUPPORT_FSCR) {
+ if (f->fscr_bit_nr == -1) {
+ pr_warn("%s: missing fscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+ } else {
+ if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
+ pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ if (!(f->usable_privilege & USABLE_PR)) {
+ if (f->hwcap_bit_nr != -1) {
+ pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ /* Do all the independent features in the first pass */
+ if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
+ if (cpufeatures_process_feature(f))
+ f->enabled = 1;
+ else
+ f->disabled = 1;
+ }
+
+ return 0;
+}
+
+static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
+{
+ const __be32 *prop;
+ int len;
+ int nr_deps;
+ int i;
+
+ if (f->enabled || f->disabled)
+ return;
+
+ prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
+ if (!prop) {
+ pr_warn("%s: missing dependencies property", f->name);
+ return;
+ }
+
+ nr_deps = len / sizeof(int);
+
+ for (i = 0; i < nr_deps; i++) {
+ unsigned long phandle = be32_to_cpu(prop[i]);
+ int j;
+
+ for (j = 0; j < nr_dt_cpu_features; j++) {
+ struct dt_cpu_feature *d = &dt_cpu_features[j];
+
+ if (of_get_flat_dt_phandle(d->node) == phandle) {
+ cpufeatures_deps_enable(d);
+ if (d->disabled) {
+ f->disabled = 1;
+ return;
+ }
+ }
+ }
+ }
+
+ if (cpufeatures_process_feature(f))
+ f->enabled = 1;
+ else
+ f->disabled = 1;
+}
+
+static int __init scan_cpufeatures_subnodes(unsigned long node,
+ const char *uname,
+ void *data)
+{
+ int *count = data;
+
+ process_cpufeatures_node(node, uname, *count);
+
+ (*count)++;
+
+ return 0;
+}
+
+static int __init count_cpufeatures_subnodes(unsigned long node,
+ const char *uname,
+ void *data)
+{
+ int *count = data;
+
+ (*count)++;
+
+ return 0;
+}
+
+static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
+ *uname, int depth, void *data)
+{
+ const __be32 *prop;
+ int count, i;
+ u32 isa;
+
+ /* We are scanning "ibm,powerpc-cpu-features" nodes only */
+ if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "isa", NULL);
+ if (!prop)
+ /* We checked before, "can't happen" */
+ return 0;
+
+ isa = be32_to_cpup(prop);
+
+ /* Count and allocate space for cpu features */
+ of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
+ &nr_dt_cpu_features);
+ dt_cpu_features = __va(
+ memblock_alloc(sizeof(struct dt_cpu_feature)*
+ nr_dt_cpu_features, PAGE_SIZE));
+
+ cpufeatures_setup_start(isa);
+
+ /* Scan nodes into dt_cpu_features and enable those without deps */
+ count = 0;
+ of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);
+
+ /* Recursively enable the remaining features with dependencies */
+ for (i = 0; i < nr_dt_cpu_features; i++) {
+ struct dt_cpu_feature *f = &dt_cpu_features[i];
+
+ cpufeatures_deps_enable(f);
+ }
+
+ prop = of_get_flat_dt_prop(node, "display-name", NULL);
+ if (prop && strlen((char *)prop) != 0) {
+ strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
+ cur_cpu_spec->cpu_name = dt_cpu_name;
+ }
+
+ cpufeatures_setup_finished();
+
+ memblock_free(__pa(dt_cpu_features),
+ sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);
+
+ return 0;
+}
+
+void __init dt_cpu_ftrs_scan(void)
+{
+ if (!using_dt_cpu_ftrs)
+ return;
+
+ of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
+}
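As a concrete reading of the parsing code above, a hypothetical feature node with isa = 2070 and usable-privilege = 7 (HV|OS|PR) but no FSCR/HFSCR/hwcap bits would end up as roughly the following in-memory record; all values here are illustrative, not taken from a real device tree:

/* Sketch only: what process_cpufeatures_node() would build for such a
 * node before cpufeatures_process_feature() runs the match table. */
static const struct dt_cpu_feature example_feature = {
	.name			= "strong-access-ordering",
	.isa			= ISA_V2_07B,
	.usable_privilege	= USABLE_HV | USABLE_OS | USABLE_PR,
	.hv_support		= HV_SUPPORT_NONE,
	.os_support		= OS_SUPPORT_NONE,
	.hfscr_bit_nr		= -1,
	.fscr_bit_nr		= -1,
	.hwcap_bit_nr		= -1,
};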
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 45b453e4d0c8..acd8ca76233e 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
@@ -799,8 +805,14 @@ kernel_dbg_exc:
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a9312b52fe6f..b886795060fd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -391,9 +391,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
*/
BEGIN_FTR_SECTION
rlwinm. r11,r12,47-31,30,31
- beq- 4f
- BRANCH_TO_COMMON(r10, machine_check_idle_common)
-4:
+ bne machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
@@ -1413,10 +1411,8 @@ USE_TEXT_SECTION()
.balign IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
- andis. r0,r4,0xa410 /* weird error? */
+ andis. r0,r4,0xa450 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
- andis. r0,r4,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1440,11 +1436,16 @@ do_hash_page:
/* Error */
blt- 13f
+
+ /* Reload DSISR into r4 for the DABR check below */
+ ld r4,_DSISR(r1)
#endif /* CONFIG_PPC_STD_MMU_64 */
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
-11: ld r4,_DAR(r1)
+11: andis. r0,r4,DSISR_DABRMATCH@h
+ bne- handle_dabr_fault
+ ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 07d4e0ad60db..4898d676dcae 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -416,7 +416,7 @@ power9_dd1_recover_paca:
* which needs to be restored from the stack.
*/
li r3, 1
- stb r0,PACA_NAPSTATELOST(r13)
+ stb r3,PACA_NAPSTATELOST(r13)
blr
/*
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 160ae0fa7d0d..01addfb0ed0a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+int is_current_kprobe_addr(unsigned long addr)
+{
+ struct kprobe *p = kprobe_running();
+ return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
bool arch_within_kprobe_blacklist(unsigned long addr)
{
return (addr >= (unsigned long)__kprobes_text_start &&
@@ -305,16 +311,17 @@ int kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
if (ret > 0) {
restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
return 1;
}
}
+ prepare_singlestep(p, regs);
return 1;
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -616,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
+
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -641,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d645da302bf2..2ad725ef4368 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
+ /*
+ * If we are in a transaction and FP is off then we can't have
+ * used FP inside that transaction. Hence the checkpointed
+ * state is the same as the live state. We need to copy the
+ * live state to the checkpointed state so that when the
+ * transaction is restored, the checkpointed state is correct
+ * and the aborted transaction sees the correct state. We use
+ * ckpt_regs.msr here as that's what tm_reclaim will use to
+ * determine if it's going to write the checkpointed state or
+ * not. So either this will write the checkpointed registers,
+ * or reclaim will. Similarly for VMX.
+ */
+ if ((thr->ckpt_regs.msr & MSR_FP) == 0)
+ memcpy(&thr->ckfp_state, &thr->fp_state,
+ sizeof(struct thread_fp_state));
+ if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
+ memcpy(&thr->ckvr_state, &thr->vr_state,
+ sizeof(struct thread_vr_state));
+
giveup_all(container_of(thr, struct task_struct, thread));
tm_reclaim(thr, thr->ckpt_regs.msr, cause);
@@ -1647,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
+ current->thread.load_fp = 0;
memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
@@ -1655,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.vr_save_area = NULL;
current->thread.vrsave = 0;
current->thread.used_vr = 0;
+ current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1666,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfhar = 0;
current->thread.tm_texasr = 0;
current->thread.tm_tfiar = 0;
+ current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index d2f0afeae5a0..f83056297441 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -57,6 +57,7 @@
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
+#include <asm/dt_cpu_ftrs.h>
#include <mm/mmu_decl.h>
@@ -160,7 +161,9 @@ static struct ibm_pa_feature {
{ .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL },
{ .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE },
{ .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE },
+#ifdef CONFIG_PPC_RADIX_MMU
{ .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX },
+#endif
{ .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
{ .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE,
.cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
@@ -375,23 +378,31 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* A POWER6 partition in "POWER6 architected" mode
* uses the 0x0f000002 PVR value; in POWER5+ mode
* it uses 0x0f000001.
+ *
+ * If we're using device tree CPU feature discovery then we don't
+ * support the cpu-version property, and it's the responsibility of the
+ * firmware/hypervisor to provide the correct feature set for the
+ * architecture level via the ibm,powerpc-cpu-features binding.
*/
- prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
- if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
- identify_cpu(0, be32_to_cpup(prop));
+ if (!dt_cpu_ftrs_in_use()) {
+ prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
+ identify_cpu(0, be32_to_cpup(prop));
- identical_pvr_fixup(node);
+ check_cpu_feature_properties(node);
+ check_cpu_pa_features(node);
+ }
- check_cpu_feature_properties(node);
- check_cpu_pa_features(node);
+ identical_pvr_fixup(node);
init_mmu_slb_size(node);
#ifdef CONFIG_PPC64
- if (nthreads > 1)
- cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
- else
+ if (nthreads == 1)
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
+ else if (!dt_cpu_ftrs_in_use())
+ cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif
+
return 0;
}
@@ -721,6 +732,8 @@ void __init early_init_devtree(void *params)
DBG("Scanning CPUs ...\n");
+ dt_cpu_ftrs_scan();
+
 /* Retrieve CPU-related information from the flat tree
* (altivec support, boot CPU ID, ...)
*/
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 69e077180db6..857129acf960 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -261,7 +261,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "cpu\t\t: ");
- if (cur_cpu_spec->pvr_mask)
+ if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
seq_printf(m, "%s", cur_cpu_spec->cpu_name);
else
seq_printf(m, "unknown (%08x)", pvr);
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_PPC64
- init_mm.context.addr_limit = TASK_SIZE_128TB;
+ init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
#error "context.addr_limit not initialized."
#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0d4dcaeaafcb..4640f6d64f8b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -49,6 +49,7 @@
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
+#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
@@ -274,8 +275,10 @@ void __init early_setup(unsigned long dt_ptr)
/* -------- printk is _NOT_ safe to use here ! ------- */
- /* Identify CPU type */
- identify_cpu(0, mfspr(SPRN_PVR));
+ /* Try new device tree based feature discovery ... */
+ if (!dt_cpu_ftrs_init(__va(dt_ptr)))
+ /* Otherwise use the old style CPU table */
+ identify_cpu(0, mfspr(SPRN_PVR));
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
initialise_paca(&boot_paca, 0);
@@ -541,6 +544,9 @@ void __init initialize_cache_info(void)
dcache_bsize = ppc64_caches.l1d.block_size;
icache_bsize = ppc64_caches.l1i.block_size;
+ cur_cpu_spec->dcache_bsize = dcache_bsize;
+ cur_cpu_spec->icache_bsize = icache_bsize;
+
DBG(" <- initialize_cache_info()\n");
}
@@ -610,6 +616,24 @@ void __init exc_lvl_early_init(void)
#endif
/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous faults taken in
+ * process context. We set preempt_count to zero, even though that isn't
+ * necessarily correct. To get the right value we'd need to copy it from
+ * the previous thread_info, but doing that might fault, causing more
+ * problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+ ti->task = NULL;
+ ti->cpu = cpu;
+ ti->preempt_count = 0;
+ ti->local_flags = 0;
+ ti->flags = 0;
+ klp_init_thread_info(ti);
+}
+
+/*
* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled. Exclusive emergency
* stack for machine checks.
@@ -627,24 +651,31 @@ void __init emergency_stack_init(void)
* Since we use these as temporary stacks during secondary CPU
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
+ *
+ * The IRQ stacks allocated elsewhere in this file are zeroed and
+ * initialized in kernel/irq.c. The emergency stacks are initialized
+ * here instead, so that they are available as early as possible.
*/
limit = min(safe_stack_limit(), ppc64_rma_size);
for_each_possible_cpu(i) {
struct thread_info *ti;
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
- klp_init_thread_info(ti);
+ memset(ti, 0, THREAD_SIZE);
+ emerg_stack_init_thread_info(ti, i);
paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64
/* emergency stack for NMI exception handling. */
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
- klp_init_thread_info(ti);
+ memset(ti, 0, THREAD_SIZE);
+ emerg_stack_init_thread_info(ti, i);
paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
/* emergency stack for machine check exception handling. */
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
- klp_init_thread_info(ti);
+ memset(ti, 0, THREAD_SIZE);
+ emerg_stack_init_thread_info(ti, i);
paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
}
@@ -655,7 +686,7 @@ void __init emergency_stack_init(void)
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
- return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+ return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
__pa(MAX_DMA_ADDRESS));
}
@@ -666,7 +697,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- if (cpu_to_node(from) == cpu_to_node(to))
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index df2a41647d8e..1069f74fca47 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -97,7 +97,7 @@ int smp_generic_cpu_bootable(unsigned int nr)
/* Special case - we inhibit secondary thread startup
* during boot if the user requests it.
*/
- if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
+ if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
return 0;
if (smt_enabled_at_boot
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a99f5d5..c98e90b4ea7b 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
stdu r1,-SWITCH_FRAME_SIZE(r1)
/* Save all gprs to pt_regs */
- SAVE_8GPRS(0,r1)
- SAVE_8GPRS(8,r1)
- SAVE_8GPRS(16,r1)
- SAVE_8GPRS(24,r1)
+ SAVE_GPR(0, r1)
+ SAVE_10GPRS(2, r1)
+ SAVE_10GPRS(12, r1)
+ SAVE_10GPRS(22, r1)
+
+ /* Save previous stack pointer (r1) */
+ addi r8, r1, SWITCH_FRAME_SIZE
+ std r8, GPR1(r1)
/* Load special regs for save below */
mfmsr r8
@@ -95,18 +99,44 @@ ftrace_call:
bl ftrace_stub
nop
- /* Load ctr with the possibly modified NIP */
- ld r3, _NIP(r1)
- mtctr r3
+ /* Load the possibly modified NIP */
+ ld r15, _NIP(r1)
+
#ifdef CONFIG_LIVEPATCH
- cmpd r14,r3 /* has NIP been altered? */
+ cmpd r14, r15 /* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+ /* NIP has not been altered, skip over further checks */
+ beq 1f
+
+ /* Check if there is an active kprobe on us */
+ subi r3, r14, 4
+ bl is_current_kprobe_addr
+ nop
+
+ /*
+ * If r3 == 1, then this is a kprobe/jprobe.
+	 * Otherwise, this is a livepatched function.
+ *
+ * The conditional branch for livepatch_handler below will use the
+ * result of this comparison. For kprobe/jprobe, we just need to branch to
+ * the new NIP, not call livepatch_handler. The branch below is bne, so we
+ * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+ * CR0[EQ] = (r3 == 1).
+ */
+ cmpdi r3, 1
+1:
#endif
+ /* Load CTR with the possibly modified NIP */
+ mtctr r15
+
/* Restore gprs */
- REST_8GPRS(0,r1)
- REST_8GPRS(8,r1)
- REST_8GPRS(16,r1)
- REST_8GPRS(24,r1)
+ REST_GPR(0,r1)
+ REST_10GPRS(2,r1)
+ REST_10GPRS(12,r1)
+ REST_10GPRS(22,r1)
/* Restore possibly modified LR */
ld r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
addi r1, r1, SWITCH_FRAME_SIZE
#ifdef CONFIG_LIVEPATCH
- /* Based on the cmpd above, if the NIP was altered handle livepatch */
+ /*
+ * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+ * not on a kprobe/jprobe, then handle livepatch.
+ */
bne- livepatch_handler
#endif
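
The stub above hands is_current_kprobe_addr() the address of the probed instruction (derived from the NIP saved in r14) and then branches on CR0[EQ], so a kprobe or jprobe hit jumps straight to the modified NIP instead of detouring through livepatch_handler. A sketch of what such a helper plausibly looks like (the exact body is an assumption; kprobe_running() is the standard accessor for the per-CPU active kprobe):

#include <linux/kprobes.h>

/*
 * Return 1 when @addr is the address of the kprobe currently being
 * handled on this CPU, 0 otherwise. kprobe_running() yields NULL
 * when no kprobe is active.
 */
int is_current_kprobe_addr(unsigned long addr)
{
        struct kprobe *p = kprobe_running();

        return (p && (unsigned long)p->addr == addr) ? 1 : 0;
}
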
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 65a471de96de..0c52cb5d43f5 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -67,7 +67,7 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
- select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
+ select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -197,6 +197,11 @@ config KVM_XICS
Specification) interrupt controller architecture used on
IBM POWER (pSeries) servers.
+config KVM_XIVE
+ bool
+ default y
+ depends on KVM_XICS && PPC_XIVE_NATIVE && KVM_BOOK3S_HV_POSSIBLE
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index b87ccde2137a..381a6ec0ff3b 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -46,7 +46,7 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \
book3s_64_vio_hv.o
kvm-pr-y := \
@@ -74,7 +74,7 @@ kvm-hv-y += \
book3s_64_mmu_radix.o
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
- book3s_hv_rm_xics.o
+ book3s_hv_rm_xics.o book3s_hv_rm_xive.o
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
@@ -89,10 +89,12 @@ endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
+kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
+kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o
+
kvm-book3s_64-module-objs := \
$(common-objs-y) \
book3s.o \
- book3s_64_vio.o \
book3s_rtas.o \
$(kvm-book3s_64-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8c4d7e9d27d2..72d977e30952 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -35,6 +35,7 @@
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
+#include <asm/xive.h>
#include "book3s.h"
#include "trace.h"
@@ -596,11 +597,14 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
- if (!vcpu->arch.icp) {
+ if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
r = -ENXIO;
break;
}
- *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
+ if (xive_enabled())
+ *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
+ else
+ *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
@@ -666,12 +670,14 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
- if (!vcpu->arch.icp) {
+ if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
r = -ENXIO;
break;
}
- r = kvmppc_xics_set_icp(vcpu,
- set_reg_val(id, *val));
+ if (xive_enabled())
+ r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
+ else
+ r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
@@ -942,6 +948,50 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
+#ifdef CONFIG_KVM_XICS
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ if (xive_enabled())
+ return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
+ line_status);
+ else
+ return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
+ line_status);
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
+ level, line_status);
+}
+static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
+}
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries, int gsi)
+{
+ entries->gsi = gsi;
+ entries->type = KVM_IRQ_ROUTING_IRQCHIP;
+ entries->set = kvmppc_book3s_set_irq;
+ entries->irqchip.irqchip = 0;
+ entries->irqchip.pin = gsi;
+ return 1;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+ return pin;
+}
+
+#endif /* CONFIG_KVM_XICS */
+
static int kvmppc_book3s_init(void)
{
int r;
@@ -952,12 +1002,25 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
r = kvmppc_book3s_init_pr();
#endif
- return r;
+#ifdef CONFIG_KVM_XICS
+#ifdef CONFIG_KVM_XIVE
+ if (xive_enabled()) {
+ kvmppc_xive_init_module();
+ kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
+ } else
+#endif
+ kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
+#endif
+ return r;
}
static void kvmppc_book3s_exit(void)
{
+#ifdef CONFIG_KVM_XICS
+ if (xive_enabled())
+ kvmppc_xive_exit_module();
+#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
#endif
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index eda0a8f6fae8..3adfd2f5301c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -301,6 +301,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
+ /* For radix, we might be in virtual mode, so punt */
+ if (kvm_is_radix(vcpu->kvm))
+ return H_TOO_HARD;
+
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -381,6 +385,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
bool prereg = false;
struct kvmppc_spapr_tce_iommu_table *stit;
+ /* For radix, we might be in virtual mode, so punt */
+ if (kvm_is_radix(vcpu->kvm))
+ return H_TOO_HARD;
+
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -491,6 +499,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long i, ret;
struct kvmppc_spapr_tce_iommu_table *stit;
+ /* For radix, we might be in virtual mode, so punt */
+ if (kvm_is_radix(vcpu->kvm))
+ return H_TOO_HARD;
+
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -527,6 +539,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return H_SUCCESS;
}
+/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba)
{
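
H_TOO_HARD is the book3s-HV convention for "abandon the real-mode fast path and let the virtual-mode handler redo this hcall", which is why the radix guards can simply return it: on a radix host the CPU may already be running with the MMU on, where the real-mode assumptions do not hold. A standalone model of the resulting two-tier dispatch (names and values illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define H_SUCCESS   0
#define H_TOO_HARD  9999    /* internal "punt to virtual mode" code */

static bool host_is_radix = true;

/* Fast path: meant to run with the MMU off; must punt on radix. */
static long h_put_tce_real_mode(void)
{
        if (host_is_radix)
                return H_TOO_HARD;
        return H_SUCCESS;
}

/* Slow path: full virtual-mode handler, always safe. */
static long h_put_tce_virt_mode(void)
{
        return H_SUCCESS;
}

static long handle_h_put_tce(void)
{
        long rc = h_put_tce_real_mode();

        if (rc == H_TOO_HARD)   /* fall back rather than fail the guest */
                rc = h_put_tce_virt_mode();
        return rc;
}

int main(void)
{
        printf("rc=%ld\n", handle_h_put_tce());
        return 0;
}
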
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 549dd6070dee..8d1a365b8edc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -67,6 +67,7 @@
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include "book3s.h"
@@ -837,6 +838,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_IPOLL:
case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu)) {
+ if (xive_enabled()) {
+ ret = H_NOT_AVAILABLE;
+ return RESUME_GUEST;
+ }
ret = kvmppc_xics_hcall(vcpu, req);
break;
}
@@ -1481,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
+ /*
+ * POWER9 DD1 has an erratum where writing TBU40 causes
+ * the timebase to lose ticks. So we don't let the
+ * timebase offset be changed on P9 DD1. (It is
+ * initialized to zero.)
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ break;
/* round up to multiple of 2^24 */
vcpu->arch.vcore->tb_offset =
ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2902,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
+ unsigned long ebb_regs[3] = {}; /* shut up GCC */
+ unsigned long user_tar = 0;
+ unsigned int user_vrsave;
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /*
+ * Don't allow entry with a suspended transaction, because
+ * the guest entry/exit code will lose it.
+ * If the guest has TM enabled, save away their TM-related SPRs
+ * (they will get restored by the TM unavailable interrupt).
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
@@ -2929,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
flush_all_to_thread(current);
+ /* Save userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ ebb_regs[0] = mfspr(SPRN_EBBHR);
+ ebb_regs[1] = mfspr(SPRN_EBBRR);
+ ebb_regs[2] = mfspr(SPRN_BESCR);
+ user_tar = mfspr(SPRN_TAR);
+ }
+ user_vrsave = mfspr(SPRN_VRSAVE);
+
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2947,10 +2993,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = kvmppc_book3s_hv_page_fault(run, vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
- } else if (r == RESUME_PASSTHROUGH)
- r = kvmppc_xics_rm_complete(vcpu, 0);
+ } else if (r == RESUME_PASSTHROUGH) {
+ if (WARN_ON(xive_enabled()))
+ r = H_SUCCESS;
+ else
+ r = kvmppc_xics_rm_complete(vcpu, 0);
+ }
} while (is_kvmppc_resume_guest(r));
+ /* Restore userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_EBBHR, ebb_regs[0]);
+ mtspr(SPRN_EBBRR, ebb_regs[1]);
+ mtspr(SPRN_BESCR, ebb_regs[2]);
+ mtspr(SPRN_TAR, user_tar);
+ mtspr(SPRN_FSCR, current->thread.fscr);
+ }
+ mtspr(SPRN_VRSAVE, user_vrsave);
+
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
@@ -3400,10 +3460,20 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/*
* On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
* Set HVICE bit to enable hypervisor virtualization interrupts.
+ * Set HEIC to prevent OS interrupts from going to the hypervisor
+ * (should be unnecessary, but better safe than sorry in case we
+ * re-enable EE in HV mode with this LPCR still set)
*/
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
lpcr &= ~LPCR_VPM0;
- lpcr |= LPCR_HVICE;
+ lpcr |= LPCR_HVICE | LPCR_HEIC;
+
+ /*
+ * If xive is enabled, we route 0x500 interrupts directly
+ * to the guest.
+ */
+ if (xive_enabled())
+ lpcr |= LPCR_LPES;
}
/*
@@ -3533,7 +3603,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
struct kvmppc_irq_map *irq_map;
struct kvmppc_passthru_irqmap *pimap;
struct irq_chip *chip;
- int i;
+ int i, rc = 0;
if (!kvm_irq_bypass)
return 1;
@@ -3558,10 +3628,10 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
/*
* For now, we only support interrupts for which the EOI operation
* is an OPAL call followed by a write to XIRR, since that's
- * what our real-mode EOI code does.
+ * what our real-mode EOI code does, or is a XIVE interrupt
*/
chip = irq_data_get_irq_chip(&desc->irq_data);
- if (!chip || !is_pnv_opal_msi(chip)) {
+ if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
host_irq, guest_gsi);
mutex_unlock(&kvm->lock);
@@ -3603,7 +3673,12 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
if (i == pimap->n_mapped)
pimap->n_mapped++;
- kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+ if (xive_enabled())
+ rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
+ else
+ kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+ if (rc)
+ irq_map->r_hwirq = 0;
mutex_unlock(&kvm->lock);
@@ -3614,7 +3689,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
struct irq_desc *desc;
struct kvmppc_passthru_irqmap *pimap;
- int i;
+ int i, rc = 0;
if (!kvm_irq_bypass)
return 0;
@@ -3639,9 +3714,12 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
return -ENODEV;
}
- kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
+ if (xive_enabled())
+ rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
+ else
+ kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
- /* invalidate the entry */
+ /* invalidate the entry (what to do on error from the above?) */
pimap->mapped[i].r_hwirq = 0;
/*
@@ -3650,7 +3728,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
*/
unlock:
mutex_unlock(&kvm->lock);
- return 0;
+ return rc;
}
static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
@@ -3928,7 +4006,7 @@ static int kvmppc_book3s_init_hv(void)
* indirectly, via OPAL.
*/
#ifdef CONFIG_SMP
- if (!get_paca()->kvm_hstate.xics_phys) {
+ if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 9c71c72e65ce..ee4c2558c305 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -32,6 +32,24 @@
#define KVM_CMA_CHUNK_ORDER 18
+#include "book3s_xics.h"
+#include "book3s_xive.h"
+
+/*
+ * The XIVE module will populate these when it loads
+ */
+unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
+EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
+
/*
* Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
* should be a power of 2.
@@ -189,7 +207,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
- if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
+ int r;
+
+ /* Only need to do the expensive mfmsr() on radix */
+ if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
+ r = powernv_get_random_long(&vcpu->arch.gpr[4]);
+ else
+ r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
+ if (r)
return H_SUCCESS;
return H_HARDWARE;
@@ -211,6 +236,7 @@ void kvmhv_rm_send_ipi(int cpu)
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
return;
}
+
/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
cpu_first_thread_sibling(cpu) ==
@@ -407,6 +433,9 @@ static long kvmppc_read_one_intr(bool *again)
u8 host_ipi;
int64_t rc;
+ if (xive_enabled())
+ return 1;
+
/* see if a host IPI is pending */
host_ipi = local_paca->kvm_hstate.host_ipi;
if (host_ipi)
@@ -491,3 +520,84 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again);
}
+
+#ifdef CONFIG_KVM_XICS
+static inline bool is_rm(void)
+{
+ return !(mfmsr() & MSR_DR);
+}
+
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_xirr(vcpu);
+ if (unlikely(!__xive_vm_h_xirr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_xirr(vcpu);
+ } else
+ return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.gpr[5] = get_tb();
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_xirr(vcpu);
+ if (unlikely(!__xive_vm_h_xirr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_xirr(vcpu);
+ } else
+ return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipoll(vcpu, server);
+ if (unlikely(!__xive_vm_h_ipoll))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_ipoll(vcpu, server);
+ } else
+ return H_TOO_HARD;
+}
+
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipi(vcpu, server, mfrr);
+ if (unlikely(!__xive_vm_h_ipi))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_ipi(vcpu, server, mfrr);
+ } else
+ return xics_rm_h_ipi(vcpu, server, mfrr);
+}
+
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_cppr(vcpu, cppr);
+ if (unlikely(!__xive_vm_h_cppr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_cppr(vcpu, cppr);
+ } else
+ return xics_rm_h_cppr(vcpu, cppr);
+}
+
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_eoi(vcpu, xirr);
+ if (unlikely(!__xive_vm_h_eoi))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_eoi(vcpu, xirr);
+ } else
+ return xics_rm_h_eoi(vcpu, xirr);
+}
+#endif /* CONFIG_KVM_XICS */
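
Taken together, the __xive_vm_h_* pointers declared at the top of this file and the is_rm() tests in the wrappers above form a three-way dispatch: real-mode XIVE code, virtual-mode XIVE code (reachable only once the module has populated the pointers), or the legacy XICS path. A plausible sketch of the module-side registration (function names follow this series' X_PFX convention; the exact bodies are an assumption):

/* In the modular XIVE code, on module init/exit. */
void kvmppc_xive_init_module(void)
{
        __xive_vm_h_xirr  = xive_vm_h_xirr;
        __xive_vm_h_ipoll = xive_vm_h_ipoll;
        __xive_vm_h_ipi   = xive_vm_h_ipi;
        __xive_vm_h_cppr  = xive_vm_h_cppr;
        __xive_vm_h_eoi   = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
        __xive_vm_h_xirr  = NULL;
        __xive_vm_h_ipoll = NULL;
        __xive_vm_h_ipi   = NULL;
        __xive_vm_h_cppr  = NULL;
        __xive_vm_h_eoi   = NULL;
}

This is also why the wrappers above check each pointer for NULL and fail with H_NOT_AVAILABLE: the virtual-mode implementation only exists once the module is loaded.
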
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a28970b..404deb512844 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* Put whatever is in the decrementer into the
* hypervisor decrementer.
*/
+BEGIN_FTR_SECTION
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r6, VCORE_KVM(r5)
+ ld r9, KVM_HOST_LPCR(r6)
+ andis. r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mfspr r8,SPRN_DEC
mftb r7
- mtspr SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+ /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+ bne 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r8,r8
+32: mtspr SPRN_HDEC,r8
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index ffde4507ddfd..2a862618f072 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -484,7 +484,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
}
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -522,8 +522,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
return check_too_hard(xics, icp);
}
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr)
+int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -609,7 +609,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
return check_too_hard(xics, this_icp);
}
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -729,7 +729,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
return check_too_hard(xics, icp);
}
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
struct kvmppc_icp *icp = vcpu->arch.icp;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
new file mode 100644
index 000000000000..abf5f01b6eb1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+#include <asm/synch.h>
+#include <asm/cputhreads.h>
+#include <asm/pgtable.h>
+#include <asm/ppc-opcode.h>
+#include <asm/pnv-pci.h>
+#include <asm/opal.h>
+#include <asm/smp.h>
+#include <asm/asm-prototypes.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+
+#include "book3s_xive.h"
+
+/* XXX */
+#include <asm/udbg.h>
+//#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) do { } while(0)
+
+static inline void __iomem *get_tima_phys(void)
+{
+ return local_paca->kvm_hstate.xive_tima_phys;
+}
+
+#undef XIVE_RUNTIME_CHECKS
+#define X_PFX xive_rm_
+#define X_STATIC
+#define X_STAT_PFX stat_rm_
+#define __x_tima get_tima_phys()
+#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
+#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
+#define __x_readb __raw_rm_readb
+#define __x_writeb __raw_rm_writeb
+#define __x_readw __raw_rm_readw
+#define __x_readq __raw_rm_readq
+#define __x_writeq __raw_rm_writeq
+
+#include "book3s_xive_template.c"
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7c6477d1840a..4888dd494604 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -30,6 +30,13 @@
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
+#include <asm/xive-regs.h>
+
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg) \
+BEGIN_FTR_SECTION; \
+ extsw reg, reg; \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
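
EXTEND_HDEC exists because POWER9 (CPU_FTR_ARCH_300) widens the hypervisor decrementer beyond 32 bits, so the sign extension that older CPUs need must be skipped there, and the related compares move from cmpwi to cmpdi. Roughly, in C (standalone illustration):

#include <stdint.h>
#include <stdio.h>

/* What EXTEND_HDEC does, expressed as data flow: sign-extend the
 * 32-bit HDEC value on pre-POWER9 CPUs, pass it through on POWER9. */
static int64_t extend_hdec(uint64_t reg, int is_power9)
{
        if (is_power9)
                return (int64_t)reg;             /* already full width */
        return (int64_t)(int32_t)(uint32_t)reg;  /* sign-extend bit 31 */
}

int main(void)
{
        uint64_t expired = 0xfffffff0;  /* 32-bit HDEC just went negative */

        printf("pre-P9: %lld, P9: %lld\n",
               (long long)extend_hdec(expired, 0),
               (long long)extend_hdec(expired, 1));
        return 0;
}

Without the sign extension, an expired 32-bit HDEC would look like a huge positive 64-bit value and the "timeslice expired" checks would never fire.
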
@@ -37,6 +44,17 @@
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS 144
+#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_TID (SFS-16)
+#define STACK_SLOT_PSSCR (SFS-24)
+#define STACK_SLOT_PID (SFS-32)
+#define STACK_SLOT_IAMR (SFS-40)
+#define STACK_SLOT_CIABR (SFS-48)
+#define STACK_SLOT_DAWR (SFS-56)
+#define STACK_SLOT_DAWRX (SFS-64)
+
/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
@@ -213,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+ /* HDEC may be larger than DEC for arch >= v3.00, but since the */
+ /* HDEC value came from DEC in the first place, it will fit */
mfspr r3, SPRN_HDEC
mtspr SPRN_DEC, r3
/*
@@ -294,8 +314,9 @@ kvm_novcpu_wakeup:
/* See if our timeslice has expired (HDEC is negative) */
mfspr r0, SPRN_HDEC
+ EXTEND_HDEC(r0)
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
- cmpwi r0, 0
+ cmpdi r0, 0
blt kvm_novcpu_exit
/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -318,10 +339,10 @@ kvm_novcpu_exit:
bl kvmhv_accumulate_time
#endif
13: mr r3, r12
- stw r12, 112-4(r1)
+ stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
- lwz r12, 112-4(r1)
+ lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host
/*
@@ -389,8 +410,8 @@ kvm_secondary_got_guest:
lbz r4, HSTATE_PTID(r13)
cmpwi r4, 0
bne 63f
- lis r6, 0x7fff
- ori r6, r6, 0xffff
+ LOAD_REG_ADDR(r6, decrementer_max)
+ ld r6, 0(r6)
mtspr SPRN_HDEC, r6
/* and set per-LPAR registers, if doing dynamic micro-threading */
ld r6, HSTATE_SPLIT_MODE(r13)
@@ -544,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* *
*****************************************************************************/
-/* Stack frame offsets */
-#define STACK_SLOT_TID (112-16)
-#define STACK_SLOT_PSSCR (112-24)
-#define STACK_SLOT_PID (112-32)
-
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -564,7 +580,7 @@ kvmppc_hv_entry:
*/
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -112(r1)
+ stdu r1, -SFS(r1)
/* Save R1 in the PACA */
std r1, HSTATE_HOST_R1(r13)
@@ -748,10 +764,20 @@ BEGIN_FTR_SECTION
mfspr r5, SPRN_TIDR
mfspr r6, SPRN_PSSCR
mfspr r7, SPRN_PID
+ mfspr r8, SPRN_IAMR
std r5, STACK_SLOT_TID(r1)
std r6, STACK_SLOT_PSSCR(r1)
std r7, STACK_SLOT_PID(r1)
+ std r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_CIABR
+ mfspr r6, SPRN_DAWR
+ mfspr r7, SPRN_DAWRX
+ std r5, STACK_SLOT_CIABR(r1)
+ std r6, STACK_SLOT_DAWR(r1)
+ std r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
/* Set partition DABR */
@@ -967,9 +993,27 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/* Check if HDEC expires soon */
mfspr r3, SPRN_HDEC
- cmpwi r3, 512 /* 1 microsecond */
+ EXTEND_HDEC(r3)
+ cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
+#ifdef CONFIG_KVM_XICS
+ /* We are entering the guest on that thread, push VCPU to XIVE */
+ ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr0, r10, 0
+ beq no_xive
+ ld r11, VCPU_XIVE_SAVED_STATE(r4)
+ li r9, TM_QW1_OS
+ stdcix r11,r9,r10
+ eieio
+ lwz r11, VCPU_XIVE_CAM_WORD(r4)
+ li r9, TM_QW1_OS + TM_WORD2
+ stwcix r11,r9,r10
+ li r9, 1
+ stw r9, VCPU_XIVE_PUSHED(r4)
+no_xive:
+#endif /* CONFIG_KVM_XICS */
+
deliver_guest_interrupt:
ld r6, VCPU_CTR(r4)
ld r7, VCPU_XER(r4)
@@ -1307,6 +1351,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
blt deliver_guest_interrupt
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+#ifdef CONFIG_KVM_XICS
+ /* We are exiting, pull the VP from the XIVE */
+ lwz r0, VCPU_XIVE_PUSHED(r9)
+ cmpwi cr0, r0, 0
+ beq 1f
+ li r7, TM_SPC_PULL_OS_CTX
+ li r6, TM_QW1_OS
+ mfmsr r0
+ andi. r0, r0, MSR_IR /* in real mode? */
+ beq 2f
+ ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
+ cmpldi cr0, r10, 0
+ beq 1f
+ /* First load to pull the context, we ignore the value */
+ lwzx r11, r7, r10
+ eieio
+ /* Second load to recover the context state (Words 0 and 1) */
+ ldx r11, r6, r10
+ b 3f
+2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr0, r10, 0
+ beq 1f
+ /* First load to pull the context, we ignore the value */
+ lwzcix r11, r7, r10
+ eieio
+ /* Second load to recover the context state (Words 0 and 1) */
+ ldcix r11, r6, r10
+3: std r11, VCPU_XIVE_SAVED_STATE(r9)
+ /* Fixup some of the state for the next load */
+ li r10, 0
+ li r0, 0xff
+ stw r10, VCPU_XIVE_PUSHED(r9)
+ stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
+ stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+1:
+#endif /* CONFIG_KVM_XICS */
/* Save more register state */
mfdar r6
mfdsisr r7
@@ -1451,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
* set by the guest could disrupt the host.
*/
li r0, 0
- mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX, r0
+ mtspr SPRN_PSPB, r0
mtspr SPRN_WORT, r0
BEGIN_FTR_SECTION
+ mtspr SPRN_IAMR, r0
mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
@@ -1471,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
+ mtspr SPRN_UAMOR, r6
/* Switch DSCR back to host value */
mfspr r8, SPRN_DSCR
@@ -1616,12 +1696,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Restore host values of some registers */
BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_CIABR(r1)
+ ld r6, STACK_SLOT_DAWR(r1)
+ ld r7, STACK_SLOT_DAWRX(r1)
+ mtspr SPRN_CIABR, r5
+ mtspr SPRN_DAWR, r6
+ mtspr SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+BEGIN_FTR_SECTION
ld r5, STACK_SLOT_TID(r1)
ld r6, STACK_SLOT_PSSCR(r1)
ld r7, STACK_SLOT_PID(r1)
+ ld r8, STACK_SLOT_IAMR(r1)
mtspr SPRN_TIDR, r5
mtspr SPRN_PSSCR, r6
mtspr SPRN_PID, r7
+ mtspr SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
PPC_INVALIDATE_ERAT
@@ -1765,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
- ld r0, 112+PPC_LR_STKOFF(r1)
- addi r1, r1, 112
+ ld r0, SFS+PPC_LR_STKOFF(r1)
+ addi r1, r1, SFS
mtlr r0
blr
@@ -2011,7 +2101,7 @@ hcall_real_table:
.long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
- .long 0 /* 0x70 - H_IPOLL */
+ .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
.long 0 /* 0x64 - H_EOI */
@@ -2181,7 +2271,11 @@ hcall_real_table:
.long 0 /* 0x2f0 */
.long 0 /* 0x2f4 */
.long 0 /* 0x2f8 */
- .long 0 /* 0x2fc */
+#ifdef CONFIG_KVM_XICS
+ .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
+#else
+ .long 0 /* 0x2fc - H_XIRR_X */
+#endif
.long DOTSYM(kvmppc_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
@@ -2308,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
mfspr r3, SPRN_DEC
mfspr r4, SPRN_HDEC
mftb r5
- cmpw r3, r4
+ extsw r3, r3
+ EXTEND_HDEC(r4)
+ cmpd r3, r4
ble 67f
mtspr SPRN_DEC, r4
67:
/* save expiry time of guest decrementer */
- extsw r3, r3
add r3, r3, r5
ld r4, HSTATE_KVM_VCPU(r13)
ld r5, HSTATE_KVM_VCORE(r13)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index bcbeeb62dd13..8a4205fa774f 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
pteg_addr = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+ ret = H_FUNCTION;
+ if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+ goto done;
hpte = pteg;
ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
pteg_addr += i * HPTE_SIZE;
- copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+ goto done;
kvmppc_set_gpr(vcpu, 4, pte_index | i);
ret = H_SUCCESS;
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
goto done;
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+ goto done;
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+ ret = H_FUNCTION;
+ break;
+ }
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
tsh |= H_BULK_REMOVE_NOT_FOUND;
} else {
/* Splat the pteg in (userland) hpt */
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+ ret = H_FUNCTION;
+ break;
+ }
rb = compute_tlbie_rb(pte[0], pte[1],
tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
pte[0] = (__force u64)cpu_to_be64(pte[0]);
pte[1] = (__force u64)cpu_to_be64(pte[1]);
- copy_to_user((void __user *)pteg, pte, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+ goto done;
ret = H_SUCCESS;
done:
@@ -244,36 +262,37 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
-static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
- unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
- unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
- unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
- rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+ rc = kvmppc_h_logical_ci_load(vcpu);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
-static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
{
long rc;
- rc = kvmppc_h_logical_ci_load(vcpu);
+ rc = kvmppc_h_logical_ci_store(vcpu);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
-static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
+ unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+ unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+ unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
- rc = kvmppc_h_logical_ci_store(vcpu);
+ rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
@@ -311,6 +330,23 @@ static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+#else /* CONFIG_SPAPR_TCE_IOMMU */
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+ return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
+{
+ return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
+{
+ return EMULATE_FAIL;
+}
+#endif /* CONFIG_SPAPR_TCE_IOMMU */
+
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
long rc = kvmppc_xics_hcall(vcpu, cmd);
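
All the kvmppc_h_pr_* changes above follow one pattern: copy_from_user()/copy_to_user() return the number of bytes left uncopied (not an errno), and a nonzero return now fails the hcall with H_FUNCTION instead of being silently ignored. The shape of the fix, reduced to a standalone model (helper and values illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define H_SUCCESS   0
#define H_FUNCTION  (-2)

/* Stand-in for copy_from_user(): returns bytes NOT copied, 0 = ok. */
static unsigned long fake_copy_from_user(void *dst, const void *src,
                                         size_t n)
{
        if (!src)               /* model an unreadable user pointer */
                return n;
        memcpy(dst, src, n);
        return 0;
}

static long h_enter_like(const void *user_pteg)
{
        unsigned long pteg[16];
        long ret;

        ret = H_FUNCTION;       /* assume failure before each copy */
        if (fake_copy_from_user(pteg, user_pteg, sizeof(pteg)))
                goto done;

        ret = H_SUCCESS;        /* ... real PTE processing goes here ... */
done:
        return ret;
}

int main(void)
{
        unsigned long src[16] = { 0 };

        printf("good=%ld bad=%ld\n", h_enter_like(src), h_enter_like(NULL));
        return 0;
}
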
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 20528701835b..2d3b2b1cc272 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -16,6 +16,7 @@
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/rtas.h>
+#include <asm/xive.h>
#ifdef CONFIG_KVM_XICS
static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -32,7 +33,10 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
server = be32_to_cpu(args->args[1]);
priority = be32_to_cpu(args->args[2]);
- rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
+ if (xive_enabled())
+ rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
+ else
+ rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
if (rc)
rc = -3;
out:
@@ -52,7 +56,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
server = priority = 0;
- rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
+ if (xive_enabled())
+ rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
+ else
+ rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
if (rc) {
rc = -3;
goto out;
@@ -76,7 +83,10 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- rc = kvmppc_xics_int_off(vcpu->kvm, irq);
+ if (xive_enabled())
+ rc = kvmppc_xive_int_off(vcpu->kvm, irq);
+ else
+ rc = kvmppc_xics_int_off(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
@@ -95,7 +105,10 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- rc = kvmppc_xics_int_on(vcpu->kvm, irq);
+ if (xive_enabled())
+ rc = kvmppc_xive_int_on(vcpu->kvm, irq);
+ else
+ rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 459b72cb617a..d329b2add7e2 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1306,8 +1306,8 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
return 0;
}
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
- bool line_status)
+int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
{
struct kvmppc_xics *xics = kvm->arch.xics;
@@ -1316,14 +1316,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
return ics_deliver_irq(xics, irq, level);
}
-int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
- struct kvm *kvm, int irq_source_id,
- int level, bool line_status)
-{
- return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
- level, line_status);
-}
-
static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
struct kvmppc_xics *xics = dev->private;
@@ -1457,29 +1449,6 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}
-static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
-}
-
-int kvm_irq_map_gsi(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *entries, int gsi)
-{
- entries->gsi = gsi;
- entries->type = KVM_IRQ_ROUTING_IRQCHIP;
- entries->set = xics_set_irq;
- entries->irqchip.irqchip = 0;
- entries->irqchip.pin = gsi;
- return 1;
-}
-
-int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
- return pin;
-}
-
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
unsigned long host_irq)
{
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index ec5474cf70c6..453c9e518c19 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -10,6 +10,7 @@
#ifndef _KVM_PPC_BOOK3S_XICS_H
#define _KVM_PPC_BOOK3S_XICS_H
+#ifdef CONFIG_KVM_XICS
/*
* We use a two-level tree to store interrupt source information.
* There are up to 1024 ICS nodes, each of which can represent
@@ -144,5 +145,11 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
return ics;
}
+extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
new file mode 100644
index 000000000000..ffe1da95033a
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -0,0 +1,1894 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "xive-kvm: " fmt
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/time.h>
+#include <asm/opal.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "book3s_xive.h"
+
+
+/*
+ * Virtual mode variants of the hcalls for use on radix/radix
+ * with AIL. They require the VCPU's VP to be "pushed".
+ *
+ * We still instantiate them here because we also use some of the
+ * generated utility functions in this file.
+ */
+#define XIVE_RUNTIME_CHECKS
+#define X_PFX xive_vm_
+#define X_STATIC static
+#define X_STAT_PFX stat_vm_
+#define __x_tima xive_tima
+#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
+#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
+#define __x_readb __raw_readb
+#define __x_writeb __raw_writeb
+#define __x_readw __raw_readw
+#define __x_readq __raw_readq
+#define __x_writeq __raw_writeq
+
+#include "book3s_xive_template.c"
+
+/*
+ * We leave a gap of a couple of interrupts in the queue to
+ * account for the IPI and as an additional safety guard.
+ */
+#define XIVE_Q_GAP 2
+
+/*
+ * This is a simple trigger for a generic XIVE IRQ. This must
+ * only be called for interrupts that support a trigger page
+ */
+static bool xive_irq_trigger(struct xive_irq_data *xd)
+{
+ /* This should be only for MSIs */
+ if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
+ return false;
+
+ /* Those interrupts should always have a trigger page */
+ if (WARN_ON(!xd->trig_mmio))
+ return false;
+
+ out_be64(xd->trig_mmio, 0);
+
+ return true;
+}
+
+static irqreturn_t xive_esc_irq(int irq, void *data)
+{
+ struct kvm_vcpu *vcpu = data;
+
+ /* We use the existing H_PROD mechanism to wake up the target */
+ vcpu->arch.prodded = 1;
+ smp_mb();
+ if (vcpu->arch.ceded)
+ kvmppc_fast_vcpu_kick(vcpu);
+
+ return IRQ_HANDLED;
+}
+
+static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q = &xc->queues[prio];
+ char *name = NULL;
+ int rc;
+
+ /* Already there ? */
+ if (xc->esc_virq[prio])
+ return 0;
+
+ /* Hook up the escalation interrupt */
+ xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
+ if (!xc->esc_virq[prio]) {
+ pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ return -EIO;
+ }
+
+ /*
+ * Future improvement: start with them disabled
+ * and handle the DD2-and-later scheme of merged
+ * escalation interrupts.
+ */
+ name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+ vcpu->kvm->arch.lpid, xc->server_num, prio);
+ if (!name) {
+ pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ rc = -ENOMEM;
+ goto error;
+ }
+ rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
+ IRQF_NO_THREAD, name, vcpu);
+ if (rc) {
+ pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ goto error;
+ }
+ xc->esc_virq_names[prio] = name;
+ return 0;
+error:
+ irq_dispose_mapping(xc->esc_virq[prio]);
+ xc->esc_virq[prio] = 0;
+ kfree(name);
+ return rc;
+}
+
+static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = xc->xive;
+ struct xive_q *q = &xc->queues[prio];
+ void *qpage;
+ int rc;
+
+ if (WARN_ON(q->qpage))
+ return 0;
+
+ /* Allocate the queue and retrieve info on the current node for now */
+ qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
+ if (!qpage) {
+ pr_err("Failed to allocate queue %d for VCPU %d\n",
+ prio, xc->server_num);
+ return -ENOMEM;
+ }
+ memset(qpage, 0, 1 << xive->q_order);
+
+ /*
+ * Reconfigure the queue. This will set q->qpage only once the
+ * queue is fully configured. This is a requirement for prio 0
+ * as we will stop doing EOIs for every IPI as soon as we observe
+ * qpage being non-NULL, and instead will only EOI when we receive
+ * corresponding queue 0 entries
+ */
+ rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
+ xive->q_order, true);
+ if (rc)
+ pr_err("Failed to configure queue %d for VCPU %d\n",
+ prio, xc->server_num);
+ return rc;
+}
+
+/* Called with kvm_lock held */
+static int xive_check_provisioning(struct kvm *kvm, u8 prio)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvm_vcpu *vcpu;
+ int i, rc;
+
+ lockdep_assert_held(&kvm->lock);
+
+ /* Already provisioned ? */
+ if (xive->qmap & (1 << prio))
+ return 0;
+
+ pr_devel("Provisioning prio... %d\n", prio);
+
+ /* Provision each VCPU and enable escalations */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.xive_vcpu)
+ continue;
+ rc = xive_provision_queue(vcpu, prio);
+ if (rc == 0)
+ xive_attach_escalation(vcpu, prio);
+ if (rc)
+ return rc;
+ }
+
+ /* Order previous stores and mark it as provisioned */
+ mb();
+ xive->qmap |= (1 << prio);
+ return 0;
+}
+
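xive_check_provisioning() makes per-priority queue setup idempotent: a bit per priority in xive->qmap records completion, and the mb() orders the queue-setup stores before the bit becomes visible to lockless readers. A standalone model of the pattern (single-threaded stand-ins, names hypothetical):

#include <stdio.h>

static unsigned char qmap;      /* one bit per priority */

static int provision_queues(unsigned int prio)
{
        printf("provisioning queues for prio %u\n", prio);
        return 0;
}

static int check_provisioning(unsigned int prio)
{
        if (qmap & (1u << prio))        /* already done, fast exit */
                return 0;
        if (provision_queues(prio))
                return -1;
        /* The kernel issues mb() here so the setup stores are visible
         * before the qmap bit is. */
        qmap |= 1u << prio;
        return 0;
}

int main(void)
{
        check_provisioning(5);
        check_provisioning(5);          /* second call is a no-op */
        return 0;
}
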
+static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_xive_vcpu *xc;
+ struct xive_q *q;
+
+ /* Locate target server */
+ vcpu = kvmppc_xive_find_server(kvm, server);
+ if (!vcpu) {
+ pr_warn("%s: Can't find server %d\n", __func__, server);
+ return;
+ }
+ xc = vcpu->arch.xive_vcpu;
+ if (WARN_ON(!xc))
+ return;
+
+ q = &xc->queues[prio];
+ atomic_inc(&q->pending_count);
+}
+
+static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q;
+ u32 max;
+
+ if (WARN_ON(!xc))
+ return -ENXIO;
+ if (!xc->valid)
+ return -ENXIO;
+
+ q = &xc->queues[prio];
+ if (WARN_ON(!q->qpage))
+ return -ENXIO;
+
+ /* Calculate max number of interrupts in that queue. */
+ max = (q->msk + 1) - XIVE_Q_GAP;
+ return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
+}
+
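xive_try_pick_queue() reserves a slot against a ceiling of (q->msk + 1) - XIVE_Q_GAP, where q->msk is the queue-size mask, so a couple of entries always stay free for the IPI and as a safety margin; in the kernel the reservation is a single atomic_add_unless() on q->count. A standalone model:

#include <stdio.h>

#define XIVE_Q_GAP 2

/* Reserve one slot in a queue of (msk + 1) entries, keeping
 * XIVE_Q_GAP entries free; returns 0 on success. */
static int try_pick_queue(unsigned int *count, unsigned int msk)
{
        unsigned int max = (msk + 1) - XIVE_Q_GAP;

        if (*count >= max)      /* kernel: atomic_add_unless() */
                return -1;      /* -EBUSY: try another vcpu's queue */
        (*count)++;
        return 0;
}

int main(void)
{
        unsigned int count = 0, msk = 7;        /* 8-entry queue */
        int i, rc = 0;

        for (i = 0; rc == 0; i++)
                rc = try_pick_queue(&count, msk);
        printf("accepted %d interrupts for 8 slots\n", i - 1);
        return 0;
}
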
+static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
+{
+ struct kvm_vcpu *vcpu;
+ int i, rc;
+
+ /* Locate target server */
+ vcpu = kvmppc_xive_find_server(kvm, *server);
+ if (!vcpu) {
+ pr_devel("Can't find server %d\n", *server);
+ return -EINVAL;
+ }
+
+ pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
+
+ /* Try pick it */
+ rc = xive_try_pick_queue(vcpu, prio);
+ if (rc == 0)
+ return rc;
+
+ pr_devel(" .. failed, looking up candidate...\n");
+
+ /* Failed, pick another VCPU */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.xive_vcpu)
+ continue;
+ rc = xive_try_pick_queue(vcpu, prio);
+ if (rc == 0) {
+ *server = vcpu->arch.xive_vcpu->server_num;
+ pr_devel(" found on 0x%x/%d\n", *server, prio);
+ return rc;
+ }
+ }
+ pr_devel(" no available target !\n");
+
+ /* No available target ! */
+ return -EBUSY;
+}
+
+static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state)
+{
+ struct xive_irq_data *xd;
+ u32 hw_num;
+ u8 old_prio;
+ u64 val;
+
+ /*
+ * Take the lock, set masked, try again if racing
+ * with H_EOI
+ */
+ for (;;) {
+ arch_spin_lock(&sb->lock);
+ old_prio = state->guest_priority;
+ state->guest_priority = MASKED;
+ mb();
+ if (!state->in_eoi)
+ break;
+ state->guest_priority = old_prio;
+ arch_spin_unlock(&sb->lock);
+ }
+
+ /* No change ? Bail */
+ if (old_prio == MASKED)
+ return old_prio;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ /*
+ * If the interrupt is marked as needing masking via
+ * firmware, we do it here. Firmware masking, however,
+ * is "lossy": it won't return the old P and Q bits
+ * and won't set the interrupt to a state where it will
+ * record queued ones. If this is an issue we should do
+ * lazy masking instead.
+ *
+ * For now, we work around this in unmask by forcing
+ * an interrupt whenever we unmask a non-LSI via FW
+ * (if ever).
+ */
+ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
+ xive_native_configure_irq(hw_num,
+ xive->vp_base + state->act_server,
+ MASKED, state->number);
+ /* set old_p so we can track if an H_EOI was done */
+ state->old_p = true;
+ state->old_q = false;
+ } else {
+ /* Set PQ to 10, return old P and old Q and remember them */
+ val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
+ state->old_p = !!(val & 2);
+ state->old_q = !!(val & 1);
+
+ /*
+ * Synchronize hardware to ensure the queues are updated
+ * when masking
+ */
+ xive_native_sync_source(hw_num);
+ }
+
+ return old_prio;
+}
+
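The retry loop in xive_lock_and_mask() exists because the H_EOI path flags state->in_eoi without taking the source-block lock: the mask side publishes MASKED, re-checks in_eoi after a barrier, and undoes and retries while an EOI is still in flight. A reduced single-threaded model of that handshake (the lock and barrier stubs stand in for arch_spin_lock()/arch_spin_unlock()/mb()):

#include <stdbool.h>
#include <stdio.h>

#define MASKED 0xff

struct irq_state {
        bool in_eoi;
        unsigned char guest_priority;
};

/* Single-threaded stand-ins for the real primitives. */
static void lock(struct irq_state *st)   { (void)st; }
static void unlock(struct irq_state *st) { (void)st; }
static void barrier_stub(void)           { }

/* Publish MASKED, then back off and retry while an EOI is running. */
static unsigned char lock_and_mask(struct irq_state *st)
{
        unsigned char old;

        for (;;) {
                lock(st);
                old = st->guest_priority;
                st->guest_priority = MASKED;
                barrier_stub();
                if (!st->in_eoi)
                        break;                  /* safe: no EOI in flight */
                st->guest_priority = old;       /* undo and retry */
                unlock(st);
        }
        return old;                             /* lock still held */
}

int main(void)
{
        struct irq_state st = { .in_eoi = false, .guest_priority = 5 };

        printf("old prio %u, now %u\n",
               (unsigned)lock_and_mask(&st), (unsigned)st.guest_priority);
        return 0;
}
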
+static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state)
+{
+ /*
+ * Take the lock, and try again if racing with H_EOI
+ */
+ for (;;) {
+ arch_spin_lock(&sb->lock);
+ if (!state->in_eoi)
+ break;
+ arch_spin_unlock(&sb->lock);
+ }
+}
+
+static void xive_finish_unmask(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state,
+ u8 prio)
+{
+ struct xive_irq_data *xd;
+ u32 hw_num;
+
+ /* If we aren't changing a thing, move on */
+ if (state->guest_priority != MASKED)
+ goto bail;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ /*
+ * See comment in xive_lock_and_mask() concerning masking
+ * via firmware.
+ */
+ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
+ xive_native_configure_irq(hw_num,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+ /* If an EOI is needed, do it here */
+ if (!state->old_p)
+ xive_vm_source_eoi(hw_num, xd);
+ /* If this is not an LSI, force a trigger */
+ if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
+ xive_irq_trigger(xd);
+ goto bail;
+ }
+
+ /* Old Q set, set PQ to 11 */
+ if (state->old_q)
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
+
+ /*
+ * If not old P, then perform an "effective" EOI,
+ * on the source. This will handle the cases where
+ * FW EOI is needed.
+ */
+ if (!state->old_p)
+ xive_vm_source_eoi(hw_num, xd);
+
+ /* Synchronize ordering and mark unmasked */
+ mb();
+bail:
+ state->guest_priority = prio;
+}
+
+/*
+ * Target an interrupt at a given server/prio. This will fall back
+ * to another server if necessary and perform the HW targeting
+ * updates as needed.
+ *
+ * NOTE: Must be called with the state lock held
+ */
+static int xive_target_interrupt(struct kvm *kvm,
+ struct kvmppc_xive_irq_state *state,
+ u32 server, u8 prio)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ u32 hw_num;
+ int rc;
+
+ /*
+ * This will return a tentative server and actual
+ * priority. The count for that new target will have
+ * already been incremented.
+ */
+ rc = xive_select_target(kvm, &server, prio);
+
+ /*
+ * We failed to find a target? Not much we can do,
+ * at least until we support the GIQ.
+ */
+ if (rc)
+ return rc;
+
+ /*
+ * Increment the old queue pending count if there
+ * was one so that the old queue count gets adjusted later
+ * when observed to be empty.
+ */
+ if (state->act_priority != MASKED)
+ xive_inc_q_pending(kvm,
+ state->act_server,
+ state->act_priority);
+ /*
+ * Update state and HW
+ */
+ state->act_priority = prio;
+ state->act_server = server;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, NULL);
+
+ return xive_native_configure_irq(hw_num,
+ xive->vp_base + server,
+ prio, state->number);
+}
+
+/*
+ * Targeting rules: In order to avoid losing track of
+ * pending interrupts across mask and unmask, which would
+ * allow queue overflows, we implement the following rules:
+ *
+ * - Unless it was never enabled (or we run out of capacity),
+ * an interrupt is always targeted at a valid server/queue
+ * pair even when "masked" by the guest. This pair tends to
+ * be the last one used, but it can be changed under some
+ * circumstances. That allows us to separate targeting
+ * from masking; we only handle accounting during (re)targeting.
+ * This also allows us to let an interrupt drain into its target
+ * queue after masking, avoiding complex schemes to remove
+ * interrupts out of remote processor queues.
+ *
+ * - When masking, we set PQ to 10 and save the previous value
+ * of P and Q.
+ *
+ * - When unmasking, if saved Q was set, we set PQ to 11;
+ * otherwise we leave PQ at the HW state, which will be either
+ * 10 if nothing happened or 11 if the interrupt fired while
+ * masked. Effectively we are OR'ing the previous Q into the
+ * HW Q.
+ *
+ * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
+ * which will unmask the interrupt and shoot a new one if Q was
+ * set.
+ *
+ * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
+ * effectively meaning an H_EOI from the guest is still expected
+ * for that interrupt).
+ *
+ * - If H_EOI occurs while masked, we clear the saved P.
+ *
+ * - When changing target, we account on the new target and
+ * increment a separate "pending" counter on the old one.
+ * This pending counter will be used to decrement the old
+ * target's count when its queue has been observed empty.
+ */
+
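The rules above reduce to a small state machine over the ESB P and Q bits. A standalone model of the mask/unmask transitions (in hardware PQ is read and set atomically via special ESB loads such as XIVE_ESB_SET_PQ_10; here it is plain state):

#include <stdbool.h>
#include <stdio.h>

/* P = source blocked / EOI owed, Q = interrupt latched while blocked. */
struct esb { bool p, q; };
struct saved { bool old_p, old_q; };

static void mask_source(struct esb *e, struct saved *s)
{
        s->old_p = e->p;
        s->old_q = e->q;
        e->p = true;            /* PQ = 10: block, latch new ones into Q */
        e->q = false;
}

static bool unmask_source(struct esb *e, const struct saved *s)
{
        bool retrigger = false;

        if (s->old_q)           /* OR saved Q back in: PQ = 11 */
                e->q = true;
        if (!s->old_p) {        /* effective EOI: re-enable, refire if Q */
                retrigger = e->q;
                e->p = false;
                e->q = false;
        }
        /* else: guest still owes an H_EOI, leave PQ at 10/11 */
        return retrigger;
}

int main(void)
{
        struct esb e = { .p = false, .q = false };
        struct saved s;

        mask_source(&e, &s);
        e.q = true;             /* interrupt fires while masked */
        printf("refire on unmask: %d\n", unmask_source(&e, &s));
        return 0;
}
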
+int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u8 new_act_prio;
+ int rc = 0;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
+ irq, server, priority);
+
+ /* First, check provisioning of queues */
+ if (priority != MASKED)
+ rc = xive_check_provisioning(xive->kvm,
+ xive_prio_from_guest(priority));
+ if (rc) {
+ pr_devel(" provisioning failure %d !\n", rc);
+ return rc;
+ }
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * We first handle masking/unmasking, since the locking
+ * might need to be retried due to EOIs; we'll handle
+ * targeting changes later. These functions will return
+ * with the SB lock held.
+ *
+ * xive_lock_and_mask() will also set state->guest_priority
+ * but won't otherwise change other fields of the state.
+ *
+ * xive_lock_for_unmask() will not actually unmask; that is
+ * done later by xive_finish_unmask() once the targeting
+ * has been done, so we don't try to unmask an interrupt
+ * that hasn't yet been targeted.
+ */
+ if (priority == MASKED)
+ xive_lock_and_mask(xive, sb, state);
+ else
+ xive_lock_for_unmask(sb, state);
+
+
+ /*
+ * Then we handle targeting.
+ *
+ * First calculate a new "actual priority"
+ */
+ new_act_prio = state->act_priority;
+ if (priority != MASKED)
+ new_act_prio = xive_prio_from_guest(priority);
+
+ pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
+ new_act_prio, state->act_server, state->act_priority);
+
+ /*
+ * Then check if we actually need to change anything.
+ *
+ * The condition for re-targeting the interrupt is that
+ * we have a valid new priority (new_act_prio is not 0xff)
+ * and either the server or the priority changed.
+ *
+ * Note: If act_priority was ff and the new priority is
+ * also ff, we don't do anything and leave the interrupt
+ * untargeted. An attempt to do an int_on on an
+ * untargeted interrupt will fail. If that is a problem,
+ * we could initialize interrupts with a valid default priority.
+ */
+
+ if (new_act_prio != MASKED &&
+ (state->act_server != server ||
+ state->act_priority != new_act_prio))
+ rc = xive_target_interrupt(kvm, state, server, new_act_prio);
+
+ /*
+ * Perform the final unmasking of the interrupt source
+ * if necessary
+ */
+ if (priority != MASKED)
+ xive_finish_unmask(xive, sb, state, priority);
+
+ /*
+ * Finally, update saved_priority to match. Only int_on/off
+ * set this field to a different value.
+ */
+ state->saved_priority = priority;
+
+ arch_spin_unlock(&sb->lock);
+ return rc;
+}
+
+int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+ arch_spin_lock(&sb->lock);
+ *server = state->guest_server;
+ *priority = state->guest_priority;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ pr_devel("int_on(irq=0x%x)\n", irq);
+
+ /*
+ * Check if interrupt was not targetted
+ */
+ if (state->act_priority == MASKED) {
+ pr_devel("int_on on untargetted interrupt\n");
+ return -EINVAL;
+ }
+
+ /* If saved_priority is 0xff, do nothing */
+ if (state->saved_priority == MASKED)
+ return 0;
+
+ /*
+ * Lock and unmask it.
+ */
+ xive_lock_for_unmask(sb, state);
+ xive_finish_unmask(xive, sb, state, state->saved_priority);
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ pr_devel("int_off(irq=0x%x)\n", irq);
+
+ /*
+ * Lock and mask
+ */
+ state->saved_priority = xive_lock_and_mask(xive, sb, state);
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
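+
+/*
+ * Example (illustrative): int_off on an interrupt the guest has at
+ * prio 5 stashes 5 in saved_priority and masks the source; a later
+ * int_on restores prio 5 via xive_finish_unmask(). If the guest had
+ * itself masked the interrupt (saved_priority is 0xff), int_on is
+ * a no-op.
+ */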
+
+static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return false;
+ state = &sb->irq_state[idx];
+ if (!state->valid)
+ return false;
+
+ /*
+ * Trigger the IPI. This assumes we never restore a pass-through
+ * interrupt, which should be safe enough.
+ */
+ xive_irq_trigger(&state->ipi_data);
+
+ return true;
+}
+
+u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ return 0;
+
+ /* Return the per-cpu state for state saving/migration */
+ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
+ (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+}
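+
+/*
+ * For illustration (a sketch of the layout implied above, using the
+ * same KVM_REG_PPC_ICP_* definitions; pending_pri is not used by
+ * the XIVE code):
+ *
+ *   icpval = (u64)cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
+ *            (u64)xisr << KVM_REG_PPC_ICP_XISR_SHIFT |
+ *            (u64)mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+ *
+ * kvmppc_xive_set_icp() below performs the matching extraction.
+ */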
+
+int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ u8 cppr, mfrr;
+ u32 xisr;
+
+ if (!xc || !xive)
+ return -ENOENT;
+
+ /* Grab individual state fields. We don't use pending_pri */
+ cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
+ xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
+ KVM_REG_PPC_ICP_XISR_MASK;
+ mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
+
+ pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
+ xc->server_num, cppr, mfrr, xisr);
+
+ /*
+ * We can't update the state of a "pushed" VCPU, but that
+ * shouldn't happen.
+ */
+ if (WARN_ON(vcpu->arch.xive_pushed))
+ return -EIO;
+
+ /* Update VCPU HW saved state */
+ vcpu->arch.xive_saved_state.cppr = cppr;
+ xc->hw_cppr = xc->cppr = cppr;
+
+ /*
+ * Update MFRR state. If it's not 0xff, we mark the VCPU as
+ * having a pending MFRR change, which will re-evaluate the
+ * target. The VCPU will thus potentially get a spurious
+ * interrupt but that's not a big deal.
+ */
+ xc->mfrr = mfrr;
+ if (mfrr < cppr)
+ xive_irq_trigger(&xc->vp_ipi_data);
+
+ /*
+ * Now, a saved XIRR is "interesting": it means there's something
+ * in the legacy "one element" queue. For an IPI we simply ignore
+ * it, as the MFRR restore will handle that. For anything else we
+ * need to force a resend of the source.
+ * However, the source may not have been set up yet. If that's the
+ * case, we keep that info and increment a counter in the xive to
+ * tell subsequent xive_set_source() to go look.
+ */
+ if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
+ xc->delayed_irq = xisr;
+ xive->delayed_irqs++;
+ pr_devel(" xisr restore delayed\n");
+ }
+
+ return 0;
+}
+
+int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
+ unsigned int host_irq = irq_desc_get_irq(host_desc);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
+ u16 idx;
+ u8 prio;
+ int rc;
+
+ if (!xive)
+ return -ENODEV;
+
+ pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq);
+
+ sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * Mark the passed-through interrupt as going to a VCPU,
+ * this will prevent further EOIs and similar operations
+ * from the XIVE code. It will also mask the interrupt
+ * to either PQ=10 or 11 state, the latter if the interrupt
+ * is pending. This will allow us to unmask or retrigger it
+ * after routing it to the guest with a simple EOI.
+ *
+ * The "state" argument is a "token", all it needs is to be
+ * non-NULL to switch to passed-through or NULL for the
+ * other way around. We may not yet have an actual VCPU
+ * target here and we don't really care.
+ */
+ rc = irq_set_vcpu_affinity(host_irq, state);
+ if (rc) {
+ pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
+ return rc;
+ }
+
+ /*
+ * Mask and read the state of the IPI. We need to know if its
+ * P bit is set, as that means it's potentially already using
+ * a queue entry in the target.
+ */
+ prio = xive_lock_and_mask(xive, sb, state);
+ pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
+ state->old_p, state->old_q);
+
+ /* Turn the IPI hard off */
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+
+ /* Grab info about irq */
+ state->pt_number = hw_irq;
+ state->pt_data = irq_data_get_irq_handler_data(host_data);
+
+ /*
+ * Configure the IRQ to match the existing configuration of
+ * the IPI if it was already targeted. Otherwise this will
+ * mask the interrupt in a lossy way (act_priority is 0xff),
+ * which is fine for an interrupt that has never been started.
+ */
+ xive_native_configure_irq(hw_irq,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+
+ /*
+ * We do an EOI to enable the interrupt (and retrigger if needed)
+ * if the guest has the interrupt unmasked and the P bit was *not*
+ * set in the IPI. If it was set, we know a slot may still be
+ * in use in the target queue, so we have to wait for a
+ * guest-originated EOI.
+ */
+ if (prio != MASKED && !state->old_p)
+ xive_vm_source_eoi(hw_irq, state->pt_data);
+
+ /* Clear old_p/old_q as they are no longer relevant */
+ state->old_p = state->old_q = false;
+
+ /* Restore guest prio (unlocks EOI) */
+ mb();
+ state->guest_priority = prio;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
+
+int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ unsigned int host_irq = irq_desc_get_irq(host_desc);
+ u16 idx;
+ u8 prio;
+ int rc;
+
+ if (!xive)
+ return -ENODEV;
+
+ pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
+
+ sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * Mask and read the state of the IRQ. We need to know if its
+ * P bit is set, as that means it's potentially already using
+ * a queue entry in the target.
+ */
+ prio = xive_lock_and_mask(xive, sb, state);
+ pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
+ state->old_p, state->old_q);
+
+ /*
+ * If old_p is set, the interrupt is pending; we switch it to
+ * PQ=11. This will force a resend in the host so the interrupt
+ * isn't lost by whatever host driver picks it up.
+ */
+ if (state->old_p)
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
+
+ /* Release the passed-through interrupt to the host */
+ rc = irq_set_vcpu_affinity(host_irq, NULL);
+ if (rc) {
+ pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
+ return rc;
+ }
+
+ /* Forget about the IRQ */
+ state->pt_number = 0;
+ state->pt_data = NULL;
+
+ /* Reconfigure the IPI */
+ xive_native_configure_irq(state->ipi_number,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+
+ /*
+ * If old_p is set (we have a queue entry potentially
+ * occupied) or the interrupt is masked, we set the IPI
+ * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
+ */
+ if (prio == MASKED || state->old_p)
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
+ else
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
+
+ /* Restore guest prio (unlocks EOI) */
+ mb();
+ state->guest_priority = prio;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
+
+static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ int i, j;
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
+
+ if (!state->valid)
+ continue;
+ if (state->act_priority == MASKED)
+ continue;
+ if (state->act_server != xc->server_num)
+ continue;
+
+ /* Clean it up */
+ arch_spin_lock(&sb->lock);
+ state->act_priority = MASKED;
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+ if (state->pt_number) {
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
+ }
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+}
+
+void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = xc->xive;
+ int i;
+
+ pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
+
+ /* Ensure no interrupt is still routed to that VP */
+ xc->valid = false;
+ kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+ /* Mask the VP IPI */
+ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
+
+ /* Disable the VP */
+ xive_native_disable_vp(xc->vp_id);
+
+ /* Free the queues & associated interrupts */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+
+ /* Free the escalation irq */
+ if (xc->esc_virq[i]) {
+ free_irq(xc->esc_virq[i], vcpu);
+ irq_dispose_mapping(xc->esc_virq[i]);
+ kfree(xc->esc_virq_names[i]);
+ }
+ /* Free the queue */
+ xive_native_disable_queue(xc->vp_id, q, i);
+ if (q->qpage) {
+ free_pages((unsigned long)q->qpage,
+ xive->q_page_order);
+ q->qpage = NULL;
+ }
+ }
+
+ /* Free the IPI */
+ if (xc->vp_ipi) {
+ xive_cleanup_irq_data(&xc->vp_ipi_data);
+ xive_native_free_irq(xc->vp_ipi);
+ }
+ /* Free the VP */
+ kfree(xc);
+}
+
+int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc;
+ int i, r = -EBUSY;
+
+ pr_devel("connect_vcpu(cpu=%d)\n", cpu);
+
+ if (dev->ops != &kvm_xive_ops) {
+ pr_devel("Wrong ops !\n");
+ return -EPERM;
+ }
+ if (xive->kvm != vcpu->kvm)
+ return -EPERM;
+ if (vcpu->arch.irq_type)
+ return -EBUSY;
+ if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
+ pr_devel("Duplicate !\n");
+ return -EEXIST;
+ }
+ if (cpu >= KVM_MAX_VCPUS) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+ if (!xc)
+ return -ENOMEM;
+
+ /* We need to synchronize with queue provisioning */
+ mutex_lock(&vcpu->kvm->lock);
+ vcpu->arch.xive_vcpu = xc;
+ xc->xive = xive;
+ xc->vcpu = vcpu;
+ xc->server_num = cpu;
+ xc->vp_id = xive->vp_base + cpu;
+ xc->mfrr = 0xff;
+ xc->valid = true;
+
+ r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
+ if (r)
+ goto bail;
+
+ /* Configure VCPU fields for use by assembly push/pull */
+ vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
+ vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
+
+ /* Allocate IPI */
+ xc->vp_ipi = xive_native_alloc_irq();
+ if (!xc->vp_ipi) {
+ r = -EIO;
+ goto bail;
+ }
+ pr_devel(" IPI=0x%x\n", xc->vp_ipi);
+
+ r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
+ if (r)
+ goto bail;
+
+ /*
+ * Initialize queues. Initially we set them all for no queueing,
+ * and we enable escalation for queue 0 only, which we'll use for
+ * our mfrr change notifications. However, if the VCPU is
+ * hot-plugged, we do handle provisioning.
+ */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+
+ /* Is queue already enabled ? Provision it */
+ if (xive->qmap & (1 << i)) {
+ r = xive_provision_queue(vcpu, i);
+ if (r == 0)
+ xive_attach_escalation(vcpu, i);
+ if (r)
+ goto bail;
+ } else {
+ r = xive_native_configure_queue(xc->vp_id,
+ q, i, NULL, 0, true);
+ if (r) {
+ pr_err("Failed to configure queue %d for VCPU %d\n",
+ i, cpu);
+ goto bail;
+ }
+ }
+ }
+
+ /* If not done above, attach priority 0 escalation */
+ r = xive_attach_escalation(vcpu, 0);
+ if (r)
+ goto bail;
+
+ /* Enable the VP */
+ r = xive_native_enable_vp(xc->vp_id);
+ if (r)
+ goto bail;
+
+ /* Route the IPI */
+ r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
+ if (!r)
+ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
+
+bail:
+ mutex_unlock(&vcpu->kvm->lock);
+ if (r) {
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ return r;
+ }
+
+ vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
+ return 0;
+}
+
+/*
+ * Scanning of queues before/after migration save
+ */
+static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return;
+
+ state = &sb->irq_state[idx];
+
+ /* Some sanity checking */
+ if (!state->valid) {
+ pr_err("invalid irq 0x%x in cpu queue!\n", irq);
+ return;
+ }
+
+ /*
+ * If the interrupt is in a queue it should have P set.
+ * We warn so that it gets reported. A backtrace isn't
+ * useful, so there's no need for a WARN_ON.
+ */
+ if (!state->saved_p)
+ pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
+
+ /* Set flag */
+ state->in_queue = true;
+}
+
+static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ u32 irq)
+{
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
+
+ if (!state->valid)
+ return;
+
+ /* Mask and save state, this will also sync HW queues */
+ state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
+
+ /* Transfer P and Q */
+ state->saved_p = state->old_p;
+ state->saved_q = state->old_q;
+
+ /* Unlock */
+ arch_spin_unlock(&sb->lock);
+}
+
+static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ u32 irq)
+{
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
+
+ if (!state->valid)
+ return;
+
+ /*
+ * Lock / exclude EOI (not technically necessary if the
+ * guest isn't running concurrently). If this becomes a
+ * performance issue we can probably remove the lock.
+ */
+ xive_lock_for_unmask(sb, state);
+
+ /* Restore mask/prio if it wasn't masked */
+ if (state->saved_scan_prio != MASKED)
+ xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
+
+ /* Unlock */
+ arch_spin_unlock(&sb->lock);
+}
+
+static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
+{
+ u32 idx = q->idx;
+ u32 toggle = q->toggle;
+ u32 irq;
+
+ do {
+ irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
+ if (irq > XICS_IPI)
+ xive_pre_save_set_queued(xive, irq);
+ } while (irq);
+}
+
+static void xive_pre_save_scan(struct kvmppc_xive *xive)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i, j;
+
+ /*
+ * See the comment in xive_get_source() about how this
+ * works. Collect a stable state for all interrupts.
+ */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ xive_pre_save_mask_irq(xive, sb, j);
+ }
+
+ /* Then scan the queues and update the "in_queue" flag */
+ kvm_for_each_vcpu(i, vcpu, xive->kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ if (!xc)
+ continue;
+ for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
+ if (xc->queues[j].qpage)
+ xive_pre_save_queue(xive, &xc->queues[j]);
+ }
+ }
+
+ /* Finally restore interrupt states */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ xive_pre_save_unmask_irq(xive, sb, j);
+ }
+}
+
+static void xive_post_save_scan(struct kvmppc_xive *xive)
+{
+ u32 i, j;
+
+ /* Clear all the in_queue flags */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ sb->irq_state[j].in_queue = false;
+ }
+
+ /* Next get_source() will do a new scan */
+ xive->saved_src_count = 0;
+}
+
+/*
+ * This returns the source configuration and state to user space.
+ */
+static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u64 val, prio;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -ENOENT;
+
+ state = &sb->irq_state[idx];
+
+ if (!state->valid)
+ return -ENOENT;
+
+ pr_devel("get_source(%ld)...\n", irq);
+
+ /*
+ * To properly save the state into something that looks like a
+ * XICS migration stream, we cannot treat interrupts individually.
+ *
+ * We need, instead, to mask them all (and save their previous PQ
+ * state) to get a stable state in the HW, then sync them to ensure
+ * that any interrupt that had already fired hits its queue, and
+ * finally scan all the queues to collect which interrupts are
+ * still present in the queues, so we can set the "pending" flag on
+ * them and have them resent on restore.
+ *
+ * So we do it all when the "first" interrupt gets saved: all the
+ * state is collected at that point, and the rest of xive_get_source()
+ * merely converts that state to the expected userspace bit mask.
+ */
+ if (xive->saved_src_count == 0)
+ xive_pre_save_scan(xive);
+ xive->saved_src_count++;
+
+ /* Convert saved state into something compatible with xics */
+ val = state->guest_server;
+ prio = state->saved_scan_prio;
+
+ if (prio == MASKED) {
+ val |= KVM_XICS_MASKED;
+ prio = state->saved_priority;
+ }
+ val |= prio << KVM_XICS_PRIORITY_SHIFT;
+ if (state->lsi) {
+ val |= KVM_XICS_LEVEL_SENSITIVE;
+ if (state->saved_p)
+ val |= KVM_XICS_PENDING;
+ } else {
+ if (state->saved_p)
+ val |= KVM_XICS_PRESENTED;
+
+ if (state->saved_q)
+ val |= KVM_XICS_QUEUED;
+
+ /*
+ * We mark it pending (which will attempt a re-delivery)
+ * if we are in a queue *or* we were masked and had
+ * Q set which is equivalent to the XICS "masked pending"
+ * state
+ */
+ if (state->in_queue || (prio == MASKED && state->saved_q))
+ val |= KVM_XICS_PENDING;
+ }
+
+ /*
+ * If that was the last interrupt saved, reset the
+ * in_queue flags
+ */
+ if (xive->saved_src_count == xive->src_count)
+ xive_post_save_scan(xive);
+
+ /* Copy the result to userspace */
+ if (put_user(val, ubufp))
+ return -EFAULT;
+
+ return 0;
+}
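+
+/*
+ * For illustration, the u64 returned to userspace above combines
+ * the existing XICS uapi bits (a sketch; the exact positions come
+ * from the uapi header):
+ *
+ *   val = guest_server                        (destination field)
+ *       | prio << KVM_XICS_PRIORITY_SHIFT
+ *       | KVM_XICS_MASKED | KVM_XICS_PENDING | ...  (state flags)
+ */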
+
+static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
+ int irq)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvmppc_xive_src_block *sb;
+ int i, bid;
+
+ bid = irq >> KVMPPC_XICS_ICS_SHIFT;
+
+ mutex_lock(&kvm->lock);
+
+ /* block already exists - somebody else got here first */
+ if (xive->src_blocks[bid])
+ goto out;
+
+ /* Create the ICS */
+ sb = kzalloc(sizeof(*sb), GFP_KERNEL);
+ if (!sb)
+ goto out;
+
+ sb->id = bid;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
+ sb->irq_state[i].guest_priority = MASKED;
+ sb->irq_state[i].saved_priority = MASKED;
+ sb->irq_state[i].act_priority = MASKED;
+ }
+ smp_wmb();
+ xive->src_blocks[bid] = sb;
+
+ if (bid > xive->max_sbid)
+ xive->max_sbid = bid;
+
+out:
+ mutex_unlock(&kvm->lock);
+ return xive->src_blocks[bid];
+}
+
+static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ if (xc->delayed_irq == irq) {
+ xc->delayed_irq = 0;
+ xive->delayed_irqs--;
+ return true;
+ }
+ }
+ return false;
+}
+
+static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 idx;
+ u64 val;
+ u8 act_prio, guest_prio;
+ u32 server;
+ int rc = 0;
+
+ if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
+ return -ENOENT;
+
+ pr_devel("set_source(irq=0x%lx)\n", irq);
+
+ /* Find the source */
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb) {
+ pr_devel("No source, creating source block...\n");
+ sb = xive_create_src_block(xive, irq);
+ if (!sb) {
+ pr_devel("Failed to create block...\n");
+ return -ENOMEM;
+ }
+ }
+ state = &sb->irq_state[idx];
+
+ /* Read user passed data */
+ if (get_user(val, ubufp)) {
+ pr_devel("fault getting user info !\n");
+ return -EFAULT;
+ }
+
+ server = val & KVM_XICS_DESTINATION_MASK;
+ guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
+
+ pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n",
+ val, server, guest_prio);
+ /*
+ * If the source doesn't already have an IPI, allocate
+ * one and get the corresponding data
+ */
+ if (!state->ipi_number) {
+ state->ipi_number = xive_native_alloc_irq();
+ if (state->ipi_number == 0) {
+ pr_devel("Failed to allocate IPI !\n");
+ return -ENOMEM;
+ }
+ xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
+ pr_devel(" src_ipi=0x%x\n", state->ipi_number);
+ }
+
+ /*
+ * We use lock_and_mask() to put us in the right masked
+ * state. We will override that state from the saved state
+ * further down, but this will handle the cases of interrupts
+ * that need FW masking. We set the initial guest_priority to
+ * 0 before calling it to ensure it actually performs the masking.
+ */
+ state->guest_priority = 0;
+ xive_lock_and_mask(xive, sb, state);
+
+ /*
+ * Now, we select a target if we have one. If we don't, we
+ * leave the interrupt untargeted. This means that an interrupt
+ * can become "untargeted" across migration if it was masked
+ * by set_xive(), but there is little we can do about it.
+ */
+
+ /* First convert prio and mark interrupt as untargeted */
+ act_prio = xive_prio_from_guest(guest_prio);
+ state->act_priority = MASKED;
+ state->guest_server = server;
+
+ /*
+ * We need to drop the lock due to the mutex below. Hopefully
+ * nothing is touching that interrupt yet, since it hasn't been
+ * advertised to a running guest yet.
+ */
+ arch_spin_unlock(&sb->lock);
+
+ /* If we have a priority target the interrupt */
+ if (act_prio != MASKED) {
+ /* First, check provisioning of queues */
+ mutex_lock(&xive->kvm->lock);
+ rc = xive_check_provisioning(xive->kvm, act_prio);
+ mutex_unlock(&xive->kvm->lock);
+
+ /* Target interrupt */
+ if (rc == 0)
+ rc = xive_target_interrupt(xive->kvm, state,
+ server, act_prio);
+ /*
+ * If provisioning or targeting failed, leave it
+ * alone and masked. It will remain disabled until
+ * the guest re-targets it.
+ */
+ }
+
+ /*
+ * Find out if this was a delayed irq stashed in an ICP,
+ * in which case, treat it as pending
+ */
+ if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
+ val |= KVM_XICS_PENDING;
+ pr_devel(" Found delayed ! forcing PENDING !\n");
+ }
+
+ /* Cleanup the SW state */
+ state->old_p = false;
+ state->old_q = false;
+ state->lsi = false;
+ state->asserted = false;
+
+ /* Restore LSI state */
+ if (val & KVM_XICS_LEVEL_SENSITIVE) {
+ state->lsi = true;
+ if (val & KVM_XICS_PENDING)
+ state->asserted = true;
+ pr_devel(" LSI ! Asserted=%d\n", state->asserted);
+ }
+
+ /*
+ * Restore P and Q. If the interrupt was pending, we
+ * force both P and Q, which will trigger a resend.
+ *
+ * That means that a guest that had both an interrupt
+ * pending (queued) and Q set will restore with only
+ * one instance of that interrupt instead of 2, but that
+ * is perfectly fine as coalescing interrupts that haven't
+ * been presented yet is always allowed.
+ */
+ if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
+ state->old_p = true;
+ if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
+ state->old_q = true;
+
+ pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
+
+ /*
+ * If the interrupt was unmasked, update the guest priority,
+ * perform the appropriate state transition and do a
+ * re-trigger if necessary.
+ */
+ if (val & KVM_XICS_MASKED) {
+ pr_devel(" masked, saving prio\n");
+ state->guest_priority = MASKED;
+ state->saved_priority = guest_prio;
+ } else {
+ pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
+ xive_finish_unmask(xive, sb, state, guest_prio);
+ state->saved_priority = guest_prio;
+ }
+
+ /* Increment the number of valid sources and mark this one valid */
+ if (!state->valid)
+ xive->src_count++;
+ state->valid = true;
+
+ return 0;
+}
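+
+/*
+ * Restore example (illustrative): a source saved as PENDING comes
+ * back with both old_p and old_q forced. Since P is set, unmasking
+ * leaves PQ=11 and the eventual guest H_EOI, finding Q set,
+ * re-shoots the interrupt. A source saved as PRESENTED only sets
+ * old_p: the guest still owes an H_EOI for it.
+ */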
+
+int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+
+ /* Performed locklessly (we'd need some RCU protection here eventually) */
+ state = &sb->irq_state[idx];
+ if (!state->valid)
+ return -EINVAL;
+
+ /* We don't allow a trigger on a passed-through interrupt */
+ if (state->pt_number)
+ return -EINVAL;
+
+ if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) {
+ state->asserted = 1;
+ } else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
+ state->asserted = 0;
+ return 0;
+ }
+
+ /* Trigger the IPI */
+ xive_irq_trigger(&state->ipi_data);
+
+ return 0;
+}
+
+static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We honor the existing XICS ioctl */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xive_set_source(xive, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We honor the existing XICS ioctl */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xive_get_source(xive, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ /* We honor the same limits as XICS, at least for now */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
+ attr->attr < KVMPPC_XICS_NR_IRQS)
+ return 0;
+ break;
+ }
+ return -ENXIO;
+}
+
+static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
+{
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(hw_num, 0, MASKED, 0);
+ xive_cleanup_irq_data(xd);
+}
+
+static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
+{
+ int i;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
+
+ if (!state->valid)
+ continue;
+
+ kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
+ xive_native_free_irq(state->ipi_number);
+
+ /* Pass-through, cleanup too */
+ if (state->pt_number)
+ kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
+
+ state->valid = false;
+ }
+}
+
+static void kvmppc_xive_free(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvm *kvm = xive->kvm;
+ int i;
+
+ debugfs_remove(xive->dentry);
+
+ if (kvm)
+ kvm->arch.xive = NULL;
+
+ /* Mask and free interrupts */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ if (xive->src_blocks[i])
+ kvmppc_xive_free_sources(xive->src_blocks[i]);
+ kfree(xive->src_blocks[i]);
+ xive->src_blocks[i] = NULL;
+ }
+
+ if (xive->vp_base != XIVE_INVALID_VP)
+ xive_native_free_vp_block(xive->vp_base);
+
+ kfree(xive);
+ kfree(dev);
+}
+
+static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+{
+ struct kvmppc_xive *xive;
+ struct kvm *kvm = dev->kvm;
+ int ret = 0;
+
+ pr_devel("Creating xive for partition\n");
+
+ xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+ if (!xive)
+ return -ENOMEM;
+
+ dev->private = xive;
+ xive->dev = dev;
+ xive->kvm = kvm;
+
+ /* Already there ? */
+ if (kvm->arch.xive)
+ ret = -EEXIST;
+ else
+ kvm->arch.xive = xive;
+
+ /* We use the default queue size set by the host */
+ xive->q_order = xive_native_default_eq_shift();
+ if (xive->q_order < PAGE_SHIFT)
+ xive->q_page_order = 0;
+ else
+ xive->q_page_order = xive->q_order - PAGE_SHIFT;
+
+ /* Allocate a bunch of VPs */
+ xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
+ pr_devel("VP_Base=%x\n", xive->vp_base);
+
+ if (xive->vp_base == XIVE_INVALID_VP)
+ ret = -ENOMEM;
+
+ if (ret) {
+ kfree(xive);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xive_debug_show(struct seq_file *m, void *private)
+{
+ struct kvmppc_xive *xive = m->private;
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ u64 t_rm_h_xirr = 0;
+ u64 t_rm_h_ipoll = 0;
+ u64 t_rm_h_cppr = 0;
+ u64 t_rm_h_eoi = 0;
+ u64 t_rm_h_ipi = 0;
+ u64 t_vm_h_xirr = 0;
+ u64 t_vm_h_ipoll = 0;
+ u64 t_vm_h_cppr = 0;
+ u64 t_vm_h_eoi = 0;
+ u64 t_vm_h_ipi = 0;
+ unsigned int i;
+
+ if (!kvm)
+ return 0;
+
+ seq_printf(m, "=========\nVCPU state\n=========\n");
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
+ " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
+ xc->server_num, xc->cppr, xc->hw_cppr,
+ xc->mfrr, xc->pending,
+ xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
+
+ t_rm_h_xirr += xc->stat_rm_h_xirr;
+ t_rm_h_ipoll += xc->stat_rm_h_ipoll;
+ t_rm_h_cppr += xc->stat_rm_h_cppr;
+ t_rm_h_eoi += xc->stat_rm_h_eoi;
+ t_rm_h_ipi += xc->stat_rm_h_ipi;
+ t_vm_h_xirr += xc->stat_vm_h_xirr;
+ t_vm_h_ipoll += xc->stat_vm_h_ipoll;
+ t_vm_h_cppr += xc->stat_vm_h_cppr;
+ t_vm_h_eoi += xc->stat_vm_h_eoi;
+ t_vm_h_ipi += xc->stat_vm_h_ipi;
+ }
+
+ seq_printf(m, "Hcalls totals\n");
+ seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
+ seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
+ seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
+ seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
+ seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
+
+ return 0;
+}
+
+static int xive_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xive_debug_show, inode->i_private);
+}
+
+static const struct file_operations xive_debug_fops = {
+ .open = xive_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xive_debugfs_init(struct kvmppc_xive *xive)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
+ if (!name) {
+ pr_err("%s: no memory for name\n", __func__);
+ return;
+ }
+
+ xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
+ xive, &xive_debug_fops);
+
+ pr_debug("%s: created %s\n", __func__, name);
+ kfree(name);
+}
+
+static void kvmppc_xive_init(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+
+ /* Register some debug interfaces */
+ xive_debugfs_init(xive);
+}
+
+struct kvm_device_ops kvm_xive_ops = {
+ .name = "kvm-xive",
+ .create = kvmppc_xive_create,
+ .init = kvmppc_xive_init,
+ .destroy = kvmppc_xive_free,
+ .set_attr = xive_set_attr,
+ .get_attr = xive_get_attr,
+ .has_attr = xive_has_attr,
+};
+
+void kvmppc_xive_init_module(void)
+{
+ __xive_vm_h_xirr = xive_vm_h_xirr;
+ __xive_vm_h_ipoll = xive_vm_h_ipoll;
+ __xive_vm_h_ipi = xive_vm_h_ipi;
+ __xive_vm_h_cppr = xive_vm_h_cppr;
+ __xive_vm_h_eoi = xive_vm_h_eoi;
+}
+
+void kvmppc_xive_exit_module(void)
+{
+ __xive_vm_h_xirr = NULL;
+ __xive_vm_h_ipoll = NULL;
+ __xive_vm_h_ipi = NULL;
+ __xive_vm_h_cppr = NULL;
+ __xive_vm_h_eoi = NULL;
+}
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
new file mode 100644
index 000000000000..5938f7644dc1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KVM_PPC_BOOK3S_XIVE_H
+#define _KVM_PPC_BOOK3S_XIVE_H
+
+#ifdef CONFIG_KVM_XICS
+#include "book3s_xics.h"
+
+/*
+ * State for one guest irq source.
+ *
+ * For each guest source we allocate a HW interrupt in the XIVE
+ * which we use for all SW triggers. It will be unused for
+ * pass-through, but it's easier to keep around, as the same
+ * guest interrupt can alternately be emulated or passed through
+ * if a physical device is hot-unplugged and replaced with an
+ * emulated one.
+ *
+ * This state structure is very similar to the XICS one with
+ * additional XIVE specific tracking.
+ */
+struct kvmppc_xive_irq_state {
+ bool valid; /* Interrupt entry is valid */
+
+ u32 number; /* Guest IRQ number */
+ u32 ipi_number; /* XIVE IPI HW number */
+ struct xive_irq_data ipi_data; /* XIVE IPI associated data */
+ u32 pt_number; /* XIVE Pass-through number if any */
+ struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */
+
+ /* Targeting as set by guest */
+ u32 guest_server; /* Current guest selected target */
+ u8 guest_priority; /* Guest set priority */
+ u8 saved_priority; /* Saved priority when masking */
+
+ /* Actual targeting */
+ u32 act_server; /* Actual server */
+ u8 act_priority; /* Actual priority */
+
+ /* Various state bits */
+ bool in_eoi; /* Synchronize with H_EOI */
+ bool old_p; /* P bit state when masking */
+ bool old_q; /* Q bit state when masking */
+ bool lsi; /* level-sensitive interrupt */
+ bool asserted; /* Only for emulated LSI: current state */
+
+ /* Saved for migration state */
+ bool in_queue;
+ bool saved_p;
+ bool saved_q;
+ u8 saved_scan_prio;
+};
+
+/* Select the "right" interrupt (IPI vs. passthrough) */
+static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
+ u32 *out_hw_irq,
+ struct xive_irq_data **out_xd)
+{
+ if (state->pt_number) {
+ if (out_hw_irq)
+ *out_hw_irq = state->pt_number;
+ if (out_xd)
+ *out_xd = state->pt_data;
+ } else {
+ if (out_hw_irq)
+ *out_hw_irq = state->ipi_number;
+ if (out_xd)
+ *out_xd = &state->ipi_data;
+ }
+}
+
+/*
+ * This corresponds to an "ICS" in XICS terminology; we use it
+ * as a means to break up source information into multiple structures.
+ */
+struct kvmppc_xive_src_block {
+ arch_spinlock_t lock;
+ u16 id;
+ struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
+};
+
+struct kvmppc_xive {
+ struct kvm *kvm;
+ struct kvm_device *dev;
+ struct dentry *dentry;
+
+ /* VP block associated with the VM */
+ u32 vp_base;
+
+ /* Blocks of sources */
+ struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
+ u32 max_sbid;
+
+ /*
+ * For state save, we lazily scan the queues on the first interrupt
+ * being migrated. We don't have a clean way to reset those flags,
+ * so we keep track of the number of valid sources and how many of
+ * them were migrated, so we can reset when all of them have been
+ * processed.
+ */
+ u32 src_count;
+ u32 saved_src_count;
+
+ /*
+ * Some irqs are delayed on restore until the source is created;
+ * keep track here of how many of them there are.
+ */
+ u32 delayed_irqs;
+
+ /* Which queues (priorities) are in use by the guest */
+ u8 qmap;
+
+ /* Queue orders */
+ u32 q_order;
+ u32 q_page_order;
+};
+
+#define KVMPPC_XIVE_Q_COUNT 8
+
+struct kvmppc_xive_vcpu {
+ struct kvmppc_xive *xive;
+ struct kvm_vcpu *vcpu;
+ bool valid;
+
+ /* Server number. This is the HW CPU ID from a guest perspective */
+ u32 server_num;
+
+ /*
+ * HW VP corresponding to this VCPU. This is the base of the VP
+ * block plus the server number.
+ */
+ u32 vp_id;
+ u32 vp_chip_id;
+ u32 vp_cam;
+
+ /* IPI used for sending ... IPIs */
+ u32 vp_ipi;
+ struct xive_irq_data vp_ipi_data;
+
+ /* Local emulation state */
+ uint8_t cppr; /* guest CPPR */
+ uint8_t hw_cppr;/* Hardware CPPR */
+ uint8_t mfrr;
+ uint8_t pending;
+
+ /* Each VP has 8 queues, though we only provision some */
+ struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
+ u32 esc_virq[KVMPPC_XIVE_Q_COUNT];
+ char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];
+
+ /* Stash a delayed irq on restore from migration (see set_icp) */
+ u32 delayed_irq;
+
+ /* Stats */
+ u64 stat_rm_h_xirr;
+ u64 stat_rm_h_ipoll;
+ u64 stat_rm_h_cppr;
+ u64 stat_rm_h_eoi;
+ u64 stat_rm_h_ipi;
+ u64 stat_vm_h_xirr;
+ u64 stat_vm_h_ipoll;
+ u64 stat_vm_h_cppr;
+ u64 stat_vm_h_eoi;
+ u64 stat_vm_h_ipi;
+};
+
+static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
+ return vcpu;
+ }
+ return NULL;
+}
+
+static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
+ u32 irq, u16 *source)
+{
+ u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
+ u16 src = irq & KVMPPC_XICS_SRC_MASK;
+
+ if (source)
+ *source = src;
+ if (bid > KVMPPC_XICS_MAX_ICS_ID)
+ return NULL;
+ return xive->src_blocks[bid];
+}
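+
+/*
+ * Example (the numbers are hypothetical; the real shift/mask values
+ * come from book3s_xics.h): if KVMPPC_XICS_ICS_SHIFT were 12, guest
+ * irq 0x1003 would decompose into bid = 0x1 and src = 0x003, i.e.
+ * source 3 of source block 1:
+ *
+ *   sb = kvmppc_xive_find_source(xive, 0x1003, &src);
+ *   state = &sb->irq_state[src];
+ */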
+
+/*
+ * The mapping between guest priorities and host priorities
+ * is as follows.
+ *
+ * Guest requests for 0...6 are honored. Guest requests for anything
+ * higher result in a priority of 7 being applied.
+ *
+ * However, when the XIRR is returned via H_XIRR, 7 is translated to
+ * 0xb in order to match AIX expectations.
+ *
+ * A similar mapping is done for CPPR values.
+ */
+static inline u8 xive_prio_from_guest(u8 prio)
+{
+ if (prio == 0xff || prio < 8)
+ return prio;
+ return 7;
+}
+
+static inline u8 xive_prio_to_guest(u8 prio)
+{
+ if (prio == 0xff || prio < 7)
+ return prio;
+ return 0xb;
+}
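+
+/*
+ * A few examples of the mapping (per the comment above): guest
+ * prio 5 maps to host prio 5, guest prio 9 is clamped to 7; on the
+ * way back, host prio 7 is reported to the guest as 0xb, and 0xff
+ * (masked) passes through unchanged in both directions.
+ */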
+
+static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
+{
+ u32 cur;
+
+ if (!qpage)
+ return 0;
+ cur = be32_to_cpup(qpage + *idx);
+ if ((cur >> 31) == *toggle)
+ return 0;
+ *idx = (*idx + 1) & msk;
+ if (*idx == 0)
+ (*toggle) ^= 1;
+ return cur & 0x7fffffff;
+}
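+
+/*
+ * Worked example (illustrative): with msk = 0x3 (a 4-entry queue),
+ * *idx = 3 and *toggle = 0, an entry whose generation bit (bit 31)
+ * equals the current toggle is stale and the queue reads as empty.
+ * An entry with the bit flipped is live: we return its low 31 bits,
+ * advance idx to (3 + 1) & 0x3 = 0, and because idx wrapped to 0 we
+ * flip the toggle so the next lap expects the opposite generation.
+ */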
+
+extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
+extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+#endif /* CONFIG_KVM_XICS */
+#endif /* _KVM_PPC_BOOK3S_XIVE_H */
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
new file mode 100644
index 000000000000..4636ca6e7d38
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+/* File to be included by other .c files */
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
+
+static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
+{
+ u8 cppr;
+ u16 ack;
+
+ /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
+
+ /* Perform the acknowledge OS to register cycle. */
+ ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
+
+ /* Synchronize subsequent queue accesses */
+ mb();
+
+ /* XXX Check grouping level */
+
+ /* Anything ? */
+ if (!((ack >> 8) & TM_QW1_NSR_EO))
+ return;
+
+ /* Grab CPPR of the most favored pending interrupt */
+ cppr = ack & 0xff;
+ if (cppr < 8)
+ xc->pending |= 1 << cppr;
+
+#ifdef XIVE_RUNTIME_CHECKS
+ /* Check consistency */
+ if (cppr >= xc->hw_cppr)
+ pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
+ smp_processor_id(), cppr, xc->hw_cppr);
+#endif
+
+ /*
+ * Update our image of the HW CPPR. We don't yet modify
+ * xc->cppr, this will be done as we scan for interrupts
+ * in the queues.
+ */
+ xc->hw_cppr = cppr;
+}
+
+static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+ offset |= offset << 4;
+
+ val = __x_readq(__x_eoi_page(xd) + offset);
+#ifdef __LITTLE_ENDIAN__
+ val >>= 64 - 8;
+#endif
+ return (u8)val;
+}
+
+static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
+{
+ /* If the XIVE supports the new "store EOI" facility, use it */
+ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
+ __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
+ } else if (hw_irq && (xd->flags & XIVE_IRQ_FLAG_EOI_FW)) {
+ opal_int_eoi(hw_irq);
+ } else {
+ uint64_t eoi_val;
+
+ /*
+ * Otherwise for EOI, we use the special MMIO that does
+ * a clear of both P and Q and returns the old Q,
+ * except for LSIs where we use the "EOI cycle" special
+ * load.
+ *
+ * This allows us to then do a re-trigger if Q was set,
+ * rather than synthesizing an interrupt in software.
+ *
+ * For LSIs, using the HW EOI cycle works around a problem
+ * on P9 DD1 PHBs where the other ESB accesses don't work
+ * properly.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_LSI)
+ __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
+ else {
+ eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __x_writeq(0, __x_trig_page(xd));
+ }
+ }
+}
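+
+/*
+ * Summary of the EOI strategy above (for illustration):
+ *
+ *   XIVE_IRQ_FLAG_STORE_EOI : store to the XIVE_ESB_STORE_EOI offset
+ *   XIVE_IRQ_FLAG_EOI_FW    : opal_int_eoi() firmware call
+ *   XIVE_IRQ_FLAG_LSI       : load from XIVE_ESB_LOAD_EOI (EOI cycle)
+ *   otherwise               : load XIVE_ESB_SET_PQ_00, then write
+ *                             the trigger page if the old Q was set
+ */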
+
+enum {
+ scan_fetch,
+ scan_poll,
+ scan_eoi,
+};
+
+static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
+ u8 pending, int scan_type)
+{
+ u32 hirq = 0;
+ u8 prio = 0xff;
+
+ /* Find highest pending priority */
+ while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
+ struct xive_q *q;
+ u32 idx, toggle;
+ __be32 *qpage;
+
+ /*
+ * If pending is 0 this will return 0xff, which is what
+ * we want.
+ */
+ prio = ffs(pending) - 1;
+
+ /*
+ * If the most favored prio we found pending is less
+ * favored than (or equal to) a pending IPI, we return
+ * the IPI instead.
+ *
+ * Note: if pending was 0 and mfrr is 0xff, we will
+ * not spuriously take an IPI because mfrr cannot
+ * then be smaller than cppr.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* Don't scan past the guest cppr */
+ if (prio >= xc->cppr || prio > 7)
+ break;
+
+ /* Grab queue and pointers */
+ q = &xc->queues[prio];
+ idx = q->idx;
+ toggle = q->toggle;
+
+ /*
+ * Snapshot the queue page. The test further down for EOI
+ * must use the same "copy" that was used by __xive_read_eq
+ * since qpage can be set concurrently and we don't want
+ * to miss an EOI.
+ */
+ qpage = READ_ONCE(q->qpage);
+
+skip_ipi:
+ /*
+ * Try to fetch from the queue. Will return 0 for a
+ * non-queueing priority (ie, qpage = 0).
+ */
+ hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
+
+ /*
+ * If this was a signal for an MFRR change done by
+ * H_IPI we skip it. Additionally, if we were fetching,
+ * we EOI it now, thus re-enabling reception of a new
+ * such signal.
+ *
+ * We also need to do that if prio is 0 and we had no
+ * page for the queue. In this case, we have a non-queued
+ * IPI that needs to be EOId.
+ *
+ * This is safe because if we have another pending MFRR
+ * change that wasn't observed above, the Q bit will have
+ * been set and another occurrence of the IPI will trigger.
+ */
+ if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
+ if (scan_type == scan_fetch)
+ GLUE(X_PFX,source_eoi)(xc->vp_ipi,
+ &xc->vp_ipi_data);
+ /* Loop back on same queue with updated idx/toggle */
+#ifdef XIVE_RUNTIME_CHECKS
+ WARN_ON(hirq && hirq != XICS_IPI);
+#endif
+ if (hirq)
+ goto skip_ipi;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
+
+ /* Something found, stop searching */
+ if (hirq)
+ break;
+
+ /* Clear the pending bit on the now empty queue */
+ pending &= ~(1 << prio);
+
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+ if (p) {
+#ifdef XIVE_RUNTIME_CHECKS
+ WARN_ON(p > atomic_read(&q->count));
+#endif
+ atomic_sub(p, &q->count);
+ }
+ }
+ }
+
+ /* If we are just taking a "peek", do nothing else */
+ if (scan_type == scan_poll)
+ return hirq;
+
+ /* Update the pending bits */
+ xc->pending = pending;
+
+ /*
+ * If this is an EOI that's it, no CPPR adjustment done here,
+ * all we needed was cleanup the stale pending bits and check
+ * if there's anything left.
+ */
+ if (scan_type == scan_eoi)
+ return hirq;
+
+ /*
+ * If we found an interrupt, adjust what the guest CPPR should
+ * be as if we had just fetched that interrupt from HW.
+ */
+ if (hirq)
+ xc->cppr = prio;
+ /*
+ * If it was an IPI the HW CPPR might have been lowered too much
+ * as the HW interrupt we use for IPIs is routed to priority 0.
+ *
+ * We re-sync it here.
+ */
+ if (xc->cppr != xc->hw_cppr) {
+ xc->hw_cppr = xc->cppr;
+ __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+ }
+
+ return hirq;
+}
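+
+/*
+ * Worked example (hypothetical state): pending = 0x28 (prios 3 and
+ * 5 have work), mfrr = 4, cppr = 6. ffs picks prio 3 first; 3 is
+ * more favored than mfrr, so queue 3 is fetched. Had queue 3 been
+ * empty, bit 3 is cleared and the next candidate is prio 5; since
+ * 5 >= mfrr and mfrr < cppr, the scan then returns XICS_IPI at
+ * prio 4 (= mfrr) instead.
+ */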
+
+X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+ u32 hirq;
+
+ pr_devel("H_XIRR\n");
+
+ xc->GLUE(X_STAT_PFX,h_xirr)++;
+
+ /* First collect pending bits from HW */
+ GLUE(X_PFX,ack_pending)(xc);
+
+ /*
+ * Clean up the old-style bits if needed (they may have been
+ * set by a pull or an escalation interrupt).
+ */
+ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
+ clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
+ &vcpu->arch.pending_exceptions);
+
+ pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
+ xc->pending, xc->hw_cppr, xc->cppr);
+
+ /* Grab previous CPPR and reverse map it */
+ old_cppr = xive_prio_to_guest(xc->cppr);
+
+ /* Scan for actual interrupts */
+ hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);
+
+ pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
+ hirq, xc->hw_cppr, xc->cppr);
+
+#ifdef XIVE_RUNTIME_CHECKS
+ /* That should never hit */
+ if (hirq & 0xff000000)
+ pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
+#endif
+
+ /*
+ * XXX We could check if the interrupt is masked here and
+ * filter it. If we chose to do so, we would need to do:
+ *
+ * if (masked) {
+ * lock();
+ * if (masked) {
+ * old_Q = true;
+ * hirq = 0;
+ * }
+ * unlock();
+ * }
+ */
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.gpr[4] = hirq | (old_cppr << 24);
+
+ return H_SUCCESS;
+}
+
+X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 pending = xc->pending;
+ u32 hirq;
+ u8 pipr;
+
+ pr_devel("H_IPOLL(server=%ld)\n", server);
+
+ xc->GLUE(X_STAT_PFX,h_ipoll)++;
+
+ /* Grab the target VCPU if not the current one */
+ if (xc->server_num != server) {
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Scan all priorities */
+ pending = 0xff;
+ } else {
+ /* Grab pending interrupt if any */
+ pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+ if (pipr < 8)
+ pending |= 1 << pipr;
+ }
+
+ hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);
+
+ return H_SUCCESS;
+}
+
+static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
+{
+ u8 pending, prio;
+
+ pending = xc->pending;
+ if (xc->mfrr != 0xff) {
+ if (xc->mfrr < 8)
+ pending |= 1 << xc->mfrr;
+ else
+ pending |= 0x80;
+ }
+ if (!pending)
+ return;
+ prio = ffs(pending) - 1;
+
+ __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
+}
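+
+/*
+ * Example (illustrative values): pending = 0x20 (prio 5 queued)
+ * with mfrr = 3 ORs in 1 << 3, so ffs selects prio 3 and that value
+ * is written to the "set OS pending" register: the IPI wins over
+ * the queued prio-5 interrupt, matching the scan order above.
+ */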
+
+X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+
+ pr_devel("H_CPPR(cppr=%ld)\n", cppr);
+
+ xc->GLUE(X_STAT_PFX,h_cppr)++;
+
+ /* Map CPPR */
+ cppr = xive_prio_from_guest(cppr);
+
+ /* Remember old and update SW state */
+ old_cppr = xc->cppr;
+ xc->cppr = cppr;
+
+ /*
+ * We are masking less; we need to look for pending things
+ * to deliver and set the VP pending bits accordingly to
+ * trigger a new interrupt, otherwise we might miss MFRR
+ * changes for which we have optimized out sending an IPI.
+ */
+ if (cppr > old_cppr)
+ GLUE(X_PFX,push_pending_to_hw)(xc);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = cppr;
+ __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+
+ return H_SUCCESS;
+}
+
+X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_irq_data *xd;
+ u8 new_cppr = xirr >> 24;
+ u32 irq = xirr & 0x00ffffff, hw_num;
+ u16 src;
+ int rc = 0;
+
+ pr_devel("H_EOI(xirr=%08lx)\n", xirr);
+
+ xc->GLUE(X_STAT_PFX,h_eoi)++;
+
+ xc->cppr = xive_prio_from_guest(new_cppr);
+
+ /*
+ * IPIs are synthesized from MFRR and thus don't need
+ * any special EOI handling. The underlying interrupt
+ * used to signal MFRR changes is EOId when fetched from
+ * the queue.
+ */
+ if (irq == XICS_IPI || irq == 0)
+ goto bail;
+
+ /* Find interrupt source */
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb) {
+ pr_devel(" source not found !\n");
+ rc = H_PARAMETER;
+ goto bail;
+ }
+ state = &sb->irq_state[src];
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ state->in_eoi = true;
+ mb();
+
+again:
+ if (state->guest_priority == MASKED) {
+ arch_spin_lock(&sb->lock);
+ if (state->guest_priority != MASKED) {
+ arch_spin_unlock(&sb->lock);
+ goto again;
+ }
+ pr_devel(" EOI on saved P...\n");
+
+ /* Clear old_p, that will cause unmask to perform an EOI */
+ state->old_p = false;
+
+ arch_spin_unlock(&sb->lock);
+ } else {
+ pr_devel(" EOI on source...\n");
+
+ /* Perform EOI on the source */
+ GLUE(X_PFX,source_eoi)(hw_num, xd);
+
+ /* If it's an emulated LSI, check level and resend */
+ if (state->lsi && state->asserted)
+ __x_writeq(0, __x_trig_page(xd));
+ }
+
+ mb();
+ state->in_eoi = false;
+bail:
+
+ /* Re-evaluate pending IRQs and update HW */
+ GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
+ GLUE(X_PFX,push_pending_to_hw)(xc);
+ pr_devel(" after scan pending=%02x\n", xc->pending);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = xc->cppr;
+ __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+
+ return rc;
+}
+
+X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
+
+ xc->GLUE(X_STAT_PFX,h_ipi)++;
+
+ /* Find target */
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Locklessly write over MFRR */
+ xc->mfrr = mfrr;
+
+ /* Shoot the IPI if it is more favored than the target's CPPR */
+ if (mfrr < xc->cppr)
+ __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
+
+ return H_SUCCESS;
+}
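+
+/*
+ * Example (hypothetical): H_IPI(server=2, mfrr=4) against a target
+ * whose cppr is 5 stores the MFRR and writes the VP IPI trigger
+ * page, since 4 < 5 makes the IPI more favored than the current
+ * CPPR; with cppr <= 4 the store alone suffices and nothing fires.
+ */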
diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h
index 5a9a10b90762..3f1be85a83bc 100644
--- a/arch/powerpc/kvm/irq.h
+++ b/arch/powerpc/kvm/irq.h
@@ -12,6 +12,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
#endif
#ifdef CONFIG_KVM_XICS
ret = ret || (kvm->arch.xics != NULL);
+ ret = ret || (kvm->arch.xive != NULL);
#endif
smp_rmb();
return ret;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1ee22a910074..7f71ab5fcad1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -38,6 +38,8 @@
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
+#include <asm/xive.h>
+
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -697,7 +699,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
break;
case KVMPPC_IRQ_XICS:
- kvmppc_xics_free_icp(vcpu);
+ if (xive_enabled())
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ else
+ kvmppc_xics_free_icp(vcpu);
break;
}
@@ -1522,8 +1527,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EPERM;
dev = kvm_device_from_filp(f.file);
- if (dev)
- r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ if (dev) {
+ if (xive_enabled())
+ r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
+ else
+ r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ }
fdput(f);
break;
@@ -1547,7 +1556,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
return true;
#endif
#ifdef CONFIG_KVM_XICS
- if (kvm->arch.xics)
+ if (kvm->arch.xics || kvm->arch.xive)
return true;
#endif
return false;
@@ -1740,7 +1749,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_enable_cap(kvm, &cap);
break;
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_CREATE_SPAPR_TCE_64: {
struct kvm_create_spapr_tce_64 create_tce_64;
@@ -1771,6 +1780,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
goto out;
}
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
case KVM_PPC_GET_SMMU_INFO: {
struct kvm_ppc_smmu_info info;
struct kvm *kvm = filp->private_data;
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index d659345a98d6..44fe4833910f 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -16,6 +16,7 @@
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
+#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
@@ -391,7 +392,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
- if (!pmd_none(*pmd))
+ if (!pmd_none(*pmd) && !pmd_huge(*pmd))
/* pmd exists */
walk_pte(st, pmd, addr);
else
@@ -407,7 +408,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
- if (!pud_none(*pud))
+ if (!pud_none(*pud) && !pud_huge(*pud))
/* pud exists */
walk_pmd(st, pud, addr);
else
@@ -427,7 +428,7 @@ static void walk_pagetables(struct pg_state *st)
*/
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = KERN_VIRT_START + i * PGDIR_SIZE;
- if (!pgd_none(*pgd))
+ if (!pgd_none(*pgd) && !pgd_huge(*pgd))
/* pgd exists */
walk_pud(st, pgd, addr);
else
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 6575b9aabef4..a12e86395025 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 9dbd2a733d6b..0ee6be4f1ba4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index c6dca2ae78ef..a3edf813d455 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
* mm->context.addr_limit. Default to max task size so that we copy the
* default values to paca which will help us to handle slb miss early.
*/
- mm->context.addr_limit = TASK_SIZE_128TB;
+ mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
/*
* The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 966b9fccfa66..45f6740dd407 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
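Every hunk in this group replaces vma->vm_start with vm_start_gap(vma) in the "does the hinted address fit" test, so a hint can no longer be placed flush against a grows-down stack's guard gap. A small user-space model under illustrative sizes; GUARD_GAP and the struct here are stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

#define GUARD_GAP (256UL << 12)    /* models stack_guard_gap: 256 pages */

struct vma { unsigned long vm_start; bool grows_down; };

static unsigned long vm_start_gap(const struct vma *v)
{
        /* a grows-down stack claims a guard gap below its vm_start */
        return v->grows_down ? v->vm_start - GUARD_GAP : v->vm_start;
}

static bool hint_fits(unsigned long addr, unsigned long len,
                      const struct vma *next)
{
        return !next || addr + len <= vm_start_gap(next);
}

int main(void)
{
        struct vma stack = { .vm_start = 0x7f0000000000UL, .grows_down = true };

        /* ends exactly at vm_start: rejected, it would sit in the gap */
        printf("%d\n", hint_fits(stack.vm_start - 10 * 4096, 10 * 4096, &stack));
        return 0;
}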
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fde5770..09ceea6175ba 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
- regs_user->abi = perf_reg_abi(current);
+ regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+ PERF_SAMPLE_REGS_ABI_NONE;
}
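The fix above guards against task_pt_regs() yielding no user register frame (e.g. for a kernel thread): the sample must then report PERF_SAMPLE_REGS_ABI_NONE rather than derive an ABI from a missing frame. A tiny runnable model of the guard, with stand-in names:

#include <stdio.h>

enum sample_abi { ABI_NONE, ABI_32, ABI_64 };

struct regs { int dummy; };

/* Stand-ins: a kernel thread has no user register frame. */
static struct regs *user_regs_of_current(void) { return NULL; }
static enum sample_abi reg_abi_of_current(void) { return ABI_64; }

static enum sample_abi sample_abi(void)
{
        struct regs *r = user_regs_of_current();

        /* only derive the ABI when user registers actually exist */
        return r ? reg_abi_of_current() : ABI_NONE;
}

int main(void)
{
        printf("%d\n", sample_abi());   /* prints 0 (ABI_NONE) */
        return 0;
}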
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 018f8e90ac35..bb28e1a41257 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
- .test_adder = ISA207_TEST_ADDER,
+ .test_adder = P9_DD1_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
- .test_adder = P9_DD1_TEST_ADDER,
+ .test_adder = ISA207_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33244e3d9375..4fd64d3f5c44 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
In case of doubt, say Y
+config PPC_DT_CPU_FTRS
+ bool "Device-tree based CPU feature discovery & setup"
+ depends on PPC_BOOK3S_64
+ default y
+ help
+ This enables code to use a new device tree binding for describing CPU
+ compatibility and features. Saying Y here will attempt to use the new
+ binding if the firmware provides it. Currently only the skiboot
+ firmware provides this binding.
+	  If you're not sure, say Y.
+
config UDBG_RTAS_CONSOLE
bool "RTAS based debug console"
depends on PPC_RTAS
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96c2b8a40630..0c45cdbac4cf 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
(REGION_ID(ea) != USER_REGION_ID)) {
spin_unlock(&spu->register_lock);
- ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+ ret = hash_page(ea,
+ _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+ 0x300, dsisr);
spin_lock(&spu->register_lock);
if (!ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index e5a891ae80ee..84b7ac926ce6 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
+
+ rc = 0;
out:
free_page((unsigned long)buf);
return rc;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index d2f19821d71d..d12ea7b9fd47 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -412,11 +412,14 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
* been set for the PE, we will set EEH_PE_CFG_BLOCKED for
* that PE to block its config space.
*
+	 * Broadcom BCM5718 2-port NICs (14e4:1656)
	 * Broadcom Austin 4-port NICs (14e4:1657)
	 * Broadcom Shiner 4-port 1G NICs (14e4:168a)
	 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
*/
if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x1656) ||
+ (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
pdn->device_id == 0x1657) ||
(pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
pdn->device_id == 0x168a) ||
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 067defeea691..b5d960d6db3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
if (WARN_ON(!gpdev))
return NULL;
- if (WARN_ON(!gpdev->dev.of_node))
+ /* Not all PCI devices have device-tree nodes */
+ if (!gpdev->dev.of_node)
return NULL;
	/* Get associated PCI device */
@@ -448,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
return mmio_atsd_reg;
}
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{
unsigned long launch;
@@ -464,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
/* PID */
launch |= pid << PPC_BITLSHIFT(38);
+ /* No flush */
+ launch |= !flush << PPC_BITLSHIFT(39);
+
/* Invalidating the entire process doesn't use a va */
return mmio_launch_invalidate(npu, launch, 0);
}
static int mmio_invalidate_va(struct npu *npu, unsigned long va,
- unsigned long pid)
+ unsigned long pid, bool flush)
{
unsigned long launch;
@@ -485,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
/* PID */
launch |= pid << PPC_BITLSHIFT(38);
+ /* No flush */
+ launch |= !flush << PPC_BITLSHIFT(39);
+
return mmio_launch_invalidate(npu, launch, va);
}
#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
+struct mmio_atsd_reg {
+ struct npu *npu;
+ int reg;
+};
+
+static void mmio_invalidate_wait(
+ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+ struct npu *npu;
+ int i, reg;
+
+ /* Wait for all invalidations to complete */
+ for (i = 0; i <= max_npu2_index; i++) {
+ if (mmio_atsd_reg[i].reg < 0)
+ continue;
+
+ /* Wait for completion */
+ npu = mmio_atsd_reg[i].npu;
+ reg = mmio_atsd_reg[i].reg;
+ while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+ cpu_relax();
+
+ put_mmio_atsd_reg(npu, reg);
+
+ /*
+ * The GPU requires two flush ATSDs to ensure all entries have
+ * been flushed. We use PID 0 as it will never be used for a
+ * process on the GPU.
+ */
+ if (flush)
+ mmio_invalidate_pid(npu, 0, true);
+ }
+}
+
/*
* Invalidate either a single address or an entire PID depending on
* the value of va.
*/
static void mmio_invalidate(struct npu_context *npu_context, int va,
- unsigned long address)
+ unsigned long address, bool flush)
{
- int i, j, reg;
+ int i, j;
struct npu *npu;
struct pnv_phb *nphb;
struct pci_dev *npdev;
- struct {
- struct npu *npu;
- int reg;
- } mmio_atsd_reg[NV_MAX_NPUS];
+ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
unsigned long pid = npu_context->mm->context.id;
/*
@@ -524,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
if (va)
mmio_atsd_reg[i].reg =
- mmio_invalidate_va(npu, address, pid);
+ mmio_invalidate_va(npu, address, pid,
+ flush);
else
mmio_atsd_reg[i].reg =
- mmio_invalidate_pid(npu, pid);
+ mmio_invalidate_pid(npu, pid, flush);
/*
* The NPU hardware forwards the shootdown to all GPUs
@@ -543,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
*/
flush_tlb_mm(npu_context->mm);
- /* Wait for all invalidations to complete */
- for (i = 0; i <= max_npu2_index; i++) {
- if (mmio_atsd_reg[i].reg < 0)
- continue;
-
- /* Wait for completion */
- npu = mmio_atsd_reg[i].npu;
- reg = mmio_atsd_reg[i].reg;
- while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
- cpu_relax();
- put_mmio_atsd_reg(npu, reg);
- }
+ mmio_invalidate_wait(mmio_atsd_reg, flush);
+ if (flush)
+ /* Wait for the flush to complete */
+ mmio_invalidate_wait(mmio_atsd_reg, false);
}
static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -570,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
* There should be no more translation requests for this PID, but we
* need to ensure any entries for it are removed from the TLB.
*/
- mmio_invalidate(npu_context, 0, 0);
+ mmio_invalidate(npu_context, 0, 0, true);
}
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -580,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
{
struct npu_context *npu_context = mn_to_npu_context(mn);
- mmio_invalidate(npu_context, 1, address);
+ mmio_invalidate(npu_context, 1, address, true);
}
static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -589,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
{
struct npu_context *npu_context = mn_to_npu_context(mn);
- mmio_invalidate(npu_context, 1, address);
+ mmio_invalidate(npu_context, 1, address, true);
}
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -599,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct npu_context *npu_context = mn_to_npu_context(mn);
unsigned long address;
- for (address = start; address <= end; address += PAGE_SIZE)
- mmio_invalidate(npu_context, 1, address);
+ for (address = start; address < end; address += PAGE_SIZE)
+ mmio_invalidate(npu_context, 1, address, false);
+
+	/* Do the flush only on the final address == end */
+ mmio_invalidate(npu_context, 1, address, true);
}
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -650,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
/* No nvlink associated with this GPU device */
return ERR_PTR(-ENODEV);
- if (!mm) {
- /* kernel thread contexts are not supported */
+ if (!mm || mm->context.id == 0) {
+ /*
+ * Kernel thread contexts are not supported and context id 0 is
+ * reserved on the GPU.
+ */
return ERR_PTR(-EINVAL);
}
@@ -714,7 +751,7 @@ static void pnv_npu2_release_context(struct kref *kref)
void pnv_npu2_destroy_context(struct npu_context *npu_context,
struct pci_dev *gpdev)
{
- struct pnv_phb *nphb, *phb;
+ struct pnv_phb *nphb;
struct npu *npu;
struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
struct device_node *nvlink_dn;
@@ -728,13 +765,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
nphb = pci_bus_to_host(npdev->bus)->private_data;
npu = &nphb->npu;
- phb = pci_bus_to_host(gpdev->bus)->private_data;
nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
&nvlink_index)))
return;
npu_context->npdev[npu->index][nvlink_index] = NULL;
- opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+ opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
PCI_DEVID(gpdev->bus->number, gpdev->devfn));
kref_put(&npu_context->kref, pnv_npu2_release_context);
}
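The npu-dma.c rework above splits invalidation into a launch pass and a wait pass, and threads a flush flag through so that only the final ATSD of a range triggers the two PID-0 flush ATSDs the GPU requires, followed by a second wait. A user-space model of that control flow; launch() and poll_done() stand in for the MMIO register accesses:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NPUS 8

struct atsd { int reg; };                 /* reg < 0 means "not allocated" */

static void launch(int reg, bool flush)   { printf("launch %d flush=%d\n", reg, flush); }
static void poll_done(int reg)            { (void)reg; /* spin on status MMIO */ }

static void invalidate_wait(struct atsd a[MAX_NPUS], bool flush)
{
        for (int i = 0; i < MAX_NPUS; i++) {
                if (a[i].reg < 0)
                        continue;
                poll_done(a[i].reg);
                if (flush)                /* GPU wants the extra flush ATSD */
                        launch(a[i].reg, true);
        }
}

static void invalidate(struct atsd a[MAX_NPUS], bool flush)
{
        for (int i = 0; i < MAX_NPUS; i++)
                if (a[i].reg >= 0)
                        launch(a[i].reg, flush);
        invalidate_wait(a, flush);
        if (flush)                        /* second pass waits on the flush */
                invalidate_wait(a, false);
}

int main(void)
{
        struct atsd a[MAX_NPUS];

        for (int i = 0; i < MAX_NPUS; i++)
                a[i].reg = -1;
        a[0].reg = 3;                     /* one NPU holds an ATSD register */
        invalidate(a, true);
        return 0;
}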
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 7925a9d72cca..59684b4af4d1 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -967,3 +967,4 @@ EXPORT_SYMBOL_GPL(opal_leds_set_ind);
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
/* Export this for KVM */
EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
+EXPORT_SYMBOL_GPL(opal_int_eoi);
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef11136f..8c6119280c13 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
static int subcore_init(void)
{
- if (!cpu_has_feature(CPU_FTR_SUBCORE))
+ unsigned pvr_ver;
+
+ pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+ if (pvr_ver != PVR_POWER8 &&
+ pvr_ver != PVR_POWER8E &&
+ pvr_ver != PVR_POWER8NVL)
return 0;
/*
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e104c71ea44a..1fb162ba9d1c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
}
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
}
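The bug fixed above is an asymmetry: aa_index was left big-endian while its neighbours were converted, so the clone/update pair must swap every member in both directions. A runnable model of the inbound conversion, assuming a little-endian host so that be*_to_cpu reduces to a byte swap:

#include <stdint.h>
#include <stdio.h>

struct lmb {
        uint64_t base_addr;
        uint32_t drc_index;
        uint32_t aa_index;
        uint32_t flags;
};

/* Convert one flattened LMB record from big-endian to host order. */
static void lmb_to_cpu(struct lmb *l)
{
        l->base_addr = __builtin_bswap64(l->base_addr);
        l->drc_index = __builtin_bswap32(l->drc_index);
        l->aa_index  = __builtin_bswap32(l->aa_index);   /* the missed field */
        l->flags     = __builtin_bswap32(l->flags);
}

int main(void)
{
        struct lmb l = { .aa_index = 0x01000000 };       /* big-endian 1 */

        lmb_to_cpu(&l);
        printf("aa_index=%u\n", l.aa_index);             /* prints 1 */
        return 0;
}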
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 986cd111d4df..c651e668996b 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -377,6 +377,10 @@ static void cpm1_set_pin16(int port, int pin, int flags)
setbits16(&iop->odr_sor, pin);
else
clrbits16(&iop->odr_sor, pin);
+ if (flags & CPM_PIN_FALLEDGE)
+ setbits16(&iop->intr, pin);
+ else
+ clrbits16(&iop->intr, pin);
}
}
@@ -528,6 +532,9 @@ struct cpm1_gpio16_chip {
/* shadowed data register to clear/set bits safely */
u16 cpdata;
+
+	/* IRQ associated with pins when relevant */
+ int irq[16];
};
static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
@@ -578,6 +585,14 @@ static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}
+static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+
+ return cpm1_gc->irq[gpio] ? : -ENXIO;
+}
+
static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
@@ -618,6 +633,7 @@ int cpm1_gpiochip_add16(struct device_node *np)
struct cpm1_gpio16_chip *cpm1_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
+ u16 mask;
cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
if (!cpm1_gc)
@@ -625,6 +641,14 @@ int cpm1_gpiochip_add16(struct device_node *np)
spin_lock_init(&cpm1_gc->lock);
+ if (!of_property_read_u16(np, "fsl,cpm1-gpio-irq-mask", &mask)) {
+ int i, j;
+
+ for (i = 0, j = 0; i < 16; i++)
+ if (mask & (1 << (15 - i)))
+ cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++);
+ }
+
mm_gc = &cpm1_gc->mm_gc;
gc = &mm_gc->gc;
@@ -634,6 +658,7 @@ int cpm1_gpiochip_add16(struct device_node *np)
gc->direction_output = cpm1_gpio16_dir_out;
gc->get = cpm1_gpio16_get;
gc->set = cpm1_gpio16_set;
+ gc->to_irq = cpm1_gpio16_to_irq;
return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}
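The new fsl,cpm1-gpio-irq-mask property is a 16-bit mask whose most significant bit corresponds to GPIO 0; each set bit consumes the next interrupt specifier in order, and to_irq() then returns the cached mapping (or -ENXIO via the ?: shorthand). A runnable model of the decode loop, with a printed mapping in place of irq_of_parse_and_map():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t mask = 0x8001;   /* example: pins 0 and 15 have IRQs */
        int irq[16] = { 0 };

        /* bit (15 - i) of the mask corresponds to GPIO i */
        for (int i = 0, j = 0; i < 16; i++)
                if (mask & (1 << (15 - i)))
                        irq[i] = 100 + j++;   /* stand-in for a mapped virq */

        for (int i = 0; i < 16; i++)
                if (irq[i])
                        printf("gpio %d -> virq %d\n", i, irq[i]);
        return 0;
}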
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b470b04..6afddae2fb47 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+ struct u8_gpio_chip *u8_gc =
+ container_of(mm_gc, struct u8_gpio_chip, mm_gc);
u8_gc->data = in_8(mm_gc->regs);
}
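gpiochip_get_data() only works once of_mm_gpiochip_add_data() has stored the data pointer, but save_regs() runs during registration, so the fix above recovers the wrapper structure with container_of() instead. A self-contained model of the idiom; the macro mirrors the kernel's definition in spirit:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mm_chip { int regs; };
struct u8_chip { int data; struct mm_chip mm; };

int main(void)
{
        struct u8_chip c = { .data = 42 };
        struct mm_chip *mm = &c.mm;          /* what the callback receives */

        /* recover the enclosing structure from the embedded member */
        struct u8_chip *back = container_of(mm, struct u8_chip, mm);
        printf("%d\n", back->data);          /* prints 42 */
        return 0;
}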
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6a98efb14264..8f5e3035483b 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -46,13 +46,15 @@
#endif
bool __xive_enabled;
+EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;
/* We use only one priority for now */
static u8 xive_irq_priority;
-/* TIMA */
+/* TIMA exported to KVM */
void __iomem *xive_tima;
+EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;
/* Backend ops */
@@ -295,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- out_be64(xd->eoi_mmio, 0);
+ out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
/*
* The FW told us to call it. This happens for some
@@ -345,8 +347,11 @@ static void xive_irq_eoi(struct irq_data *d)
DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->pending_prio);
- /* EOI the source if it hasn't been disabled */
- if (!irqd_irq_disabled(d))
+ /*
+ * EOI the source if it hasn't been disabled and hasn't
+ * been passed-through to a KVM guest
+ */
+ if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
xive_do_source_eoi(irqd_to_hwirq(d), xd);
/*
@@ -689,9 +694,14 @@ static int xive_irq_set_affinity(struct irq_data *d,
old_target = xd->target;
- rc = xive_ops->configure_irq(hw_irq,
- get_hard_smp_processor_id(target),
- xive_irq_priority, d->irq);
+ /*
+ * Only configure the irq if it's not currently passed-through to
+ * a KVM guest
+ */
+ if (!irqd_is_forwarded_to_vcpu(d))
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(target),
+ xive_irq_priority, d->irq);
if (rc < 0) {
pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
return rc;
@@ -771,6 +781,123 @@ static int xive_irq_retrigger(struct irq_data *d)
return 1;
}
+static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int rc;
+ u8 pq;
+
+ /*
+ * We only support this on interrupts that do not require
+ * firmware calls for masking and unmasking
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
+ return -EIO;
+
+ /*
+ * This is called by KVM with state non-NULL for enabling
+ * pass-through or NULL for disabling it
+ */
+ if (state) {
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Set it to PQ=10 state to prevent further sends */
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+
+		/* No target? Nothing to do */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ /*
+			 * An untargeted interrupt should also have been
+			 * masked at the source.
+ */
+ WARN_ON(pq & 2);
+
+ return 0;
+ }
+
+ /*
+ * If P was set, adjust state to PQ=11 to indicate
+ * that a resend is needed for the interrupt to reach
+ * the guest. Also remember the value of P.
+ *
+ * This also tells us that it's in flight to a host queue
+ * or has already been fetched but hasn't been EOIed yet
+ * by the host. This it's potentially using up a host
+ * queue slot. This is important to know because as long
+ * as this is the case, we must not hard-unmask it when
+ * "returning" that interrupt to the host.
+ *
+ * This saved_p is cleared by the host EOI, when we know
+ * for sure the queue slot is no longer in use.
+ */
+ if (pq & 2) {
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+ xd->saved_p = true;
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the guest. That should guarantee us
+ * that we *will* eventually get an EOI for it on
+ * the host. Otherwise there would be a small window
+			 * where P is seen here but the interrupt goes
+			 * to the guest queue instead.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+ } else
+ xd->saved_p = false;
+ } else {
+ irqd_clr_forwarded_to_vcpu(d);
+
+		/* No host target? Hard mask and return */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ xive_do_source_set_mask(xd, true);
+ return 0;
+ }
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the host.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+
+ /*
+ * By convention we are called with the interrupt in
+		 * a PQ=10 or PQ=11 state, i.e., it won't fire and will
+ * have latched in Q whether there's a pending HW
+ * interrupt or not.
+ *
+ * First reconfigure the target.
+ */
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ xive_irq_priority, d->irq);
+ if (rc)
+ return rc;
+
+ /*
+ * Then if saved_p is not set, effectively re-enable the
+ * interrupt with an EOI. If it is set, we know there is
+ * still a message in a host queue somewhere that will be
+ * EOId eventually.
+ *
+ * Note: We don't check irqd_irq_disabled(). Effectively,
+ * we *will* let the irq get through even if masked if the
+ * HW is still firing it in order to deal with the whole
+ * saved_p business properly. If the interrupt triggers
+ * while masked, the generic code will re-mask it anyway.
+ */
+ if (!xd->saved_p)
+ xive_do_source_eoi(hw_irq, xd);
+
+ }
+ return 0;
+}
+
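The handler above reasons in terms of the ESB's two-bit P/Q state: XIVE_ESB_SET_PQ_10 blocks further sends, and a set P bit in the returned old state (pq & 2) means the interrupt already occupies a host queue slot, which is what saved_p records. An illustrative encoding; the labels paraphrase the handler's own comments, not the XIVE architecture document:

enum esb_pq {
        PQ_00 = 0,   /* resting: the next trigger is delivered normally */
        PQ_01 = 1,   /* Q set: one occurrence latched */
        PQ_10 = 2,   /* P set: further sends are blocked */
        PQ_11 = 3,   /* P and Q set: blocked, with a trigger latched in Q */
};

/* saved_p as used above: P was set when we masked, so a host queue
 * slot stays in use until the host EOIs the in-flight interrupt. */
static inline int needs_host_eoi(enum esb_pq old_state)
{
        return old_state & 2;
}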
static struct irq_chip xive_irq_chip = {
.name = "XIVE-IRQ",
.irq_startup = xive_irq_startup,
@@ -781,12 +908,14 @@ static struct irq_chip xive_irq_chip = {
.irq_set_affinity = xive_irq_set_affinity,
.irq_set_type = xive_irq_set_type,
.irq_retrigger = xive_irq_retrigger,
+ .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};
bool is_xive_irq(struct irq_chip *chip)
{
return chip == &xive_irq_chip;
}
+EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
@@ -801,6 +930,7 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd)
xd->trig_mmio = NULL;
}
}
+EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1a726229a427..ab9ecce61ee5 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -31,6 +31,7 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
+#include <asm/kvm_ppc.h>
#include "xive-internal.h"
@@ -95,6 +96,7 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
}
return 0;
}
+EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
@@ -108,6 +110,8 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
}
return rc == 0 ? 0 : -ENXIO;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_irq);
+
/* This can be called multiple time to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
@@ -172,6 +176,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_queue);
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
@@ -192,6 +197,7 @@ void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
__xive_native_disable_queue(vp_id, q, prio);
}
+EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
@@ -262,6 +268,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
return 0;
}
+#endif /* CONFIG_SMP */
u32 xive_native_alloc_irq(void)
{
@@ -277,6 +284,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
void xive_native_free_irq(u32 irq)
{
@@ -287,7 +295,9 @@ void xive_native_free_irq(u32 irq)
msleep(1);
}
}
+EXPORT_SYMBOL_GPL(xive_native_free_irq);
+#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
@@ -383,7 +393,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
return;
/* Enable the pool VP */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
@@ -428,7 +438,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
/* Disable it */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
for (;;) {
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
@@ -437,10 +447,11 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
}
}
-static void xive_native_sync_source(u32 hw_irq)
+void xive_native_sync_source(u32 hw_irq)
{
opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
+EXPORT_SYMBOL_GPL(xive_native_sync_source);
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
@@ -501,10 +512,24 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
+static void xive_native_setup_pools(void)
+{
+ /* Allocate a pool big enough */
+ pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);
+
+ xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
+ if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
+ pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
+
+ pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
+ xive_pool_vps, nr_cpu_ids);
+}
+
u32 xive_native_default_eq_shift(void)
{
return xive_queue_shift;
}
+EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
bool xive_native_init(void)
{
@@ -514,7 +539,7 @@ bool xive_native_init(void)
struct property *prop;
u8 max_prio = 7;
const __be32 *p;
- u32 val;
+ u32 val, cpu;
s64 rc;
if (xive_cmdline_disabled)
@@ -550,7 +575,11 @@ bool xive_native_init(void)
break;
}
- /* Grab size of provisioning pages */
+ /* Configure Thread Management areas for KVM */
+ for_each_possible_cpu(cpu)
+ kvmppc_set_xive_tima(cpu, r.start, tima);
+
+	/* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
@@ -560,6 +589,9 @@ bool xive_native_init(void)
return false;
}
+ /* Setup some dummy HV pool VPs */
+ xive_native_setup_pools();
+
/* Initialize XIVE core with our backend */
if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
@@ -638,3 +670,47 @@ void xive_native_free_vp_block(u32 vp_base)
pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
+
+int xive_native_enable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_enable_vp);
+
+int xive_native_disable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, 0, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_disable_vp);
+
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
+{
+ __be64 vp_cam_be;
+ __be32 vp_chip_id_be;
+ s64 rc;
+
+ rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
+ if (rc)
+ return -EIO;
+ *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
+ *out_chip_id = be32_to_cpu(vp_chip_id_be);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
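All three new VP helpers share one retry discipline: loop while OPAL reports busy, sleep a millisecond between attempts, and collapse any other failure to -EIO. A user-space model of the loop; opal_call(), msleep() and the OPAL_BUSY value here are stand-ins:

#include <errno.h>

#define OPAL_SUCCESS 0
#define OPAL_BUSY    (-2)                 /* illustrative status value */

static long opal_call(void) { return OPAL_SUCCESS; }  /* stand-in call */
static void msleep(int ms)  { (void)ms; }             /* stand-in sleep */

static int opal_retry(void)
{
        long rc;

        for (;;) {
                rc = opal_call();
                if (rc != OPAL_BUSY)
                        break;
                msleep(1);                /* back off, then retry */
        }
        return rc ? -EIO : 0;
}

int main(void)
{
        return opal_retry();
}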
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e161fafb495b..6967addc6a89 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -363,9 +363,6 @@ config COMPAT
config SYSVIPC_COMPAT
def_bool y if COMPAT && SYSVIPC
-config KEYS_COMPAT
- def_bool y if COMPAT && KEYS
-
config SMP
def_bool y
prompt "Symmetric multi-processing support"
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index a5039fa89314..282072206df7 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_SELFTEST=y
CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
CONFIG_CORDIC=m
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 83970b5afb2b..3c6b78189fbc 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index fbc6542aaf59..653d72bcc007 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e23d97c13735..afa46a7406ea 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=2
# CONFIG_HOTPLUG_CPU is not set
CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
# CONFIG_CHECK_STACK is not set
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_FC_ATTRS=y
CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
CONFIG_RAW_DRIVER=y
# CONFIG_SCLP_ASYNC is not set
# CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
# CONFIG_INOTIFY_USER is not set
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 97189dbaf34b..20244a38c886 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 0206c8052328..df7b54ea956d 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/time.h>
+#include <linux/refcount.h>
#include <uapi/asm/debug.h>
#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
@@ -31,7 +32,7 @@ struct debug_view;
typedef struct debug_info {
struct debug_info* next;
struct debug_info* prev;
- atomic_t ref_count;
+ refcount_t ref_count;
spinlock_t lock;
int level;
int nr_areas;
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
index 60323c21938b..37f617dfbede 100644
--- a/arch/s390/include/asm/dis.h
+++ b/arch/s390/include/asm/dis.h
@@ -40,6 +40,8 @@ static inline int insn_length(unsigned char code)
return ((((int) code + 64) >> 7) + 1) << 1;
}
+struct pt_regs;
+
void show_code(struct pt_regs *regs);
void print_fn_code(unsigned char *code, unsigned long len);
int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index 67026300c88e..144809a3f4f6 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/blkdev.h>
struct arqb {
u64 data;
@@ -105,13 +106,14 @@ struct scm_driver {
int (*probe) (struct scm_device *scmdev);
int (*remove) (struct scm_device *scmdev);
void (*notify) (struct scm_device *scmdev, enum scm_event event);
- void (*handler) (struct scm_device *scmdev, void *data, int error);
+ void (*handler) (struct scm_device *scmdev, void *data,
+ blk_status_t error);
};
int scm_driver_register(struct scm_driver *scmdrv);
void scm_driver_unregister(struct scm_driver *scmdrv);
int eadm_start_aob(struct aob *aob);
-void scm_irq_handler(struct aob *aob, int error);
+void scm_irq_handler(struct aob *aob, blk_status_t error);
#endif /* _ASM_S390_EADM_H */
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 1293c4066cfc..28792ef82c83 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -27,12 +27,21 @@
* 2005-Dec Used as a template for s390 by Mike Grundy
* <grundym@us.ibm.com>
*/
+#include <linux/types.h>
#include <asm-generic/kprobes.h>
#define BREAKPOINT_INSTRUCTION 0x0002
+#define FIXUP_PSW_NORMAL 0x08
+#define FIXUP_BRANCH_NOT_TAKEN 0x04
+#define FIXUP_RETURN_REGISTER 0x02
+#define FIXUP_NOT_REQUIRED 0x01
+
+int probe_is_prohibited_opcode(u16 *insn);
+int probe_get_fixup_type(u16 *insn);
+int probe_is_insn_relative_long(u16 *insn);
+
#ifdef CONFIG_KPROBES
-#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/sched/task_stack.h>
@@ -56,11 +65,6 @@ typedef u16 kprobe_opcode_t;
#define KPROBE_SWAP_INST 0x10
-#define FIXUP_PSW_NORMAL 0x08
-#define FIXUP_BRANCH_NOT_TAKEN 0x04
-#define FIXUP_RETURN_REGISTER 0x02
-#define FIXUP_NOT_REQUIRED 0x01
-
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
/* copy of original instruction */
@@ -90,10 +94,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-int probe_is_prohibited_opcode(u16 *insn);
-int probe_get_fixup_type(u16 *insn);
-int probe_is_insn_relative_long(u16 *insn);
-
#define flush_insn_slot(p) do { } while (0)
#endif /* CONFIG_KPROBES */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 426614a882a9..65d07ac34647 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
struct mutex ais_lock;
u8 simm;
u8 nimm;
- int ais_enabled;
};
struct kvm_hw_wp_info_arch {
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 60d395fdc864..aeac013968f2 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -221,11 +221,6 @@ extern void release_thread(struct task_struct *);
/* Free guarded storage control block for current */
void exit_thread_gs(void);
-/*
- * Return saved PC of a blocked thread.
- */
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
(task_stack_page(tsk) + THREAD_SIZE) - 1)
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 73bff45ced55..2b498e58b914 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -109,7 +109,7 @@ struct sysinfo_2_2_2 {
unsigned short cpus_shared;
char reserved_4[3];
unsigned char vsne;
- uuid_be uuid;
+ uuid_t uuid;
char reserved_5[160];
char ext_name[256];
};
@@ -134,7 +134,7 @@ struct sysinfo_3_2_2 {
char reserved_1[3];
unsigned char evmne;
unsigned int reserved_2;
- uuid_be uuid;
+ uuid_t uuid;
} vm[8];
char reserved_3[1504];
char ext_names[8][256];
@@ -146,7 +146,7 @@ extern int topology_max_mnest;
* Returns the maximum nesting level supported by the cpu topology code.
* The current maximum level is 4 which is the drawer level.
*/
-static inline int topology_mnest_limit(void)
+static inline unsigned char topology_mnest_limit(void)
{
return min(topology_max_mnest, 4);
}
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index addb09cee0f5..ca62066895e0 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -10,49 +10,3 @@ generic-y += poll.h
generic-y += resource.h
generic-y += sockios.h
generic-y += termbits.h
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += chpid.h
-header-y += chsc.h
-header-y += clp.h
-header-y += cmb.h
-header-y += dasd.h
-header-y += debug.h
-header-y += errno.h
-header-y += guarded_storage.h
-header-y += hypfs.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm.h
-header-y += kvm_para.h
-header-y += kvm_perf.h
-header-y += kvm_virtio.h
-header-y += monwriter.h
-header-y += msgbuf.h
-header-y += pkey.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += qeth.h
-header-y += schid.h
-header-y += sclp_ctl.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sie.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += tape390.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
-header-y += virtio-ccw.h
-header-y += vtoc.h
-header-y += zcrypt.h
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 530226b6cb19..86b3e74f569e 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -277,7 +277,7 @@ debug_info_alloc(const char *name, int pages_per_area, int nr_areas,
memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *));
memset(rc->debugfs_entries, 0 ,DEBUG_MAX_VIEWS *
sizeof(struct dentry*));
- atomic_set(&(rc->ref_count), 0);
+ refcount_set(&(rc->ref_count), 0);
return rc;
@@ -361,7 +361,7 @@ debug_info_create(const char *name, int pages_per_area, int nr_areas,
debug_area_last = rc;
rc->next = NULL;
- debug_info_get(rc);
+ refcount_set(&rc->ref_count, 1);
out:
return rc;
}
@@ -416,7 +416,7 @@ static void
debug_info_get(debug_info_t * db_info)
{
if (db_info)
- atomic_inc(&db_info->ref_count);
+ refcount_inc(&db_info->ref_count);
}
/*
@@ -431,7 +431,7 @@ debug_info_put(debug_info_t *db_info)
if (!db_info)
return;
- if (atomic_dec_and_test(&db_info->ref_count)) {
+ if (refcount_dec_and_test(&db_info->ref_count)) {
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!db_info->views[i])
continue;
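The conversion above swaps atomic_t for refcount_t, which saturates rather than wraps on misuse; note that the creation path now does refcount_set(..., 1) instead of incrementing from zero, since refcount_inc() on a zero count is treated as an error. A C11 model of the discipline; the real refcount_t additionally saturates and warns:

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_int refs; } refcount_t;

static void refcount_set(refcount_t *r, int n) { atomic_store(&r->refs, n); }
static void refcount_inc(refcount_t *r)        { atomic_fetch_add(&r->refs, 1); }
static bool refcount_dec_and_test(refcount_t *r)
{
        /* true when this put dropped the count to zero */
        return atomic_fetch_sub(&r->refs, 1) == 1;
}

int main(void)
{
        refcount_t r;

        refcount_set(&r, 1);              /* creation takes the first ref */
        refcount_inc(&r);                 /* like debug_info_get() */
        refcount_dec_and_test(&r);        /* first put: 2 -> 1, not yet free */
        return refcount_dec_and_test(&r) ? 0 : 1;   /* 1 -> 0: free now */
}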
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index a5f5d3bb3dbc..6315037335ba 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,17 @@ ENTRY(sie64a)
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
-.Lrewind_pad:
- nop 0
+.Lrewind_pad6:
+ nopr 7
+.Lrewind_pad4:
+ nopr 7
+.Lrewind_pad2:
+ nopr 7
.globl sie_exit
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -249,7 +254,9 @@ sie_exit:
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
@@ -312,6 +319,7 @@ ENTRY(system_call)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lsysc_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@@ -623,6 +631,7 @@ ENTRY(io_int_handler)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@@ -1174,15 +1183,23 @@ cleanup_critical:
br %r14
.Lcleanup_sysc_restore:
+ # check if stpt has been executed
clg %r9,BASED(.Lcleanup_sysc_restore_insn)
+ jh 0f
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
+ je 1f
lg %r9,24(%r11) # get saved pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
+1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_sysc_restore_insn:
+ .quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
@@ -1190,15 +1207,20 @@ cleanup_critical:
br %r14
.Lcleanup_io_restore:
+ # check if stpt has been executed
clg %r9,BASED(.Lcleanup_io_restore_insn)
- je 0f
+ jh 0f
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
+ je 1f
lg %r9,24(%r11) # get saved r11 pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
+1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_io_restore_insn:
+ .quad .Lio_exit_timer
.quad .Lio_done - 4
.Lcleanup_idle:
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 27477f34cc0a..d03a6d12c4bd 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -173,6 +173,8 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
+#ifdef CONFIG_MODULES
+
static int __init ftrace_plt_init(void)
{
unsigned int *ip;
@@ -191,6 +193,8 @@ static int __init ftrace_plt_init(void)
}
device_initcall(ftrace_plt_init);
+#endif /* CONFIG_MODULES */
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* Hook the return address and push it in the stack of return addresses
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e545ffe5155a..8e622bb52f7a 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -564,8 +564,6 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
- if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
- diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
diag308(DIAG308_LOAD_CLEAR, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
@@ -1088,10 +1086,7 @@ static void __reipl_run(void *unused)
break;
case REIPL_METHOD_CCW_DIAG:
diag308(DIAG308_SET, reipl_block_ccw);
- if (MACHINE_IS_LPAR)
- diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
- else
- diag308(DIAG308_LOAD_CLEAR, NULL);
+ diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case REIPL_METHOD_FCP_RW_DIAG:
diag308(DIAG308_SET, reipl_block_fcp);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 999d7154bbdc..bb32b8618bf6 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -41,31 +41,6 @@
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
-/*
- * Return saved PC of a blocked thread. used in kernel/sched.
- * resume in entry.S does not create a new stack frame, it
- * just stores the registers %r6-%r15 to the frame given by
- * schedule. We want to return the address of the caller of
- * schedule, so we have to walk the backchain one time to
- * find the frame schedule() store its return address.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct stack_frame *sf, *low, *high;
-
- if (!tsk || !task_stack_page(tsk))
- return 0;
- low = task_stack_page(tsk);
- high = (struct stack_frame *) task_pt_regs(tsk);
- sf = (struct stack_frame *) tsk->thread.ksp;
- if (sf <= low || sf > high)
- return 0;
- sf = (struct stack_frame *) sf->back_chain;
- if (sf <= low || sf > high)
- return 0;
- return sf->gprs[8];
-}
-
extern void kernel_thread_starter(void);
/*
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index eefcb54872a5..fb869b103825 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -242,7 +242,7 @@ static void print_ext_name(struct seq_file *m, int lvl,
static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info)
{
- if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be)))
+ if (uuid_is_null(&info->vm[i].uuid))
return;
seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid);
}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 72307f108c40..6e2c42bd1c3b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,8 +31,14 @@ SECTIONS
{
. = 0x00000000;
.text : {
- _text = .; /* Text and read-only data */
+ /* Text and read-only data */
HEAD_TEXT
+ /*
+	 * Some tools, e.g. perf, don't like symbols starting at address
+	 * zero; therefore skip the initial PSW and channel program
+	 * located at address zero and let _text start at 0x200.
+ */
+ _text = 0x200;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9da243d94cc3..3b297fa3aa67 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
ptr = asce.origin * 4096;
if (asce.r) {
*fake = 1;
+ ptr = 0;
asce.dt = ASCE_TYPE_REGION1;
}
switch (asce.dt) {
case ASCE_TYPE_REGION1:
- if (vaddr.rfx01 > asce.tl && !asce.r)
+ if (vaddr.rfx01 > asce.tl && !*fake)
return PGM_REGION_FIRST_TRANS;
break;
case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
union region1_table_entry rfte;
if (*fake) {
- /* offset in 16EB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
+ ptr += (unsigned long) vaddr.rfx << 53;
rfte.val = ptr;
goto shadow_r2t;
}
@@ -1036,8 +1036,7 @@ shadow_r2t:
union region2_table_entry rste;
if (*fake) {
- /* offset in 8PB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
+ ptr += (unsigned long) vaddr.rsx << 42;
rste.val = ptr;
goto shadow_r3t;
}
@@ -1064,8 +1063,7 @@ shadow_r3t:
union region3_table_entry rtte;
if (*fake) {
- /* offset in 4TB guest memory block */
- ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
+ ptr += (unsigned long) vaddr.rtx << 31;
rtte.val = ptr;
goto shadow_sgt;
}
@@ -1101,8 +1099,7 @@ shadow_sgt:
union segment_table_entry ste;
if (*fake) {
- /* offset in 2G guest memory block */
- ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
+ ptr += (unsigned long) vaddr.sx << 20;
ste.val = ptr;
goto shadow_pgt;
}
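
The bug fixed in this run of hunks was consistent shifts paired with the wrong index fields: each fake table level must rebuild the guest block offset from the index field that belongs to that level. A sketch of the pairing, assuming the usual 11-bit s390 DAT index fields (macros are illustrative, not from the patch):

	#define VADDR_RFX(va)	(((va) >> 53) & 0x7ff)	/* region-first index  */
	#define VADDR_RSX(va)	(((va) >> 42) & 0x7ff)	/* region-second index */
	#define VADDR_RTX(va)	(((va) >> 31) & 0x7ff)	/* region-third index  */
	#define VADDR_SX(va)	(((va) >> 20) & 0x7ff)	/* segment index       */
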
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index caf15c8a8948..2d120fef7d90 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
struct kvm_s390_ais_req req;
int ret = 0;
- if (!fi->ais_enabled)
+ if (!test_kvm_facility(kvm, 72))
return -ENOTSUPP;
if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
};
int ret = 0;
- if (!fi->ais_enabled || !adapter->suppressible)
+ if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
return kvm_s390_inject_vm(kvm, &s390int);
mutex_lock(&fi->ais_lock);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 689ac48361c6..f28e2e776931 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
} else {
set_kvm_facility(kvm->arch.model.fac_mask, 72);
set_kvm_facility(kvm->arch.model.fac_list, 72);
- kvm->arch.float_int.ais_enabled = 1;
r = 0;
}
mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
mutex_init(&kvm->arch.float_int.ais_lock);
kvm->arch.float_int.simm = 0;
kvm->arch.float_int.nimm = 0;
- kvm->arch.float_int.ais_enabled = 0;
spin_lock_init(&kvm->arch.float_int.lock);
for (i = 0; i < FIRQ_LIST_COUNT; i++)
INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c
index ae90e1ae3607..1963ddbf4ab3 100644
--- a/arch/s390/lib/probes.c
+++ b/arch/s390/lib/probes.c
@@ -4,6 +4,7 @@
* Copyright IBM Corp. 2014
*/
+#include <linux/errno.h>
#include <asm/kprobes.h>
#include <asm/dis.h>
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 1e5bb2b86c42..b3bd3f23b8e8 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -337,8 +337,8 @@ long __strncpy_from_user(char *dst, const char __user *src, long size)
return 0;
done = 0;
do {
- offset = (size_t)src & ~PAGE_MASK;
- len = min(size - done, PAGE_SIZE - offset);
+ offset = (size_t)src & (L1_CACHE_BYTES - 1);
+ len = min(size - done, L1_CACHE_BYTES - offset);
if (copy_from_user(dst, src, len))
return -EFAULT;
len_str = strnlen(dst, len);
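
Chunking by L1 cache line instead of by page keeps each copy_from_user() from touching memory far past the string's terminator, where an unmapped page would turn a successful copy into -EFAULT. A worked example of the chunk computation (L1_CACHE_BYTES is 256 on s390):

	/* src points 0x38 bytes into a cache line: the first chunk ends
	 * exactly on the next line boundary, later chunks are whole lines. */
	offset = (size_t)src & (L1_CACHE_BYTES - 1);		/* e.g. 0x38 */
	len = min(size - done, L1_CACHE_BYTES - offset);	/* e.g. 0xc8 */
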
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index b017daed6887..b854b1da281a 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit;
}
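
This hunk, and the matching sh, sparc and tile hunks further down, harden the address-hint fast path the same way: the hint must clear not only the next vma but also the guard gap below it when that vma is a stack growing toward the hint. The shape of the check, as a sketch:

	vma = find_vma(mm, addr);
	if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
	    (!vma || addr + len <= vm_start_gap(vma)))
		goto check_asce_limit;	/* hint accepted */
	/* otherwise fall back to a full unmapped-area search */
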
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index e3a8d0f96652..54b3b2039af1 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y +=
-
generic-y += barrier.h
generic-y += clkdev.h
generic-y += current.h
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h
index d9a922d8711b..299274581968 100644
--- a/arch/score/include/asm/processor.h
+++ b/arch/score/include/asm/processor.h
@@ -13,7 +13,6 @@ struct task_struct;
*/
extern void (*cpu_wait)(void);
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long sp);
extern unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild
index 040178cdb3eb..b15bf6bc0e94 100644
--- a/arch/score/include/uapi/asm/Kbuild
+++ b/arch/score/include/uapi/asm/Kbuild
@@ -1,34 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index eb64d7a677cb..6e20241a1ed4 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -101,11 +101,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
return 1;
}
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return task_pt_regs(tsk)->cp0_epc;
-}
-
unsigned long get_wchan(struct task_struct *task)
{
if (!task || task == current || task->state == TASK_RUNNING)
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 336f33a419d9..280bbff12102 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -94,7 +94,8 @@ defaultimage-$(CONFIG_SH_7206_SOLUTION_ENGINE) := vmlinux
defaultimage-$(CONFIG_SH_7619_SOLUTION_ENGINE) := vmlinux
# Set some sensible Kbuild defaults
-KBUILD_IMAGE := $(defaultimage-y)
+boot := arch/sh/boot
+KBUILD_IMAGE := $(boot)/$(defaultimage-y)
#
# Choosing incompatible machines during configuration will result in
@@ -186,8 +187,6 @@ cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/
-boot := arch/sh/boot
-
cflags-y += $(foreach d, $(cpuincdir-y), -Iarch/sh/include/$(d)) \
$(foreach d, $(machdir-y), -Iarch/sh/include/$(d))
@@ -211,7 +210,7 @@ BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
romImage
PHONY += $(BOOT_TARGETS)
-all: $(KBUILD_IMAGE)
+all: $(notdir $(KBUILD_IMAGE))
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index 60613ae78513..b15bf6bc0e94 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,25 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += cpu-features.h
-header-y += hw_breakpoint.h
-header-y += ioctls.h
-header-y += posix_types.h
-header-y += posix_types_32.h
-header-y += posix_types_64.h
-header-y += ptrace.h
-header-y += ptrace_32.h
-header-y += ptrace_64.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += sockios.h
-header-y += stat.h
-header-y += swab.h
-header-y += types.h
-header-y += unistd.h
-header-y += unistd_32.h
-header-y += unistd_64.h
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 08e7af0be4a7..6a1a1297baae 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0d21c0..5639c9fe5b55 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@ config NR_CPUS
int "Maximum number of CPUs"
depends on SMP
range 2 32 if SPARC32
- range 2 1024 if SPARC64
+ range 2 4096 if SPARC64
default 32 if SPARC32
- default 64 if SPARC64
+ default 4096 if SPARC64
source kernel/Kconfig.hz
@@ -295,9 +295,13 @@ config NUMA
depends on SPARC64 && SMP
config NODES_SHIFT
- int
- default "4"
+ int "Maximum NUMA Nodes (as a power of 2)"
+ range 4 5 if SPARC64
+ default "5"
depends on NEED_MULTIPLE_NODES
+ help
+ Specify the maximum number of NUMA Nodes available on the target
+ system. Increases memory reserved to accommodate various tables.
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
@@ -573,9 +577,6 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-config KEYS_COMPAT
- def_bool y if COMPAT && KEYS
-
endmenu
source "net/Kconfig"
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index dcbf985ab243..d1f837dc77a4 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -24,9 +24,11 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
- if (len & ~HPAGE_MASK)
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
return -EINVAL;
- if (addr & ~HPAGE_MASK)
+ if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
#define CTX_NR_MASK TAG_CONTEXT_BITS
#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
#define CTX_VALID(__ctx) \
(!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6eba11..2cddcda4f85f 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid, flags;
- int cpu;
+ int cpu = smp_processor_id();
+ per_cpu(per_cpu_secondary_mm, cpu) = mm;
if (unlikely(mm == &init_mm))
return;
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
* for the first time, we must flush that context out of the
* local TLB.
*/
- cpu = smp_processor_id();
if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
cpumask_set_cpu(cpu, mm_cpumask(mm));
__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
}
#define deactivate_mm(tsk,mm) do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
- unsigned long flags;
- int cpu;
-
- spin_lock_irqsave(&mm->context.lock, flags);
- if (!CTX_VALID(mm->context))
- get_new_mmu_context(mm);
- cpu = smp_processor_id();
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
- cpumask_set_cpu(cpu, mm_cpumask(mm));
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
- tsb_context_switch(mm);
- spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index ce6f56980aef..cf190728360b 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@ extern unsigned long pfn_base;
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
#define PIL_SMP_CALL_FUNC 1
#define PIL_SMP_RECEIVE_SIGNAL 2
#define PIL_SMP_CAPTURE 3
-#define PIL_SMP_CTX_NEW_VERSION 4
#define PIL_DEVICE_IRQ 5
#define PIL_SMP_CALL_FUNC_SNGL 6
#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index dd27159819eb..b395e5620c0b 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -67,9 +67,6 @@ struct thread_struct {
.current_ds = KERNEL_DS, \
}
-/* Return saved PC of a blocked thread. */
-unsigned long thread_saved_pc(struct task_struct *t);
-
/* Do necessary setup to start up a newly executed thread. */
static inline void start_thread(struct pt_regs * regs, unsigned long pc,
unsigned long sp)
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index b58ee9018433..f04dc5a43062 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -89,9 +89,7 @@ struct thread_struct {
#include <linux/types.h>
#include <asm/fpumacro.h>
-/* Return saved PC of a blocked thread. */
struct task_struct;
-unsigned long thread_saved_pc(struct task_struct *);
/* On Uniprocessor, even in RMO processes see TSO semantics */
#ifdef CONFIG_SMP
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 478bf6bb4598..3fae200dd251 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@ extern char reboot_command[];
*/
extern unsigned char boot_cpu_id;
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern int serial_console;
static inline int con_is_present(void)
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
int compat_len;
u64 dev_no;
+ u64 id;
unsigned long channel_id;
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index b5843ee09fb5..b15bf6bc0e94 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -1,50 +1,2 @@
# UAPI Header export list
-# User exported sparc header files
-
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += apc.h
-header-y += asi.h
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += display7seg.h
-header-y += envctrl.h
-header-y += errno.h
-header-y += fbio.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += jsflash.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += openpromio.h
-header-y += param.h
-header-y += perfctr.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += psr.h
-header-y += psrcompat.h
-header-y += pstate.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += traps.h
-header-y += uctx.h
-header-y += unistd.h
-header-y += utrap.h
-header-y += watchdog.h
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7c8d94..f87265afb175 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
pbuf.req.handle = cp->handle;
pbuf.req.major = 1;
pbuf.req.minor = 0;
- strcpy(pbuf.req.svc_id, cp->service_id);
+ strcpy(pbuf.id_buf, cp->service_id);
err = __ds_send(lp, &pbuf, msg_len);
if (err > 0)
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 6bcff698069b..cec54dc4ab81 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -130,17 +130,16 @@ unsigned long prepare_ftrace_return(unsigned long parent,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return parent + 8UL;
- if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
- frame_pointer, NULL) == -EBUSY)
- return parent + 8UL;
-
trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (!ftrace_graph_entry(&trace))
+ return parent + 8UL;
+
+ if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+ frame_pointer, NULL) == -EBUSY)
return parent + 8UL;
- }
return return_hooker;
}
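
The reorder matters because ftrace_push_return_trace() advances curr_ret_stack, which the old code then had to decrement by hand whenever the graph filter rejected the entry. Testing the filter first, with the depth precomputed, means a rejected entry never touches the return stack. Condensed control flow of the new version:

	trace.func  = self_addr;
	trace.depth = current->curr_ret_stack + 1;	/* not pushed yet */

	if (!ftrace_graph_entry(&trace))
		return parent + 8UL;			/* nothing to undo */

	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
				     frame_pointer, NULL) == -EBUSY)
		return parent + 8UL;

	return return_hooker;
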
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248aa0928..99dd133a029f 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
unsigned long page;
+ void *mondo, *p;
- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure the mondo block is 64-byte aligned */
+ p = kzalloc(127, GFP_KERNEL);
+ if (!p) {
+ prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+ prom_halt();
+ }
+ mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+ tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
- tb->cpu_mondo_block_pa = __pa(page);
- tb->cpu_list_pa = __pa(page + 64);
+ tb->cpu_list_pa = __pa(page);
#endif
}
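
kzalloc() gives no 64-byte alignment guarantee here, so the block is overallocated and the pointer rounded up by hand; 127 bytes is exactly one 64-byte block plus 63 bytes of worst-case slack. The general pattern, as a sketch (size and align stand in for 64):

	void *p = kzalloc(size + align - 1, GFP_KERNEL);	/* 64 + 63 = 127 */
	void *aligned = (void *)(((unsigned long)p + align - 1) &
				 ~((unsigned long)align - 1));
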
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551262c..6ae1e77be0bf 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
/* smp_64.c */
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b6dac8e980f0..9245f93398c7 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -177,14 +177,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
}
/*
- * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- return task_thread_info(tsk)->kpc;
-}
-
-/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 1badc493e62e..b96104da5bd6 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -400,25 +400,6 @@ core_initcall(sparc_sysrq_init);
#endif
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct thread_info *ti = task_thread_info(tsk);
- unsigned long ret = 0xdeadbeefUL;
-
- if (ti && ti->ksp) {
- unsigned long *sp;
- sp = (unsigned long *)(ti->ksp + STACK_BIAS);
- if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
- sp[14]) {
- unsigned long *fp;
- fp = (unsigned long *)(sp[14] + STACK_BIAS);
- if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
- ret = fp[15];
- }
- }
- return ret;
-}
-
/* Free current thread data structures etc.. */
void exit_thread(struct task_struct *tsk)
{
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac757cc..fdf31040a7dc 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
preempt_enable();
}
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
- struct mm_struct *mm;
- unsigned long flags;
-
- clear_softint(1 << irq);
-
- /* See if we need to allocate a new TLB context because
- * the version of the one we are using is now out of date.
- */
- mm = current->active_mm;
- if (unlikely(!mm || (mm == &init_mm)))
- return;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- if (unlikely(!CTX_VALID(mm->context)))
- get_new_mmu_context(mm);
-
- spin_unlock_irqrestore(&mm->context.lock, flags);
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context),
- SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index ef4520efc813..043544d0cda3 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cfd0ad4..07c0df924960 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@ __tsb_context_switch:
.type copy_tsb,#function
copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
* %o2=new_tsb_base, %o3=new_tsb_size
+ * %o4=page_size_shift
*/
sethi %uhi(TSB_PASS_BITS), %g7
srlx %o3, 4, %o3
- add %o0, %o1, %g1 /* end of old tsb */
+ add %o0, %o1, %o1 /* end of old tsb */
sllx %g7, 32, %g7
sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+ mov %o4, %g1 /* page_size_shift */
+
661: prefetcha [%o0] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
@@ -486,9 +489,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
/* This can definitely be computed faster... */
srlx %o0, 4, %o5 /* Build index */
and %o5, 511, %o5 /* Mask index */
- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+ sllx %o5, %g1, %o5 /* Put into vaddr position */
or %o4, %o5, %o4 /* Full VADDR. */
- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+ srlx %o4, %g1, %o4 /* Shift down to create index */
and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
80: add %o0, 16, %o0
- cmp %o0, %g1
+ cmp %o0, %o1
bne,pt %xcc, 90b
nop
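
copy_tsb() rehashes every valid entry into the new table, and a slot's index encodes vaddr bits starting at the TSB's page-size shift; using the compile-time PAGE_SHIFT for a huge-page TSB therefore rebuilt the wrong vaddr. A C model of the per-entry rehash with the new parameter (names illustrative):

	/* Rebuild the vaddr the old slot tags, then index the new table
	 * with the TSB's own shift (PAGE_SHIFT or REAL_HPAGE_SHIFT);
	 * entries are 16 bytes, hence the final << 4. */
	vaddr   = tag_bits | ((old_slot & 511UL) << shift);
	new_off = ((vaddr >> shift) & new_hash_mask) << 4;
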
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f6556352..efe93ab4a9c0 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857254fc..075d38980dee 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
if (!id) {
dev_set_name(&vdev->dev, "%s", bus_id_name);
vdev->dev_no = ~(u64)0;
+ vdev->id = ~(u64)0;
} else if (!cfg_handle) {
dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
vdev->dev_no = *id;
+ vdev->id = ~(u64)0;
} else {
dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
+ vdev->id = *id;
}
vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
(void) vio_create_one(hp, node, &root_vdev->dev);
}
+struct vio_md_node_query {
+ const char *type;
+ u64 dev_no;
+ u64 id;
+};
+
static int vio_md_node_match(struct device *dev, void *arg)
{
+ struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
struct vio_dev *vdev = to_vio_dev(dev);
- if (vdev->mp == (u64) arg)
- return 1;
+ if (vdev->dev_no != query->dev_no)
+ return 0;
+ if (vdev->id != query->id)
+ return 0;
+ if (strcmp(vdev->type, query->type))
+ return 0;
- return 0;
+ return 1;
}
static void vio_remove(struct mdesc_handle *hp, u64 node)
{
+ const char *type;
+ const u64 *id, *cfg_handle;
+ u64 a;
+ struct vio_md_node_query query;
struct device *dev;
- dev = device_find_child(&root_vdev->dev, (void *) node,
+ type = mdesc_get_property(hp, node, "device-type", NULL);
+ if (!type) {
+ type = mdesc_get_property(hp, node, "name", NULL);
+ if (!type)
+ type = mdesc_node_name(hp, node);
+ }
+
+ query.type = type;
+
+ id = mdesc_get_property(hp, node, "id", NULL);
+ cfg_handle = NULL;
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ cfg_handle = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (cfg_handle)
+ break;
+ }
+
+ if (!id) {
+ query.dev_no = ~(u64)0;
+ query.id = ~(u64)0;
+ } else if (!cfg_handle) {
+ query.dev_no = *id;
+ query.id = ~(u64)0;
+ } else {
+ query.dev_no = *cfg_handle;
+ query.id = *id;
+ }
+
+ dev = device_find_child(&root_vdev->dev, &query,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
+ } else {
+ if (!id)
+ printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+ type);
+ else if (!cfg_handle)
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+ type, *id);
+ else
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+ type, *cfg_handle, *id);
}
}
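
Matching children by the raw mdesc node value breaks once the machine description is regenerated, so removal now matches on the same stable (type, dev_no, id) triple that vio_create_one() derives the device name from. A hypothetical use of the new matcher:

	struct vio_md_node_query q = {
		.type	= "network",	/* illustrative device-type */
		.dev_no	= 3,		/* from the node's "id" property */
		.id	= ~(u64)0,	/* node had no cfg-handle arc */
	};
	struct device *dev = device_find_child(&root_vdev->dev, &q,
					       vio_md_node_match);
	if (dev) {
		device_unregister(dev);
		put_device(dev);	/* drop device_find_child()'s ref */
	}
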
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2f8b54..07c03e72d812 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-$(CONFIG_SPARC64) += atomic_64.o
lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 000000000000..d6b6c97fe3c7
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+ .text
+ .align 4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+ mov %o1, %g1
+ srl %o3, 0, %g4
+ mulx %g4, %g1, %o1
+ srlx %g1, 0x20, %g3
+ mulx %g3, %g4, %g5
+ sllx %g5, 0x20, %o5
+ srl %g1, 0, %g4
+ sub %o1, %o5, %o5
+ srlx %o5, 0x20, %o5
+ addcc %g5, %o5, %g5
+ srlx %o3, 0x20, %o5
+ mulx %g4, %o5, %g4
+ mulx %g3, %o5, %o5
+ sethi %hi(0x80000000), %g3
+ addcc %g5, %g4, %g5
+ srlx %g5, 0x20, %g5
+ add %g3, %g3, %g3
+ movcc %xcc, %g0, %g3
+ addcc %o5, %g5, %o5
+ sllx %g4, 0x20, %g4
+ add %o1, %g4, %o1
+ add %o5, %g3, %g2
+ mulx %g1, %o2, %g1
+ add %g1, %g2, %g1
+ mulx %o0, %o3, %o0
+ retl
+ add %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
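
__multi3 is the libgcc-style helper for a full 128-bit multiply, which newer compilers emit on sparc64; the kernel must provide it since it does not link against libgcc. A C model of what the routine computes (illustration only; a compiler would itself lower this expression through __multi3):

	typedef unsigned long u64;
	struct ti128 { u64 hi, lo; };

	static struct ti128 multi3_model(struct ti128 u, struct ti128 v)
	{
		unsigned __int128 low = (unsigned __int128)u.lo * v.lo;
		struct ti128 r = {
			/* only the low 128 bits of the product survive */
			.hi = u.hi * v.lo + u.lo * v.hi + (u64)(low >> 64),
			.lo = (u64)low,
		};
		return r;
	}
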
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38e6b99..88855e383b34 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index c6afe98de4d9..3bd0d513bddb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -290,7 +290,7 @@ void __init mem_init(void)
/* Saves us work later. */
- memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 0cda653ae007..3c40ebd50f92 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
}
if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
- pr_warn("hugepagesz=%llu not supported by MMU.\n",
+ hugetlb_bad_size();
+ pr_err("hugepagesz=%llu not supported by MMU.\n",
hugepage_size);
goto out;
}
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+ unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+ unsigned long new_ver, new_ctx, old_ctx;
+ struct mm_struct *mm;
+ int cpu;
+
+ bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+ /* Reserve kernel context */
+ set_bit(0, mmu_context_bmap);
+
+ new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+ if (unlikely(new_ver == 0))
+ new_ver = CTX_FIRST_VERSION;
+ tlb_context_cache = new_ver;
+
+ /*
+	 * Make sure that any new mm added to per_cpu_secondary_mm
+	 * is going to go through the get_new_mmu_context() path.
+ */
+ mb();
+
+ /*
+	 * Update versions to current on those CPUs that had valid
+	 * secondary contexts.
+ */
+ for_each_online_cpu(cpu) {
+ /*
+ * If a new mm is stored after we took this mm from the array,
+		 * it will go through the get_new_mmu_context() path, because we
+ * already bumped the version in tlb_context_cache.
+ */
+ mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+ if (unlikely(!mm || mm == &init_mm))
+ continue;
+
+ old_ctx = mm->context.sparc64_ctx_val;
+ if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+ new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+ set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+ mm->context.sparc64_ctx_val = new_ctx;
+ }
+ }
+}
/* Caller does TLB context flushing on local CPU if necessary.
* The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
{
unsigned long ctx, new_ctx;
unsigned long orig_pgsz_bits;
- int new_version;
spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened; test again whether our context became valid */
+ if (unlikely(CTX_VALID(mm->context)))
+ goto out;
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
- new_version = 0;
if (new_ctx >= (1 << CTX_NR_BITS)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
if (new_ctx >= ctx) {
- int i;
- new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
- CTX_FIRST_VERSION;
- if (new_ctx == 1)
- new_ctx = CTX_FIRST_VERSION;
-
- /* Don't call memset, for 16 entries that's just
- * plain silly...
- */
- mmu_context_bmap[0] = 3;
- mmu_context_bmap[1] = 0;
- mmu_context_bmap[2] = 0;
- mmu_context_bmap[3] = 0;
- for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
- mmu_context_bmap[i + 0] = 0;
- mmu_context_bmap[i + 1] = 0;
- mmu_context_bmap[i + 2] = 0;
- mmu_context_bmap[i + 3] = 0;
- }
- new_version = 1;
- goto out;
+ mmu_context_wrap();
+ goto retry;
}
}
+ if (mm->context.sparc64_ctx_val)
+ cpumask_clear(mm_cpumask(mm));
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
spin_unlock(&ctx_alloc_lock);
-
- if (unlikely(new_version))
- smp_new_mmu_context_version();
}
static int numa_enabled = 1;
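
Two interlocking changes make the wrap safe: CTX_FIRST_VERSION is now a single bit (see the mmu_64.h hunk above), and tlb_context_cache starts at CTX_FIRST_VERSION instead of one below it, so version 0 is never allocated and an mm that never ran (sparc64_ctx_val == 0) can never pass CTX_VALID() by accident. The wrap arithmetic, restated as a sketch:

	/* Versions advance by CTX_FIRST_VERSION within CTX_VERSION_MASK;
	 * on overflow to zero, restart at CTX_FIRST_VERSION so version 0
	 * stays permanently invalid. */
	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
	if (new_ver == 0)
		new_ver = CTX_FIRST_VERSION;
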
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index bedf08b22a47..0d4b998c7d7b 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -496,7 +496,8 @@ retry_tsb_alloc:
extern void copy_tsb(unsigned long old_tsb_base,
unsigned long old_tsb_size,
unsigned long new_tsb_base,
- unsigned long new_tsb_size);
+ unsigned long new_tsb_size,
+ unsigned long page_size_shift);
unsigned long old_tsb_base = (unsigned long) old_tsb;
unsigned long new_tsb_base = (unsigned long) new_tsb;
@@ -504,7 +505,9 @@ retry_tsb_alloc:
old_tsb_base = __pa(old_tsb_base);
new_tsb_base = __pa(new_tsb_base);
}
- copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+ copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+ tsb_index == MM_TSB_BASE ?
+ PAGE_SHIFT : REAL_HPAGE_SHIFT);
}
mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6cd3189..fcf4d27a38fb 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@ xcall_capture:
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
retry
- .globl xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
- wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
- retry
-
#ifdef CONFIG_KGDB
.globl xcall_kgdb_capture
xcall_kgdb_capture:
diff --git a/arch/tile/include/arch/Kbuild b/arch/tile/include/arch/Kbuild
deleted file mode 100644
index 3751c9fabcf2..000000000000
--- a/arch/tile/include/arch/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
-# Tile arch headers
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 24c44e93804d..16f0b08c8ce9 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -1,6 +1,3 @@
-
-header-y += ../arch/
-
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 0bc9968b97a1..f71e5206650b 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -214,13 +214,6 @@ static inline void release_thread(struct task_struct *dead_task)
extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags);
-
-/*
- * Return saved (kernel) PC of a blocked thread.
- * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
- */
-#define thread_saved_pc(t) ((t)->thread.pc)
-
unsigned long get_wchan(struct task_struct *p);
/* Return initial ksp value for given task. */
diff --git a/arch/tile/include/uapi/arch/Kbuild b/arch/tile/include/uapi/arch/Kbuild
deleted file mode 100644
index 97dfbecec6b6..000000000000
--- a/arch/tile/include/uapi/arch/Kbuild
+++ /dev/null
@@ -1,17 +0,0 @@
-# UAPI Header export list
-header-y += abi.h
-header-y += chip.h
-header-y += chip_tilegx.h
-header-y += chip_tilepro.h
-header-y += icache.h
-header-y += interrupts.h
-header-y += interrupts_32.h
-header-y += interrupts_64.h
-header-y += opcode.h
-header-y += opcode_tilegx.h
-header-y += opcode_tilepro.h
-header-y += sim.h
-header-y += sim_def.h
-header-y += spr_def.h
-header-y += spr_def_32.h
-header-y += spr_def_64.h
diff --git a/arch/tile/include/uapi/asm/Kbuild b/arch/tile/include/uapi/asm/Kbuild
index c20db8e428bf..0c74c3c5ebfa 100644
--- a/arch/tile/include/uapi/asm/Kbuild
+++ b/arch/tile/include/uapi/asm/Kbuild
@@ -1,21 +1,4 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += cachectl.h
-header-y += hardwall.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += ptrace.h
-header-y += setup.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += stat.h
-header-y += swab.h
-header-y += ucontext.h
-header-y += unistd.h
-
generic-y += ucontext.h
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 1a70e6c0f259..94709ab41ed8 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -24,8 +24,7 @@
* has an opportunity to return -EFAULT to the user if needed.
* The 64-bit routines just return a "long long" with the value,
* since they are only used from kernel space and don't expect to fault.
- * Support for 16-bit ops is included in the framework but we don't provide
- * any (x86_64 has an atomic_inc_short(), so we might want to some day).
+ * Support for 16-bit ops is included in the framework but we don't provide any.
*
* Note that the caller is advised to issue a suitable L1 or L2
* prefetch on the address being manipulated to avoid extra stalls.
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index cb10153b5c9f..03e5cc4e76e4 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index ed9c5b5ff028..85f6dd204ab6 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -57,3 +57,8 @@ config HZ
config SUBARCH
string
option env="SUBARCH"
+
+config NR_CPUS
+ int
+ range 1 1
+ default 1
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 85410279beab..b55fe9bf5d3e 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -534,7 +534,7 @@ static void ubd_handler(void)
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
blk_end_request(
(*irq_req_buffer)[count]->req,
- 0,
+ BLK_STS_OK,
(*irq_req_buffer)[count]->length
);
kfree((*irq_req_buffer)[count]);
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 2d1e0dd5bb0b..f6d1a3f747a9 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -58,8 +58,6 @@ static inline void release_thread(struct task_struct *task)
{
}
-extern unsigned long thread_saved_pc(struct task_struct *t);
-
static inline void mm_copy_segments(struct mm_struct *from_mm,
struct mm_struct *new_mm)
{
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 48bae81f8dca..6f6e7896e53f 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -14,7 +14,7 @@
static char *initrd __initdata = NULL;
static int load_initrd(char *filename, void *buf, int size);
-static int __init read_initrd(void)
+int __init read_initrd(void)
{
void *area;
long long size;
@@ -46,8 +46,6 @@ static int __init read_initrd(void)
return 0;
}
-__uml_postsetup(read_initrd);
-
static int __init uml_initrd_setup(char *line, int *add)
{
initrd = line;
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index a76295f7ede9..6b995e870d55 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -20,10 +20,8 @@
static void _print_addr(void *data, unsigned long address, int reliable)
{
- pr_info(" [<%08lx>]", address);
- pr_cont(" %s", reliable ? "" : "? ");
- print_symbol("%s", address);
- pr_cont("\n");
+ pr_info(" [<%08lx>] %s%pF\n", address, reliable ? "" : "? ",
+ (void *)address);
}
static const struct stacktrace_ops stackops = {
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 4b85acd4020c..7b5640117325 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -56,12 +56,6 @@ union thread_union cpu0_irqstack
__attribute__((__section__(".data..init_irqstack"))) =
{ INIT_THREAD_INFO(init_task) };
-unsigned long thread_saved_pc(struct task_struct *task)
-{
- /* FIXME: Need to look up userspace_pid by cpu */
- return os_process_pc(userspace_pid[0]);
-}
-
/* Changed in setup_arch, which is called in early boot */
static char host_info[(__NEW_UTS_LEN + 1) * 5];
@@ -338,11 +332,17 @@ int __init linux_main(int argc, char **argv)
return start_uml();
}
+int __init __weak read_initrd(void)
+{
+ return 0;
+}
+
void __init setup_arch(char **cmdline_p)
{
stack_protections((unsigned long) &init_thread_info);
setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
mem_total_pages(physmem_size, iomem_size, highmem);
+ read_initrd();
paging_init();
strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 23025d645160..03b3c4cc7735 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -21,6 +21,7 @@
#include <registers.h>
#include <skas.h>
#include <sysdep/stub.h>
+#include <linux/threads.h>
int is_skas_winch(int pid, int fd, void *data)
{
@@ -233,9 +234,6 @@ static int userspace_tramp(void *stack)
return 0;
}
-/* Each element set once, and only accessed by a single processor anyway */
-#undef NR_CPUS
-#define NR_CPUS 1
int userspace_pid[NR_CPUS];
int start_userspace(unsigned long stub_stack)
diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile
index b6f5c4c1eaf9..98a5ca43ae87 100644
--- a/arch/unicore32/Makefile
+++ b/arch/unicore32/Makefile
@@ -43,9 +43,9 @@ boot := arch/unicore32/boot
# Default defconfig and target when executing plain make
KBUILD_DEFCONFIG := $(ARCH)_defconfig
-KBUILD_IMAGE := zImage
+KBUILD_IMAGE := $(boot)/zImage
-all: $(KBUILD_IMAGE)
+all: zImage
zImage Image uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 0514d7ad6855..13a97aa2285f 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,10 +1,4 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += byteorder.h
-header-y += kvm_para.h
-header-y += ptrace.h
-header-y += sigcontext.h
-header-y += unistd.h
-
generic-y += kvm_para.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cd18994a9555..737212c0333e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -69,7 +69,7 @@ config X86
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
- select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select BUILDTIME_EXTABLE_SORT
@@ -360,7 +360,7 @@ config SMP
Management" code will be disabled if you say Y here.
See also <file:Documentation/x86/i386/IO-APIC.txt>,
- <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+ <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
<http://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
@@ -2776,10 +2776,6 @@ config COMPAT_FOR_U64_ALIGNMENT
config SYSVIPC_COMPAT
def_bool y
depends on SYSVIPC
-
-config KEYS_COMPAT
- def_bool y
- depends on KEYS
endif
endmenu
@@ -2797,6 +2793,9 @@ config X86_DMA_REMAP
bool
depends on STA2X11
+config HAVE_GENERIC_GUP
+ def_bool y
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4430dd489620..bf240b920473 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
# If '-Os' is enabled, disable it and print a warning.
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
undefine CONFIG_CC_OPTIMIZE_FOR_SIZE
- $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
+ $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.)
endif
endif
@@ -179,7 +179,8 @@ ifdef CONFIG_JUMP_LABEL
endif
ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
- KBUILD_CFLAGS += -maccumulate-outgoing-args
+ # This compiler flag is not supported by Clang:
+ KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
# Stackpointer is addressed different for 32 bit and 64 bit x86
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 44163e8c3868..2c860ad4fe06 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
quiet_cmd_check_data_rel = DATAREL $@
define cmd_check_data_rel
for obj in $(filter %.o,$^); do \
- readelf -S $$obj | grep -qF .rel.local && { \
+ ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
echo "error: $$obj has data relocations!" >&2; \
exit 1; \
} || true; \
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c
index 73ccf63b0f48..9dc1ce6ba3c0 100644
--- a/arch/x86/boot/compressed/cmdline.c
+++ b/arch/x86/boot/compressed/cmdline.c
@@ -13,7 +13,7 @@ static inline char rdfs8(addr_t addr)
return *((char *)(fs + addr));
}
#include "../cmdline.c"
-static unsigned long get_cmd_line_ptr(void)
+unsigned long get_cmd_line_ptr(void)
{
unsigned long cmd_line_ptr = boot_params->hdr.cmd_line_ptr;
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index cbf4b87f55b9..c3e869eaef0c 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1046,9 +1046,31 @@ struct boot_params *efi_main(struct efi_config *c,
memset((char *)gdt->address, 0x0, gdt->size);
desc = (struct desc_struct *)gdt->address;
- /* The first GDT is a dummy and the second is unused. */
- desc += 2;
+ /* The first GDT is a dummy. */
+ desc++;
+
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /* __KERNEL32_CS */
+ desc->limit0 = 0xffff;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+ desc->s = DESC_TYPE_CODE_DATA;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit = 0xf;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
+ desc++;
+ } else {
+ /* Second entry is unused on 32-bit */
+ desc++;
+ }
+ /* __KERNEL_CS */
desc->limit0 = 0xffff;
desc->base0 = 0x0000;
desc->base1 = 0x0000;
@@ -1058,12 +1080,18 @@ struct boot_params *efi_main(struct efi_config *c,
desc->p = 1;
desc->limit = 0xf;
desc->avl = 0;
- desc->l = 0;
- desc->d = SEG_OP_SIZE_32BIT;
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ desc->l = 1;
+ desc->d = 0;
+ } else {
+ desc->l = 0;
+ desc->d = SEG_OP_SIZE_32BIT;
+ }
desc->g = SEG_GRANULARITY_4KB;
desc->base2 = 0x00;
-
desc++;
+
+ /* __KERNEL_DS */
desc->limit0 = 0xffff;
desc->base0 = 0x0000;
desc->base1 = 0x0000;
@@ -1077,24 +1105,25 @@ struct boot_params *efi_main(struct efi_config *c,
desc->d = SEG_OP_SIZE_32BIT;
desc->g = SEG_GRANULARITY_4KB;
desc->base2 = 0x00;
-
-#ifdef CONFIG_X86_64
- /* Task segment value */
desc++;
- desc->limit0 = 0x0000;
- desc->base0 = 0x0000;
- desc->base1 = 0x0000;
- desc->type = SEG_TYPE_TSS;
- desc->s = 0;
- desc->dpl = 0;
- desc->p = 1;
- desc->limit = 0x0;
- desc->avl = 0;
- desc->l = 0;
- desc->d = 0;
- desc->g = SEG_GRANULARITY_4KB;
- desc->base2 = 0x00;
-#endif /* CONFIG_X86_64 */
+
+ if (IS_ENABLED(CONFIG_X86_64)) {
+ /* Task segment value */
+ desc->limit0 = 0x0000;
+ desc->base0 = 0x0000;
+ desc->base1 = 0x0000;
+ desc->type = SEG_TYPE_TSS;
+ desc->s = 0;
+ desc->dpl = 0;
+ desc->p = 1;
+ desc->limit = 0x0;
+ desc->avl = 0;
+ desc->l = 0;
+ desc->d = 0;
+ desc->g = SEG_GRANULARITY_4KB;
+ desc->base2 = 0x00;
+ desc++;
+ }
asm volatile("cli");
asm volatile ("lgdt %0" : : "m" (*gdt));
diff --git a/arch/x86/boot/compressed/error.h b/arch/x86/boot/compressed/error.h
index 2e59dac07f9e..d732e608e3af 100644
--- a/arch/x86/boot/compressed/error.h
+++ b/arch/x86/boot/compressed/error.h
@@ -1,7 +1,9 @@
#ifndef BOOT_COMPRESSED_ERROR_H
#define BOOT_COMPRESSED_ERROR_H
+#include <linux/compiler.h>
+
void warn(char *m);
-void error(char *m);
+void error(char *m) __noreturn;
#endif /* BOOT_COMPRESSED_ERROR_H */
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d2ae1f821e0c..fbf4c32d0b62 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -346,6 +346,48 @@ preferred_addr:
/* Set up the stack */
leaq boot_stack_end(%rbx), %rsp
+#ifdef CONFIG_X86_5LEVEL
+	/* Check if 5-level paging has already been enabled */
+ movq %cr4, %rax
+ testl $X86_CR4_LA57, %eax
+ jnz lvl5
+
+ /*
+ * At this point we are in long mode with 4-level paging enabled,
+ * but we want to enable 5-level paging.
+ *
+ * The problem is that we cannot do it directly. Setting LA57 in
+ * long mode would trigger #GP. So we need to switch off long mode
+ * first.
+ *
+	 * NOTE: This is not going to work if the bootloader put us
+	 * above the 4G limit.
+	 *
+	 * The first step is to go into compatibility mode.
+ */
+
+ /* Clear additional page table */
+ leaq lvl5_pgtable(%rbx), %rdi
+ xorq %rax, %rax
+ movq $(PAGE_SIZE/8), %rcx
+ rep stosq
+
+ /*
+	 * Set up the current CR3 value as the first and only entry in a
+	 * new top-level page table.
+ */
+ movq %cr3, %rdi
+ leaq 0x7 (%rdi), %rax
+ movq %rax, lvl5_pgtable(%rbx)
+
+ /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+ pushq $__KERNEL32_CS
+ leaq compatible_mode(%rip), %rax
+ pushq %rax
+ lretq
+lvl5:
+#endif
+
/* Zero EFLAGS */
pushq $0
popfq
@@ -429,6 +471,44 @@ relocated:
jmp *%rax
.code32
+#ifdef CONFIG_X86_5LEVEL
+compatible_mode:
+ /* Setup data and stack segments */
+ movl $__KERNEL_DS, %eax
+ movl %eax, %ds
+ movl %eax, %ss
+
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+
+ /* Point CR3 to 5-level paging */
+ leal lvl5_pgtable(%ebx), %eax
+ movl %eax, %cr3
+
+ /* Enable PAE and LA57 mode */
+ movl %cr4, %eax
+ orl $(X86_CR4_PAE | X86_CR4_LA57), %eax
+ movl %eax, %cr4
+
+ /* Calculate address we are running at */
+ call 1f
+1: popl %edi
+ subl $1b, %edi
+
+ /* Prepare stack for far return to Long Mode */
+ pushl $__KERNEL_CS
+ leal lvl5(%edi), %eax
+ push %eax
+
+	/* Re-enable paging */
+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
+ movl %eax, %cr0
+
+ lret
+#endif
+
no_longmode:
/* This isn't an x86-64 CPU so hang */
1:
@@ -442,7 +522,7 @@ gdt:
.word gdt_end - gdt
.long gdt
.word 0
- .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
.quad 0x00af9a000000ffff /* __KERNEL_CS */
.quad 0x00cf92000000ffff /* __KERNEL_DS */
.quad 0x0080890000000000 /* TS descriptor */
@@ -486,3 +566,7 @@ boot_stack_end:
.balign 4096
pgtable:
.fill BOOT_PGT_SIZE, 1, 0
+#ifdef CONFIG_X86_5LEVEL
+lvl5_pgtable:
+ .fill PAGE_SIZE, 1, 0
+#endif
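
The trampoline has to leave long mode because CR4.LA57 cannot be toggled while long mode is active (the #GP noted in the comment above), and it only works below 4G because compatibility mode is limited to 32-bit addresses. The full mode dance performed above, as a sketch:

	/*
	 * 1. far return to __KERNEL32_CS	(drop to compatibility mode)
	 * 2. clear CR0.PG			(paging off)
	 * 3. CR3 = lvl5_pgtable, whose single entry is the old CR3
	 *    value | 0x7			(present/writable/user)
	 * 4. set CR4.PAE | CR4.LA57
	 * 5. set CR0.PG, far return to __KERNEL_CS
	 *					(long mode, now 5-level)
	 */
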
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 54c24f0a43d3..91f27ab970ef 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -9,16 +9,42 @@
* contain the entire properly aligned running kernel image.
*
*/
+
+/*
+ * isspace() from linux/ctype.h is needed by next_args() to filter
+ * out space/lf/tab. However, boot/ctype.h conflicts with
+ * linux/ctype.h, since isdigit() is implemented in both of them.
+ * Hence disable boot/ctype.h here.
+ */
+#define BOOT_CTYPE_H
+
+/*
+ * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
+ * However, both lib/ctype.c and lib/cmdline.c would pull in
+ * EXPORT_SYMBOL, which is meaningless here and causes compile
+ * errors in some cases.
+ * So do not include linux/export.h and define EXPORT_SYMBOL(sym)
+ * as empty.
+ */
+#define _LINUX_EXPORT_H
+#define EXPORT_SYMBOL(sym)
+
#include "misc.h"
#include "error.h"
-#include "../boot.h"
+#include "../string.h"
#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
+#include <linux/ctype.h>
#include <generated/utsrelease.h>
+/* Macros used by the included decompressor code below. */
+#define STATIC
+#include <linux/decompress/mm.h>
+
+extern unsigned long get_cmd_line_ptr(void);
+
/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
@@ -62,6 +88,11 @@ struct mem_vector {
static bool memmap_too_large;
+
+/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
+unsigned long long mem_limit = ULLONG_MAX;
+
enum mem_avoid_index {
MEM_AVOID_ZO_RANGE = 0,
MEM_AVOID_INITRD,
@@ -85,49 +116,14 @@ static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
return true;
}
-/**
- * _memparse - Parse a string with mem suffixes into a number
- * @ptr: Where parse begins
- * @retptr: (output) Optional pointer to next char after parse completes
- *
- * Parses a string into a number. The number stored at @ptr is
- * potentially suffixed with K, M, G, T, P, E.
- */
-static unsigned long long _memparse(const char *ptr, char **retptr)
+char *skip_spaces(const char *str)
{
- char *endptr; /* Local pointer to end of parsed string */
-
- unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
-
- switch (*endptr) {
- case 'E':
- case 'e':
- ret <<= 10;
- case 'P':
- case 'p':
- ret <<= 10;
- case 'T':
- case 't':
- ret <<= 10;
- case 'G':
- case 'g':
- ret <<= 10;
- case 'M':
- case 'm':
- ret <<= 10;
- case 'K':
- case 'k':
- ret <<= 10;
- endptr++;
- default:
- break;
- }
-
- if (retptr)
- *retptr = endptr;
-
- return ret;
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
}
+#include "../../../../lib/ctype.c"
+#include "../../../../lib/cmdline.c"
static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
@@ -142,40 +138,41 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
return -EINVAL;
oldp = p;
- *size = _memparse(p, &p);
+ *size = memparse(p, &p);
if (p == oldp)
return -EINVAL;
switch (*p) {
- case '@':
- /* Skip this region, usable */
- *start = 0;
- *size = 0;
- return 0;
case '#':
case '$':
case '!':
- *start = _memparse(p + 1, &p);
+ *start = memparse(p + 1, &p);
+ return 0;
+ case '@':
+ /* memmap=nn@ss specifies a usable region and should be skipped */
+ *size = 0;
+ /* Fall through */
+ default:
+ /*
+ * If no offset is given and only a size is specified,
+ * memmap=nn[KMG] behaves the same as mem=nn[KMG]: it limits the
+ * maximum address the system can use. Regions above the limit
+ * should be avoided.
+ */
+ *start = 0;
return 0;
}
return -EINVAL;
}
-static void mem_avoid_memmap(void)
+static void mem_avoid_memmap(char *str)
{
- char arg[128];
+ static int i;
int rc;
- int i;
- char *str;
- /* See if we have any memmap areas */
- rc = cmdline_find_option("memmap", arg, sizeof(arg));
- if (rc <= 0)
+ if (i >= MAX_MEMMAP_REGIONS)
return;
- i = 0;
- str = arg;
while (str && (i < MAX_MEMMAP_REGIONS)) {
int rc;
unsigned long long start, size;
@@ -188,9 +185,14 @@ static void mem_avoid_memmap(void)
if (rc < 0)
break;
str = k;
- /* A usable region that should not be skipped */
- if (size == 0)
+
+ if (start == 0) {
+ /* Store the specified memory limit if size > 0 */
+ if (size > 0)
+ mem_limit = size;
+
continue;
+ }
mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
@@ -202,6 +204,57 @@ static void mem_avoid_memmap(void)
memmap_too_large = true;
}
+static int handle_mem_memmap(void)
+{
+ char *args = (char *)get_cmd_line_ptr();
+ size_t len = strlen(args);
+ char *tmp_cmdline;
+ char *param, *val;
+ u64 mem_size;
+
+ if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+ return 0;
+
+ tmp_cmdline = malloc(len + 1);
+ if (!tmp_cmdline)
+ error("Failed to allocate space for tmp_cmdline");
+
+ memcpy(tmp_cmdline, args, len);
+ tmp_cmdline[len] = 0;
+ args = tmp_cmdline;
+
+ /* Chew leading spaces */
+ args = skip_spaces(args);
+
+ while (*args) {
+ args = next_arg(args, &param, &val);
+ /* Stop at -- */
+ if (!val && strcmp(param, "--") == 0) {
+ warn("Only '--' specified in cmdline");
+ free(tmp_cmdline);
+ return -1;
+ }
+
+ if (!strcmp(param, "memmap")) {
+ mem_avoid_memmap(val);
+ } else if (!strcmp(param, "mem")) {
+ char *p = val;
+
+ if (!strcmp(p, "nopentium"))
+ continue;
+ mem_size = memparse(p, &p);
+ if (mem_size == 0) {
+ free(tmp_cmdline);
+ return -EINVAL;
+ }
+ mem_limit = mem_size;
+ }
+ }
+
+ free(tmp_cmdline);
+ return 0;
+}
+
/*
* In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
* The mem_avoid array is used to store the ranges that need to be avoided
@@ -323,7 +376,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
/* We don't need to set a mapping for setup_data. */
/* Mark the memmap regions we need to avoid */
- mem_avoid_memmap();
+ handle_mem_memmap();
#ifdef CONFIG_X86_VERBOSE_BOOTUP
/* Make sure video RAM can be used. */
@@ -432,7 +485,8 @@ static void process_e820_entry(struct boot_e820_entry *entry,
{
struct mem_vector region, overlap;
struct slot_area slot_area;
- unsigned long start_orig;
+ unsigned long start_orig, end;
+ struct boot_e820_entry cur_entry;
/* Skip non-RAM entries. */
if (entry->type != E820_TYPE_RAM)
@@ -446,8 +500,15 @@ static void process_e820_entry(struct boot_e820_entry *entry,
if (entry->addr + entry->size < minimum)
return;
- region.start = entry->addr;
- region.size = entry->size;
+ /* Ignore entries above memory limit */
+ end = min(entry->size + entry->addr, mem_limit);
+ if (entry->addr >= end)
+ return;
+ cur_entry.addr = entry->addr;
+ cur_entry.size = end - entry->addr;
+
+ region.start = cur_entry.addr;
+ region.size = cur_entry.size;
/* Give up if slot area array is full. */
while (slot_area_index < MAX_SLOT_AREA) {
@@ -461,7 +522,7 @@ static void process_e820_entry(struct boot_e820_entry *entry,
region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
/* Did we raise the address above this e820 region? */
- if (region.start > entry->addr + entry->size)
+ if (region.start > cur_entry.addr + cur_entry.size)
return;
/* Reduce size by any delta from the original address. */
@@ -564,9 +625,6 @@ void choose_random_location(unsigned long input,
{
unsigned long random_addr, min_addr;
- /* By default, keep output position unchanged. */
- *virt_addr = *output;
-
if (cmdline_find_option_bool("nokaslr")) {
warn("KASLR disabled: 'nokaslr' on cmdline.");
return;
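For reference, the memparse() now pulled in from lib/cmdline.c interprets size suffixes by falling through the switch cases, so each suffix shifts the value by another 10 bits. A user-space sketch of the same logic (strtoull stands in for the boot code's simple_strtoull):

	#include <stdlib.h>

	/* Sketch of memparse()-style suffix handling: "1G" falls through the
	 * G, M and K cases and is shifted left by 30 bits in total. */
	static unsigned long long memparse_sketch(const char *s, char **retptr)
	{
		char *end;
		unsigned long long v = strtoull(s, &end, 0);

		switch (*end) {
		case 'G': case 'g': v <<= 10; /* fall through */
		case 'M': case 'm': v <<= 10; /* fall through */
		case 'K': case 'k': v <<= 10;
			end++;
		default:
			break;
		}
		if (retptr)
			*retptr = end;
		return v;
	}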
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b3c5a5f030ce..00241c815524 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
unsigned long output_len)
{
const unsigned long kernel_total_size = VO__end - VO__text;
- unsigned long virt_addr = (unsigned long)output;
+ unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
/* Retain x86 boot parameters pointer passed from startup_32/64. */
boot_params = rmode;
@@ -390,6 +390,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
#ifdef CONFIG_X86_64
if (heap > 0x3fffffffffffUL)
error("Destination address too large");
+ if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE)
+ error("Destination virtual address is beyond the kernel mapping area");
#else
if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
error("Destination address too large");
@@ -397,7 +399,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
#ifndef CONFIG_RELOCATABLE
if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
error("Destination address does not match LOAD_PHYSICAL_ADDR");
- if ((unsigned long)output != virt_addr)
+ if (virt_addr != LOAD_PHYSICAL_ADDR)
error("Destination virtual address changed when not relocatable");
#endif
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 1c8355eadbd1..766a5211f827 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input,
unsigned long output_size,
unsigned long *virt_addr)
{
- /* No change from existing output location. */
- *virt_addr = *output;
}
#endif
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 56589d0a804b..28029be47fbb 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -63,14 +63,14 @@ static void *alloc_pgt_page(void *context)
static struct alloc_pgt_data pgt_data;
/* The top level page table entry pointer. */
-static unsigned long level4p;
+static unsigned long top_level_pgt;
/*
* Mapping information structure passed to kernel_ident_mapping_init().
* Due to relocation, pointers must be assigned at run time not build time.
*/
static struct x86_mapping_info mapping_info = {
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
};
/* Locates and clears a region for a new top level page table. */
@@ -91,9 +91,15 @@ void initialize_identity_maps(void)
* If we came here via startup_32(), cr3 will be _pgtable already
* and we must append to the existing area instead of entirely
* overwriting it.
+ *
+ * With 5-level paging, we use '_pgtable' to allocate the p4d page table,
+ * the top-level page table is allocated separately.
+ *
+ * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
+ * cases. On 4-level paging it's equal to 'top_level_pgt'.
*/
- level4p = read_cr3();
- if (level4p == (unsigned long)_pgtable) {
+ top_level_pgt = read_cr3_pa();
+ if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
debug_putstr("booted via startup_32()\n");
pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
@@ -103,7 +109,7 @@ void initialize_identity_maps(void)
pgt_data.pgt_buf = _pgtable;
pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
- level4p = (unsigned long)alloc_pgt_page(&pgt_data);
+ top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
}
}
@@ -123,7 +129,7 @@ void add_identity_map(unsigned long start, unsigned long size)
return;
/* Build the mapping. */
- kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
+ kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
start, end);
}
@@ -134,5 +140,5 @@ void add_identity_map(unsigned long start, unsigned long size)
*/
void finalize_identity_maps(void)
{
- write_cr3(level4p);
+ write_cr3(top_level_pgt);
}
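The p4d_offset() trick in initialize_identity_maps() works because the p4d level is folded away on 4-level kernels. A simplified sketch of the folded case (types and helper reduced to the bare minimum; these are not the real kernel definitions):

	typedef struct { unsigned long pgd; } pgd_t;
	typedef struct { unsigned long p4d; } p4d_t;

	/* With a folded p4d (4-level paging), p4d_offset() degenerates to a
	 * cast of the pgd pointer, so comparing its result against _pgtable
	 * works in both the 4- and 5-level cases. */
	static inline p4d_t *p4d_offset_folded(pgd_t *pgd, unsigned long addr)
	{
		(void)addr;
		return (p4d_t *)pgd;
	}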
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 1eb7d298b47d..15d9f74b0008 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -65,23 +65,3 @@ GLOBAL(copy_to_fs)
popw %es
retl
ENDPROC(copy_to_fs)
-
-#if 0 /* Not currently used, but can be enabled as needed */
-GLOBAL(copy_from_gs)
- pushw %ds
- pushw %gs
- popw %ds
- calll memcpy
- popw %ds
- retl
-ENDPROC(copy_from_gs)
-
-GLOBAL(copy_to_gs)
- pushw %es
- pushw %gs
- popw %es
- calll memcpy
- popw %es
- retl
-ENDPROC(copy_to_gs)
-#endif
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 5457b02fc050..630e3664906b 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -122,6 +122,14 @@ unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int bas
return result;
}
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+ if (*cp == '-')
+ return -simple_strtoull(cp + 1, endp, base);
+
+ return simple_strtoull(cp, endp, base);
+}
+
/**
* strlen - Find the length of a string
* @s: The string to be sized
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 113588ddb43f..f274a50db5fa 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -22,6 +22,7 @@ extern int strcmp(const char *str1, const char *str2);
extern int strncmp(const char *cs, const char *ct, size_t count);
extern size_t strlen(const char *s);
extern char *strstr(const char *s1, const char *s2);
+extern char *strchr(const char *s, int c);
extern size_t strnlen(const char *s, size_t maxlen);
extern unsigned int atou(const char *s);
extern unsigned long long simple_strtoull(const char *cp, char **endp,
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 34b3fa2889d1..9e32d40d71bd 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#
+OBJECT_FILES_NON_STANDARD := y
+
avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
$(comma)4)$(comma)%ymm2,yes,no)
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
index 2f8756375df5..2e14acc3da25 100644
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ b/arch/x86/crypto/sha1-mb/Makefile
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#
+OBJECT_FILES_NON_STANDARD := y
+
avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
$(comma)4)$(comma)%ymm2,yes,no)
ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
index 41089e7c400c..45b4fca6c4a8 100644
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ b/arch/x86/crypto/sha256-mb/Makefile
@@ -2,6 +2,8 @@
# Arch-specific CryptoAPI modules.
#
+OBJECT_FILES_NON_STANDARD := y
+
avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
$(comma)4)$(comma)%ymm2,yes,no)
ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 50bc26949e9e..48ef7bb32c42 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -252,6 +252,23 @@ ENTRY(__switch_to_asm)
END(__switch_to_asm)
/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because it's an
+ * asmlinkage function, so its argument has to be pushed on the stack. This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+ FRAME_BEGIN
+
+ pushl %eax
+ call schedule_tail
+ popl %eax
+
+ FRAME_END
+ ret
+ENDPROC(schedule_tail_wrapper)
+
+/*
* A newly forked process directly context switches into this address.
*
* eax: prev task we switched from
@@ -259,24 +276,15 @@ END(__switch_to_asm)
* edi: kernel thread arg
*/
ENTRY(ret_from_fork)
- FRAME_BEGIN /* help unwinder find end of stack */
-
- /*
- * schedule_tail() is asmlinkage so we have to put its 'prev' argument
- * on the stack.
- */
- pushl %eax
- call schedule_tail
- popl %eax
+ call schedule_tail_wrapper
testl %ebx, %ebx
jnz 1f /* kernel threads are uncommon */
2:
/* When we fork, we trace the syscall return in the child, too. */
- leal FRAME_OFFSET(%esp), %eax
+ movl %esp, %eax
call syscall_return_slowpath
- FRAME_END
jmp restore_all
/* kernel thread */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 607d72c4a485..a9a8027a6c0e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,7 +36,6 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
-#include <asm/frame.h>
#include <linux/err.h>
.code64
@@ -266,7 +265,8 @@ return_from_SYSCALL_64:
* If width of "canonical tail" ever becomes variable, this will need
* to be updated to remain correct on both old and new CPUs.
*
- * Change top 16 bits to be the sign-extension of 47th bit
+ * Change top bits to match most significant bit (47th or 56th bit
+ * depending on paging mode) in the address.
*/
shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
@@ -406,19 +406,17 @@ END(__switch_to_asm)
* r12: kernel thread arg
*/
ENTRY(ret_from_fork)
- FRAME_BEGIN /* help unwinder find end of stack */
movq %rax, %rdi
- call schedule_tail /* rdi: 'prev' task parameter */
+ call schedule_tail /* rdi: 'prev' task parameter */
- testq %rbx, %rbx /* from kernel_thread? */
- jnz 1f /* kernel threads are uncommon */
+ testq %rbx, %rbx /* from kernel_thread? */
+ jnz 1f /* kernel threads are uncommon */
2:
- leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
+ movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
TRACE_IRQS_ON /* user mode is traced as IRQS on */
SWAPGS
- FRAME_END
jmp restore_regs_and_iret
1:
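The shl/sar pair in return_from_SYSCALL_64 sign-extends the most significant implemented virtual-address bit into the upper bits. The same canonicalization in C, with va_bits standing for __VIRTUAL_MASK_SHIFT + 1 (48 or 57 depending on paging mode); an arithmetic right shift on signed values is assumed, as on all mainstream compilers:

	#include <stdint.h>

	/* Replicate bit (va_bits - 1) into the upper bits of an address,
	 * exactly what "shl $(64 - va_bits); sar $(64 - va_bits)" does. */
	static inline uint64_t canonicalize(uint64_t addr, int va_bits)
	{
		int shift = 64 - va_bits;

		return (uint64_t)((int64_t)(addr << shift) >> shift);
	}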
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 580b60f5ac83..2de0dd73830a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1750,6 +1750,8 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
return ret;
}
+static struct attribute_group x86_pmu_attr_group;
+
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
@@ -1813,6 +1815,14 @@ static int __init init_hw_perf_events(void)
x86_pmu_events_group.attrs = tmp;
}
+ if (x86_pmu.attrs) {
+ struct attribute **tmp;
+
+ tmp = merge_attr(x86_pmu_attr_group.attrs, x86_pmu.attrs);
+ if (!WARN_ON(!tmp))
+ x86_pmu_attr_group.attrs = tmp;
+ }
+
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);
@@ -2101,8 +2111,7 @@ static int x86_pmu_event_init(struct perf_event *event)
static void refresh_pce(void *ignored)
{
- if (current->active_mm)
- load_mm_cr4(current->active_mm);
+ load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
}
static void x86_pmu_event_mapped(struct perf_event *event)
@@ -2255,7 +2264,7 @@ static struct pmu pmu = {
void arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg, u64 now)
{
- struct cyc2ns_data *data;
+ struct cyc2ns_data data;
u64 offset;
userpg->cap_user_time = 0;
@@ -2267,17 +2276,17 @@ void arch_perf_update_userpage(struct perf_event *event,
if (!using_native_sched_clock() || !sched_clock_stable())
return;
- data = cyc2ns_read_begin();
+ cyc2ns_read_begin(&data);
- offset = data->cyc2ns_offset + __sched_clock_offset;
+ offset = data.cyc2ns_offset + __sched_clock_offset;
/*
* Internal timekeeping for enabled/running/stopped times
* is always in the local_clock domain.
*/
userpg->cap_user_time = 1;
- userpg->time_mult = data->cyc2ns_mul;
- userpg->time_shift = data->cyc2ns_shift;
+ userpg->time_mult = data.cyc2ns_mul;
+ userpg->time_shift = data.cyc2ns_shift;
userpg->time_offset = offset - now;
/*
@@ -2289,7 +2298,7 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->time_zero = offset;
}
- cyc2ns_read_end(data);
+ cyc2ns_read_end();
}
void
@@ -2334,7 +2343,7 @@ static unsigned long get_segment_base(unsigned int segment)
/* IRQs are off, so this synchronizes with smp_store_release */
ldt = lockless_dereference(current->active_mm->context.ldt);
- if (!ldt || idx > ldt->size)
+ if (!ldt || idx > ldt->nr_entries)
return 0;
desc = &ldt->entries[idx];
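The time_mult/time_shift/time_offset fields filled in by arch_perf_update_userpage() let user space convert raw TSC readings to sched_clock() nanoseconds. A simplified sketch of the consumer side (it ignores the 64x32-bit multiply overflow handling a production reader would need):

	#include <stdint.h>

	/* ns = time_offset + (cycles * time_mult) >> time_shift, per the
	 * cap_user_time contract of the perf mmap control page. */
	static inline uint64_t perf_cyc_to_ns(uint64_t cycles, uint32_t mult,
					      uint16_t shift, uint64_t offset)
	{
		return offset + ((cycles * mult) >> shift);
	}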
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a6d91d4e37a1..31acf2a98394 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
- [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
+ [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
- [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */
+ [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0,
@@ -3160,6 +3160,19 @@ err:
return -ENOMEM;
}
+static void flip_smm_bit(void *data)
+{
+ unsigned long set = *(unsigned long *)data;
+
+ if (set > 0) {
+ msr_set_bit(MSR_IA32_DEBUGCTLMSR,
+ DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
+ } else {
+ msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
+ DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
+ }
+}
+
static void intel_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -3174,6 +3187,8 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = NULL;
+ flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
+
if (!cpuc->shared_regs)
return;
@@ -3595,6 +3610,52 @@ static struct attribute *hsw_events_attrs[] = {
NULL
};
+static ssize_t freeze_on_smi_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
+}
+
+static DEFINE_MUTEX(freeze_on_smi_mutex);
+
+static ssize_t freeze_on_smi_store(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > 1)
+ return -EINVAL;
+
+ mutex_lock(&freeze_on_smi_mutex);
+
+ if (x86_pmu.attr_freeze_on_smi == val)
+ goto done;
+
+ x86_pmu.attr_freeze_on_smi = val;
+
+ get_online_cpus();
+ on_each_cpu(flip_smm_bit, &val, 1);
+ put_online_cpus();
+done:
+ mutex_unlock(&freeze_on_smi_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(freeze_on_smi);
+
+static struct attribute *intel_pmu_attrs[] = {
+ &dev_attr_freeze_on_smi.attr,
+ NULL,
+};
+
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
@@ -3641,6 +3702,8 @@ __init int intel_pmu_init(void)
x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
+ x86_pmu.attrs = intel_pmu_attrs;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events, when not running in a hypervisor:
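The new freeze_on_smi attribute is a plain sysfs file, so it can be toggled from user space. A minimal sketch (the /sys/devices/cpu path is an assumption for the core PMU device, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		/* Path assumed for the core PMU; adjust for your system. */
		FILE *f = fopen("/sys/devices/cpu/freeze_on_smi", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("1\n", f);	/* freeze counters while in SMM */
		fclose(f);
		return 0;
	}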
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index f924629836a8..eb261656a320 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -18,7 +18,7 @@ enum {
LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_TIME,
};
-static enum {
+static const enum {
LBR_EIP_FLAGS = 1,
LBR_TSX = 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
@@ -287,7 +287,7 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
/*
* If quirk is needed, ensure sign extension is 61 bits:
*/
-u64 lbr_from_signext_quirk_rd(u64 val)
+static u64 lbr_from_signext_quirk_rd(u64 val)
{
if (static_branch_unlikely(&lbr_from_quirk_key)) {
/*
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 9d05c7e67f60..a45e2114a846 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -761,7 +761,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, hsw_rapl_init),
- X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsw_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, hsx_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 758c1aa5009d..44ec523287f6 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1170,7 +1170,7 @@ static int uncore_event_cpu_online(unsigned int cpu)
pmu = type->pmus;
for (i = 0; i < type->num_boxes; i++, pmu++) {
box = pmu->boxes[pkg];
- if (!box && atomic_inc_return(&box->refcnt) == 1)
+ if (box && atomic_inc_return(&box->refcnt) == 1)
uncore_box_init(box);
}
}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index be3d36254040..53728eea1bed 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -562,6 +562,9 @@ struct x86_pmu {
ssize_t (*events_sysfs_show)(char *page, u64 config);
struct attribute **cpu_events;
+ unsigned long attr_freeze_on_smi;
+ struct attribute **attrs;
+
/*
* CPU Hotplug hooks
*/
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7acb51c49fec..7a9df3beb89b 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -32,6 +32,7 @@
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
+#define _ASM_MUL __ASM_SIZE(mul)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index caa5798c92f4..33380b871463 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -246,19 +246,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
-/**
- * atomic_inc_short - increment of a short integer
- * @v: pointer to type int
- *
- * Atomically adds 1 to @v
- * Returns the new value of @u
- */
-static __always_inline short int atomic_inc_short(short int *v)
-{
- asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
- return *v;
-}
-
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 2f77bcefe6b4..d2ff779f347e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -74,7 +74,7 @@ struct efi_scratch {
__kernel_fpu_begin(); \
\
if (efi_scratch.use_pgd) { \
- efi_scratch.prev_cr3 = read_cr3(); \
+ efi_scratch.prev_cr3 = __read_cr3(); \
write_cr3((unsigned long)efi_scratch.efi_pgt); \
__flush_tlb_all(); \
} \
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index b8ad261d11dc..c66d19e3c23e 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -29,6 +29,7 @@ struct pt_regs;
} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_bug(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 59405a248fc2..9b76cd331990 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -22,8 +22,8 @@ typedef struct {
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
- unsigned int irq_tlb_count;
#endif
+ unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
unsigned int irq_thermal_count;
#endif
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 737da62bfeb0..474eb8c66fee 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -4,8 +4,9 @@
struct x86_mapping_info {
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
void *context; /* context for alloc_pgt_page */
- unsigned long pmd_flag; /* page flag for PMD entry */
+ unsigned long page_flag; /* page flag for PMD or PUD entry */
unsigned long offset; /* ident mapping offset */
+ bool direct_gbpages; /* PUD level 1GB page support */
};
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 055962615779..722d0e568863 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
+ bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
struct x86_exception exception;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f5bddf92faba..695605eb1dfb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -43,7 +43,7 @@
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
-#define KVM_HALT_POLL_NS_DEFAULT 400000
+#define KVM_HALT_POLL_NS_DEFAULT 200000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
@@ -1020,6 +1020,8 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
+ int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 4fd5195deed0..3f9a3d2a5209 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -266,6 +266,7 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s
#endif
int mce_available(struct cpuinfo_x86 *c);
+bool mce_is_memory_error(struct mce *m);
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index f9813b6d8b80..79b647a7ebd0 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -37,12 +37,6 @@ typedef struct {
#endif
} mm_context_t;
-#ifdef CONFIG_SMP
void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif
#endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 68b329d77b3a..ecfcb6643c9b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -47,7 +47,7 @@ struct ldt_struct {
* allocations, but it's not worth trying to optimize.
*/
struct desc_struct *entries;
- unsigned int size;
+ unsigned int nr_entries;
};
/*
@@ -87,22 +87,46 @@ static inline void load_mm_ldt(struct mm_struct *mm)
*/
if (unlikely(ldt))
- set_ldt(ldt->entries, ldt->size);
+ set_ldt(ldt->entries, ldt->nr_entries);
else
clear_LDT();
#else
clear_LDT();
#endif
+}
+
+static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ /*
+ * Load the LDT if either the old or new mm had an LDT.
+ *
+ * An mm will never go from having an LDT to not having an LDT. Two
+ * mms never share an LDT, so we don't gain anything by checking to
+ * see whether the LDT changed. There's also no guarantee that
+ * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
+ * then prev->context.ldt will also be non-NULL.
+ *
+ * If we really cared, we could optimize the case where prev == next
+ * and we're exiting lazy mode. Most of the time, if this happens,
+ * we don't actually need to reload LDTR, but modify_ldt() is mostly
+ * used by legacy code and emulators where we don't need this level of
+ * performance.
+ *
+ * This uses | instead of || because it generates better code.
+ */
+ if (unlikely((unsigned long)prev->context.ldt |
+ (unsigned long)next->context.ldt))
+ load_mm_ldt(next);
+#endif
DEBUG_LOCKS_WARN_ON(preemptible());
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
-#ifdef CONFIG_SMP
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
}
static inline int init_new_context(struct task_struct *tsk,
@@ -220,18 +244,6 @@ static inline int vma_pkey(struct vm_area_struct *vma)
}
#endif
-static inline bool __pkru_allows_pkey(u16 pkey, bool write)
-{
- u32 pkru = read_pkru();
-
- if (!__pkru_allows_read(pkru, pkey))
- return false;
- if (write && !__pkru_allows_write(pkru, pkey))
- return false;
-
- return true;
-}
-
/*
* We only want to enforce protection keys on the current process
* because we effectively have no access to PKRU for other
@@ -268,4 +280,23 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
return __pkru_allows_pkey(vma_pkey(vma), write);
}
+
+/*
+ * This can be used from process context to figure out what the value of
+ * CR3 is without needing to do a (slow) __read_cr3().
+ *
+ * It's intended to be used for code like KVM that sneakily changes CR3
+ * and needs to restore it. It needs to be used very carefully.
+ */
+static inline unsigned long __get_current_cr3_fast(void)
+{
+ unsigned long cr3 = __pa(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd);
+
+ /* For now, be very restrictive about when this can be called. */
+ VM_WARN_ON(in_nmi() || !in_atomic());
+
+ VM_BUG_ON(cr3 != __read_cr3());
+ return cr3;
+}
+
#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index fba100713924..d5acc27ed1cc 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -2,8 +2,7 @@
#define _ASM_X86_MSHYPER_H
#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
+#include <linux/atomic.h>
#include <asm/hyperv.h>
/*
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 673f9ac50f6d..18b162322eff 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -137,6 +137,8 @@
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14
+#define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
#define MSR_PEBS_FRONTEND 0x000003f7
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 55fa56fe4e45..9ccac1926587 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -61,7 +61,7 @@ static inline void write_cr2(unsigned long x)
PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}
-static inline unsigned long read_cr3(void)
+static inline unsigned long __read_cr3(void)
{
return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}
@@ -118,7 +118,7 @@ static inline u64 paravirt_read_msr(unsigned msr)
static inline void paravirt_write_msr(unsigned msr,
unsigned low, unsigned high)
{
- return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
+ PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}
static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
@@ -312,11 +312,9 @@ static inline void __flush_tlb_single(unsigned long addr)
}
static inline void flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+ const struct flush_tlb_info *info)
{
- PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
+ PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7465d6fe336f..cb976bab6299 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -51,6 +51,7 @@ struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
+struct flush_tlb_info;
/*
* Wrapper type for pointers to code which uses the non-standard
@@ -223,9 +224,7 @@ struct pv_mmu_ops {
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(unsigned long addr);
void (*flush_tlb_others)(const struct cpumask *cpus,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
+ const struct flush_tlb_info *info);
/* Hooks for allocating and freeing a pagetable top-level */
int (*pgd_alloc)(struct mm_struct *mm);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 50d35e3185f5..c8821bab938f 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -212,4 +212,51 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
+#define gup_get_pte gup_get_pte
+/*
+ * WARNING: only to be used in the get_user_pages_fast() implementation.
+ *
+ * With get_user_pages_fast(), we walk down the pagetables without taking
+ * any locks. For this we would like to load the pointers atomically,
+ * but that is not possible (without expensive cmpxchg8b) on PAE. What
+ * we do have is the guarantee that a PTE will only either go from not
+ * present to present, or present to not present or both -- it will not
+ * switch to a completely different present page without a TLB flush in
+ * between; something that we are blocking by holding interrupts off.
+ *
+ * Setting ptes from not present to present goes:
+ *
+ * ptep->pte_high = h;
+ * smp_wmb();
+ * ptep->pte_low = l;
+ *
+ * And present to not present goes:
+ *
+ * ptep->pte_low = 0;
+ * smp_wmb();
+ * ptep->pte_high = 0;
+ *
+ * We must ensure here that the load of pte_low sees 'l' iff pte_high
+ * sees 'h'. We load pte_high *after* loading pte_low, which ensures we
+ * don't see an older value of pte_high. *Then* we recheck pte_low,
+ * which ensures that we haven't picked up a changed pte high. We might
+ * have gotten rubbish values from pte_low and pte_high, but we are
+ * guaranteed that pte_low will not have the present bit set *unless*
+ * it is 'l'. Because get_user_pages_fast() only operates on present ptes
+ * we're safe.
+ */
+static inline pte_t gup_get_pte(pte_t *ptep)
+{
+ pte_t pte;
+
+ do {
+ pte.pte_low = ptep->pte_low;
+ smp_rmb();
+ pte.pte_high = ptep->pte_high;
+ smp_rmb();
+ } while (unlikely(pte.pte_low != ptep->pte_low));
+
+ return pte;
+}
+
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
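gup_get_pte() above is the load side of the protocol the comment describes. The store side, sketched for completeness under the same assumptions (pte_t shown as its two 32-bit halves; the real setters live elsewhere in pgtable-3level.h, and this compiles only in kernel context where smp_wmb() exists):

	/* Publish a PTE as present: write the high half first, then the low
	 * (present) half, with a write barrier in between so a racing
	 * gup_get_pte() never sees 'l' paired with a stale high word. */
	static inline void set_pte_present_sketch(pte_t *ptep, pte_t pte)
	{
		ptep->pte_high = pte.pte_high;
		smp_wmb();
		ptep->pte_low = pte.pte_low;
	}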
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index f5af95a0c6b8..77037b6f1caa 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -244,6 +244,11 @@ static inline int pud_devmap(pud_t pud)
return 0;
}
#endif
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -917,7 +922,7 @@ extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
/* Default trampoline pgd value */
- trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
+ trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
@@ -1185,6 +1190,54 @@ static inline u16 pte_flags_pkey(unsigned long pte_flags)
#endif
}
+static inline bool __pkru_allows_pkey(u16 pkey, bool write)
+{
+ u32 pkru = read_pkru();
+
+ if (!__pkru_allows_read(pkru, pkey))
+ return false;
+ if (write && !__pkru_allows_write(pkru, pkey))
+ return false;
+
+ return true;
+}
+
+/*
+ * 'pteval' can come from a PTE, PMD or PUD. We only check
+ * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
+ * same value on all 3 types.
+ */
+static inline bool __pte_access_permitted(unsigned long pteval, bool write)
+{
+ unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
+
+ if (write)
+ need_pte_bits |= _PAGE_RW;
+
+ if ((pteval & need_pte_bits) != need_pte_bits)
+ return false;
+
+ return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
+}
+
+#define pte_access_permitted pte_access_permitted
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ return __pte_access_permitted(pte_val(pte), write);
+}
+
+#define pmd_access_permitted pmd_access_permitted
+static inline bool pmd_access_permitted(pmd_t pmd, bool write)
+{
+ return __pte_access_permitted(pmd_val(pmd), write);
+}
+
+#define pud_access_permitted pud_access_permitted
+static inline bool pud_access_permitted(pud_t pud, bool write)
+{
+ return __pte_access_permitted(pud_val(pud), write);
+}
+
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 9991224f6238..2160c1fee920 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -14,15 +14,17 @@
#include <linux/bitops.h>
#include <linux/threads.h>
+extern p4d_t level4_kernel_pgt[512];
+extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pgd_t init_top_pgt[];
-#define swapper_pg_dir init_level4_pgt
+#define swapper_pg_dir init_top_pgt
extern void paging_init(void);
@@ -227,6 +229,20 @@ extern void cleanup_highmap(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-#endif /* !__ASSEMBLY__ */
+#define gup_fast_permitted gup_fast_permitted
+static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
+ int write)
+{
+ unsigned long len, end;
+
+ len = (unsigned long)nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (end < start)
+ return false;
+ if (end >> __VIRTUAL_MASK_SHIFT)
+ return false;
+ return true;
+}
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d5a22bac9988..0ff8fe71b255 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -98,7 +98,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
if (bytes < 8) {
if (!IS_ALIGNED(dest, 4) || (bytes != 4))
- arch_wb_cache_pmem(addr, 1);
+ arch_wb_cache_pmem(addr, bytes);
} else {
if (!IS_ALIGNED(dest, 8)) {
dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 39fb618e2211..79aa2f98398d 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -8,4 +8,40 @@
#else
#define X86_VM_MASK 0 /* No VM86 support */
#endif
+
+/*
+ * CR3's layout varies depending on several things.
+ *
+ * If CR4.PCIDE is set (64-bit only), then CR3[11:0] is the address space ID.
+ * If PAE is enabled, then CR3[11:5] is part of the PDPT address
+ * (i.e. it's 32-byte aligned, not page-aligned) and CR3[4:0] is ignored.
+ * Otherwise (non-PAE, non-PCID), CR3[3] is PWT, CR3[4] is PCD, and
+ * CR3[2:0] and CR3[11:5] are ignored.
+ *
+ * In all cases, Linux puts zeros in the low ignored bits and in PWT and PCD.
+ *
+ * CR3[63] is always read as zero. If CR4.PCIDE is set, then CR3[63] may be
+ * written as 1 to prevent the write to CR3 from flushing the TLB.
+ *
+ * On systems with SME, one bit (in a variable position!) is stolen to indicate
+ * that the top-level paging structure is encrypted.
+ *
+ * All of the remaining bits indicate the physical address of the top-level
+ * paging structure.
+ *
+ * CR3_ADDR_MASK is the mask used by read_cr3_pa().
+ */
+#ifdef CONFIG_X86_64
+/* Mask off the address space ID bits. */
+#define CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ull
+#define CR3_PCID_MASK 0xFFFull
+#else
+/*
+ * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
+ * a tiny bit of code size by setting all the bits.
+ */
+#define CR3_ADDR_MASK 0xFFFFFFFFull
+#define CR3_PCID_MASK 0ull
+#endif
+
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
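Given the layout documented above, splitting a 64-bit CR3 value into its physical-address and PCID parts is just masking. A small sketch using the masks defined here (PCID-enabled, no SME):

	#include <stdint.h>

	#define CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ull
	#define CR3_PCID_MASK 0xFFFull

	/* read_cr3_pa() in processor.h applies CR3_ADDR_MASK exactly like
	 * cr3_pa() below. */
	static inline uint64_t cr3_pa(uint64_t cr3)   { return cr3 & CR3_ADDR_MASK; }
	static inline uint64_t cr3_pcid(uint64_t cr3) { return cr3 & CR3_PCID_MASK; }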
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3cada998a402..2e1696294af5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -231,6 +231,14 @@ native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)
+/*
+ * Friendlier CR3 helpers.
+ */
+static inline unsigned long read_cr3_pa(void)
+{
+ return __read_cr3() & CR3_ADDR_MASK;
+}
+
static inline void load_cr3(pgd_t *pgdir)
{
write_cr3(__pa(pgdir));
@@ -860,8 +868,6 @@ extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */
-extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
unsigned long new_sp);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 12af3e35edfa..9efaabf5b54b 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -39,7 +39,7 @@ static inline void native_write_cr2(unsigned long val)
asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}
-static inline unsigned long native_read_cr3(void)
+static inline unsigned long __native_read_cr3(void)
{
unsigned long val;
asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
@@ -159,9 +159,13 @@ static inline void write_cr2(unsigned long x)
native_write_cr2(x);
}
-static inline unsigned long read_cr3(void)
+/*
+ * Careful! CR3 contains more than just an address. You probably want
+ * read_cr3_pa() instead.
+ */
+static inline unsigned long __read_cr3(void)
{
- return native_read_cr3();
+ return __native_read_cr3();
}
static inline void write_cr3(unsigned long x)
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 27e9f9d769b8..2016962103df 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -29,11 +29,9 @@ struct cyc2ns_data {
u32 cyc2ns_mul;
u32 cyc2ns_shift;
u64 cyc2ns_offset;
- u32 __count;
- /* u32 hole */
-}; /* 24 bytes -- do not grow */
+}; /* 16 bytes */
-extern struct cyc2ns_data *cyc2ns_read_begin(void);
-extern void cyc2ns_read_end(struct cyc2ns_data *);
+extern void cyc2ns_read_begin(struct cyc2ns_data *);
+extern void cyc2ns_read_end(void);
#endif /* _ASM_X86_TIMER_H */
diff --git a/arch/x86/include/asm/tlbbatch.h b/arch/x86/include/asm/tlbbatch.h
new file mode 100644
index 000000000000..f4a6ff352a0e
--- /dev/null
+++ b/arch/x86/include/asm/tlbbatch.h
@@ -0,0 +1,14 @@
+#ifndef _ARCH_X86_TLBBATCH_H
+#define _ARCH_X86_TLBBATCH_H
+
+#include <linux/cpumask.h>
+
+struct arch_tlbflush_unmap_batch {
+ /*
+ * Each bit set is a CPU that potentially has a TLB entry for one of
+ * the PFNs being flushed..
+ */
+ struct cpumask cpumask;
+};
+
+#endif /* _ARCH_X86_TLBBATCH_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6ed9ea469b48..50ea3482e1d1 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -7,6 +7,7 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
+#include <asm/smp.h>
static inline void __invpcid(unsigned long pcid, unsigned long addr,
unsigned long type)
@@ -65,10 +66,14 @@ static inline void invpcid_flush_all_nonglobals(void)
#endif
struct tlb_state {
-#ifdef CONFIG_SMP
- struct mm_struct *active_mm;
+ /*
+ * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
+ * are on. This means that it may not match current->active_mm,
+ * which will contain the previous user mm when we're in lazy TLB
+ * mode even if we've already switched back to swapper_pg_dir.
+ */
+ struct mm_struct *loaded_mm;
int state;
-#endif
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
@@ -151,7 +156,7 @@ static inline void __native_flush_tlb(void)
* back:
*/
preempt_disable();
- native_write_cr3(native_read_cr3());
+ native_write_cr3(__native_read_cr3());
preempt_enable();
}
@@ -220,84 +225,16 @@ static inline void __flush_tlb_one(unsigned long addr)
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
+ * - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
*/
-
-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
- *
- * This is a helper for other header functions. *Not* intended to be called
- * directly. All global TLB flushes need to either call this, or to bump the
- * vm statistics themselves.
- */
-static inline void __flush_tlb_up(void)
-{
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- __flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- __flush_tlb_all();
-}
-
-static inline void local_flush_tlb(void)
-{
- __flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- if (mm == current->active_mm)
- __flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if (vma->vm_mm == current->active_mm)
- __flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
- unsigned long start, unsigned long end, unsigned long vmflag)
-{
- if (mm == current->active_mm)
- __flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- flush_tlb_all();
-}
-
-#else /* SMP */
-
-#include <asm/smp.h>
+struct flush_tlb_info {
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+};
#define local_flush_tlb() __flush_tlb()
@@ -307,29 +244,32 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
extern void flush_tlb_all(void);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
+{
+ flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+}
+
void native_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ const struct flush_tlb_info *info);
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
-static inline void reset_lazy_tlbstate(void)
+static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm)
{
- this_cpu_write(cpu_tlbstate.state, 0);
- this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
+ cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}
-#endif /* SMP */
+extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, start, end) \
- native_flush_tlb_others(mask, mm, start, end)
+#define flush_tlb_others(mask, info) \
+ native_flush_tlb_others(mask, info)
#endif
#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 68766b276d9e..a059aac9e937 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -319,10 +319,10 @@ do { \
#define __get_user_asm_u64(x, ptr, retval, errret) \
({ \
__typeof__(ptr) __ptr = (ptr); \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: movl %2,%%eax\n" \
"2: movl %3,%%edx\n" \
- "3: " ASM_CLAC "\n" \
+ "3:\n" \
".section .fixup,\"ax\"\n" \
"4: mov %4,%0\n" \
" xorl %%eax,%%eax\n" \
@@ -331,7 +331,7 @@ do { \
".previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
- : "=r" (retval), "=A"(x) \
+ : "=r" (retval), "=&A"(x) \
: "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
"i" (errret), "0" (retval)); \
})
@@ -703,14 +703,15 @@ extern struct movsl_mask {
#define unsafe_put_user(x, ptr, err_label) \
do { \
int __pu_err; \
- __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
if (unlikely(__pu_err)) goto err_label; \
} while (0)
#define unsafe_get_user(x, ptr, err_label) \
do { \
int __gu_err; \
- unsigned long __gu_val; \
+ __inttype(*(ptr)) __gu_val; \
__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
if (unlikely(__gu_err)) goto err_label; \
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 6686820feae9..b5a32231abd8 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_UV_UV_H
#define _ASM_X86_UV_UV_H
+#include <asm/tlbflush.h>
+
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
struct cpumask;
@@ -15,10 +17,7 @@ extern void uv_cpu_init(void);
extern void uv_nmi_init(void);
extern void uv_system_init(void);
extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- unsigned int cpu);
+ const struct flush_tlb_info *info);
#else /* X86_UV */
@@ -28,8 +27,8 @@ static inline int is_uv_hubless(void) { return 0; }
static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
static inline const struct cpumask *
-uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
- unsigned long start, unsigned long end, unsigned int cpu)
+uv_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
{ return cpumask; }
#endif /* X86_UV */
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 3dec769cadf7..83b6e9a0dce4 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -4,62 +4,3 @@ include include/uapi/asm-generic/Kbuild.asm
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
-header-y += a.out.h
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += boot.h
-header-y += bootparam.h
-header-y += byteorder.h
-header-y += debugreg.h
-header-y += e820.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += hw_breakpoint.h
-header-y += hyperv.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += ist.h
-header-y += kvm.h
-header-y += kvm_para.h
-header-y += kvm_perf.h
-header-y += ldt.h
-header-y += mce.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += msr-index.h
-header-y += msr.h
-header-y += mtrr.h
-header-y += param.h
-header-y += perf_regs.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += posix_types_32.h
-header-y += posix_types_64.h
-header-y += posix_types_x32.h
-header-y += prctl.h
-header-y += processor-flags.h
-header-y += ptrace-abi.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += sigcontext32.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += svm.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
-header-y += vm86.h
-header-y += vmx.h
-header-y += vsyscall.h
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 432df4b1baec..f4fef5a24ebd 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -34,16 +34,10 @@
#define HV_X64_MSR_REFERENCE_TSC 0x40000021
/*
- * There is a single feature flag that signifies the presence of the MSR
- * that can be used to retrieve both the local APIC Timer frequency as
- * well as the TSC frequency.
+ * There is a single feature flag that signifies if the partition has access
+ * to MSRs with local APIC and TSC frequencies.
*/
-
-/* Local APIC timer frequency MSR (HV_X64_MSR_APIC_FREQUENCY) is available */
-#define HV_X64_MSR_APIC_FREQUENCY_AVAILABLE (1 << 11)
-
-/* TSC frequency MSR (HV_X64_MSR_TSC_FREQUENCY) is available */
-#define HV_X64_MSR_TSC_FREQUENCY_AVAILABLE (1 << 11)
+#define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11)
/*
* Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
@@ -73,6 +67,9 @@
*/
#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
+/* Frequency MSRs available */
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE (1 << 8)
+
/* Crash MSR available */
#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 567de50a4c2a..185f3d10c194 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -104,6 +104,8 @@
#define X86_CR4_OSFXSR _BITUL(X86_CR4_OSFXSR_BIT)
#define X86_CR4_OSXMMEXCPT_BIT 10 /* enable unmasked SSE exceptions */
#define X86_CR4_OSXMMEXCPT _BITUL(X86_CR4_OSXMMEXCPT_BIT)
+#define X86_CR4_LA57_BIT 12 /* enable 5-level page tables */
+#define X86_CR4_LA57 _BITUL(X86_CR4_LA57_BIT)
#define X86_CR4_VMXE_BIT 13 /* enable VMX virtualization */
#define X86_CR4_VMXE _BITUL(X86_CR4_VMXE_BIT)
#define X86_CR4_SMXE_BIT 14 /* enable safer mode (TXT) */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4b994232cb57..a01892bdd61a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -18,6 +18,7 @@ CFLAGS_REMOVE_pvclock.o = -pg
CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_head64.o = -pg
endif
KASAN_SANITIZE_head$(BITS).o := n
@@ -29,6 +30,7 @@ OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_test_nx.o := y
+OBJECT_FILES_NON_STANDARD_paravirt_patch_$(BITS).o := y
# If instrumentation of this dir is enabled, boot hangs during first second.
# Probably could be more selective here, but note that files related to irqs,
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 26b78d86f25a..85a9e17e0dbc 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_wakeup_$(BITS).o := y
+
obj-$(CONFIG_ACPI) += boot.o
obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
obj-$(CONFIG_ACPI_APEI) += apei.o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c5b8f760473c..32e14d137416 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
memcpy(insnbuf, replacement, a->replacementlen);
insnbuf_sz = a->replacementlen;
- /* 0xe8 is a relative jump; fix the offset. */
- if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+ /*
+ * 0xe8 is a relative jump; fix the offset.
+ *
+ * Instruction length is checked before the opcode to avoid
+ * accessing uninitialized bytes for zero-length replacements.
+ */
+ if (a->replacementlen == 5 && *insnbuf == 0xe8) {
*(s32 *)(insnbuf + 1) += replacement - instr;
DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
*(s32 *)(insnbuf + 1),
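
The reordering above matters because insnbuf holds only a->replacementlen valid bytes; with a zero-length replacement, testing the opcode first dereferenced uninitialized buffer contents. A minimal userspace sketch (not the kernel code) of the same short-circuit guard:

#include <stdio.h>
#include <string.h>

/* && short-circuits: the opcode byte is only read once the length
 * check has already passed, so a zero-length buffer is never touched. */
static int is_rel_call(const unsigned char *insnbuf, size_t len)
{
	return len == 5 && insnbuf[0] == 0xe8;
}

int main(void)
{
	unsigned char buf[16];

	memcpy(buf, "\xe8\x01\x02\x03\x04", 5);
	printf("len 5: %d\n", is_rel_call(buf, 5));	/* 1 */
	printf("len 0: %d\n", is_rel_call(buf, 0));	/* 0, buf never read */
	return 0;
}
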
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
index ae50d3454d78..81ff48905623 100644
--- a/arch/x86/kernel/apic/htirq.c
+++ b/arch/x86/kernel/apic/htirq.c
@@ -150,7 +150,7 @@ static const struct irq_domain_ops htirq_domain_ops = {
.deactivate = htirq_domain_deactivate,
};
-void arch_init_htirq_domain(struct irq_domain *parent)
+void __init arch_init_htirq_domain(struct irq_domain *parent)
{
if (disable_apic)
return;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 347bb9f65737..247880fc29f9 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1200,28 +1200,6 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
static struct irq_chip ioapic_chip, ioapic_ir_chip;
-#ifdef CONFIG_X86_32
-static inline int IO_APIC_irq_trigger(int irq)
-{
- int apic, idx, pin;
-
- for_each_ioapic_pin(apic, pin) {
- idx = find_irq_entry(apic, pin, mp_INT);
- if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin, 0)))
- return irq_trigger(idx);
- }
- /*
- * nonexistent IRQs are edge default
- */
- return 0;
-}
-#else
-static inline int IO_APIC_irq_trigger(int irq)
-{
- return 1;
-}
-#endif
-
static void __init setup_IO_APIC_irqs(void)
{
unsigned int ioapic, pin;
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index c61aec7e65f4..4c5d1882bec5 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -136,7 +136,7 @@ static struct msi_domain_info pci_msi_domain_info = {
.handler_name = "edge",
};
-void arch_init_msi_domain(struct irq_domain *parent)
+void __init arch_init_msi_domain(struct irq_domain *parent)
{
if (disable_apic)
return;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f3557a1eb562..e66d8e48e456 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -405,7 +405,7 @@ int __init arch_probe_nr_irqs(void)
}
#ifdef CONFIG_X86_IO_APIC
-static void init_legacy_irqs(void)
+static void __init init_legacy_irqs(void)
{
int i, node = cpu_to_node(0);
struct apic_chip_data *data;
@@ -424,7 +424,7 @@ static void init_legacy_irqs(void)
}
}
#else
-static void init_legacy_irqs(void) { }
+static inline void init_legacy_irqs(void) { }
#endif
int __init arch_early_irq_init(void)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ee8f11800295..bb5abe8f5fd4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -799,8 +799,9 @@ static void init_amd(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
- /* AMD CPUs don't reset SS attributes on SYSRET */
- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ /* AMD CPUs don't reset SS attributes on SYSRET; Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index a70fd61095f8..6f077445647a 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
break;
case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+ case 11: /* GX1 with inverted Device ID */
#ifdef CONFIG_PCI
{
u32 vendor, device;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index f5af0cc7eb0d..9257bd9dc664 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -856,11 +856,13 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type,
dentry = kernfs_mount(fs_type, flags, rdt_root,
RDTGROUP_SUPER_MAGIC, NULL);
if (IS_ERR(dentry))
- goto out_cdp;
+ goto out_destroy;
static_branch_enable(&rdt_enable_key);
goto out;
+out_destroy:
+ kernfs_remove(kn_info);
out_cdp:
cdp_disable();
out:
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5abd4bf73d6e..5cfbaeb6529a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m)
return 1;
}
-static bool memory_error(struct mce *m)
+bool mce_is_memory_error(struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
-
- if (c->x86_vendor == X86_VENDOR_AMD) {
+ if (m->cpuvendor == X86_VENDOR_AMD) {
/* ErrCodeExt[20:16] */
u8 xec = (m->status >> 16) & 0x1f;
return (xec == 0x0 || xec == 0x8);
- } else if (c->x86_vendor == X86_VENDOR_INTEL) {
+ } else if (m->cpuvendor == X86_VENDOR_INTEL) {
/*
* Intel SDM Volume 3B - 15.9.2 Compound Error Codes
*
@@ -529,6 +527,7 @@ static bool memory_error(struct mce *m)
return false;
}
+EXPORT_SYMBOL_GPL(mce_is_memory_error);
static bool cec_add_mce(struct mce *m)
{
@@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m)
return false;
/* We eat only correctable DRAM errors with usable addresses. */
- if (memory_error(m) &&
+ if (mce_is_memory_error(m) &&
!(m->status & MCI_STATUS_UC) &&
mce_usable_address(m))
if (!cec_add_elem(m->addr >> PAGE_SHIFT))
@@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
- if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
+ if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
if (m.status & MCI_STATUS_ADDRV)
m.severity = severity;
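
A side effect of switching from boot_cpu_data to m->cpuvendor is that the classification depends only on the MCE record itself, so it also works for records that did not originate on the booting CPU. A hedged userspace sketch of the AMD branch shown above (the struct and vendor constant are stand-ins, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct mce_rec {
	uint64_t status;
	uint8_t cpuvendor;	/* vendor captured when the record was logged */
};

#define VENDOR_AMD 2	/* made-up constant for the sketch */

static int rec_is_memory_error(const struct mce_rec *m)
{
	if (m->cpuvendor == VENDOR_AMD) {
		uint8_t xec = (m->status >> 16) & 0x1f;	/* ErrCodeExt[20:16] */
		return xec == 0x0 || xec == 0x8;
	}
	return 0;	/* the Intel compound-error decode is elided here */
}

int main(void)
{
	struct mce_rec m = { .status = 0x8ULL << 16, .cpuvendor = VENDOR_AMD };

	printf("memory error: %d\n", rec_is_memory_error(&m));	/* 1 */
	return 0;
}
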
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 7889ae492af0..21b185793c80 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -10,7 +10,7 @@
* Author: Peter Oruba <peter.oruba@amd.com>
*
* Based on work by:
- * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ * Tigran Aivazian <aivazian.tigran@gmail.com>
*
* early loader:
* Copyright (C) 2013 Advanced Micro Devices, Inc.
@@ -251,7 +251,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
#endif
}
-void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
+static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
struct ucode_cpu_info *uci;
struct cpio_data cp;
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
}
static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
if (!desc.mc)
return -EINVAL;
- ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
- desc.data, desc.size);
+ ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret != UCODE_OK)
return -EINVAL;
@@ -352,8 +351,6 @@ void reload_ucode_amd(void)
u32 rev, dummy;
mc = (struct microcode_amd *)amd_ucode_patch;
- if (!mc)
- return;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
@@ -677,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
}
static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
@@ -691,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
#ifdef CONFIG_X86_32
/* save BSP's matching patch for early load */
- if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
- struct ucode_patch *p = find_patch(cpu);
+ if (save) {
+ struct ucode_patch *p = find_patch(0);
if (p) {
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
@@ -724,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
struct cpuinfo_x86 *c = &cpu_data(cpu);
+ bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
enum ucode_state ret = UCODE_NFOUND;
const struct firmware *fw;
/* reload ucode container only on the boot cpu */
- if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+ if (!refresh_fw || !bsp)
return UCODE_OK;
if (c->x86 >= 0x15)
@@ -745,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
goto fw_release;
}
- ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+ ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b4a4cd39b358..9cb98ee103db 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -1,7 +1,7 @@
/*
* CPU Microcode Update Driver for Linux
*
- * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
* 2006 Shaohua Li <shaohua.li@intel.com>
* 2013-2016 Borislav Petkov <bp@alien8.de>
*
@@ -290,6 +290,17 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
return (struct cpio_data){ NULL, 0, "" };
if (initrd_start)
start = initrd_start;
+ } else {
+ /*
+ * The picture with physical addresses is a bit different: we
+ * need to get the *physical* address to which the ramdisk was
+ * relocated, i.e., relocated_ramdisk (not initrd_start) and
+ * since we're running from physical addresses, we need to access
+ * relocated_ramdisk through its *physical* address too.
+ */
+ u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
+ if (*rr)
+ start = *rr;
}
return find_cpio_data(path, (void *)start, size, NULL);
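
The double indirection in the new else branch is easy to miss: both the pointer variable and the value it holds must be reached physically. A userspace illustration of the idea, with virt_to_phys_sim() as a made-up stand-in for __pa_nodebug():

#include <stdint.h>
#include <stdio.h>

static uint64_t relocated_ramdisk_sim = 0x7f000000;	/* pretend value */

/* Identity here; in the kernel this is a fixed virt-to-phys offset. */
static void *virt_to_phys_sim(void *virt)
{
	return virt;
}

int main(void)
{
	/* Step 1: reach the *variable* through its physical address... */
	uint64_t *rr = virt_to_phys_sim(&relocated_ramdisk_sim);
	uint64_t start = 0;

	/* ...step 2: the value it holds is the ramdisk's physical address. */
	if (*rr)
		start = *rr;
	printf("scan microcode at %#llx\n", (unsigned long long)start);
	return 0;
}
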
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8325d8a09ab0..59edbe9d4ccb 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -1,7 +1,7 @@
/*
* Intel CPU Microcode Update Driver for Linux
*
- * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
* 2006 Shaohua Li <shaohua.li@intel.com>
*
* Intel CPU microcode early update for Linux
@@ -42,7 +42,7 @@
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
/* Current microcode patch used in early patching on the APs. */
-struct microcode_intel *intel_ucode_patch;
+static struct microcode_intel *intel_ucode_patch;
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
unsigned int s2, unsigned int p2)
@@ -166,7 +166,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
static void save_microcode_patch(void *data, unsigned int size)
{
struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
- struct ucode_patch *iter, *tmp, *p;
+ struct ucode_patch *iter, *tmp, *p = NULL;
bool prev_found = false;
unsigned int sig, pf;
@@ -202,6 +202,18 @@ static void save_microcode_patch(void *data, unsigned int size)
else
list_add_tail(&p->plist, &microcode_cache);
}
+
+ /*
+ * Save for early loading. On 32-bit, that needs to be a physical
+ * address as the APs are running from physical addresses, before
+ * paging has been enabled.
+ */
+ if (p) {
+ if (IS_ENABLED(CONFIG_X86_32))
+ intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+ else
+ intel_ucode_patch = p->data;
+ }
}
static int microcode_sanity_check(void *mc, int print_err)
@@ -607,6 +619,14 @@ int __init save_microcode_in_initrd_intel(void)
struct ucode_cpu_info uci;
struct cpio_data cp;
+ /*
+ * initrd is going away, clear patch ptr. We will scan the microcode one
+ * last time before jettisoning and save a patch, if found. Then we will
+ * update that pointer too, with a stable patch address to use when
+ * resuming the cores.
+ */
+ intel_ucode_patch = NULL;
+
if (!load_builtin_intel_microcode(&cp))
cp = find_microcode_in_initrd(ucode_path, false);
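
The pattern here is invalidate-then-republish: a cached pointer into memory that is about to be jettisoned is cleared first, and only repointed once a stable copy exists. A userspace sketch of that lifecycle (all names made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *cached_patch;	/* plays the role of intel_ucode_patch */

int main(void)
{
	char *initrd = strdup("ucode-blob");

	cached_patch = initrd;		/* currently points into the initrd */

	cached_patch = NULL;		/* initrd is going away: clear it */
	char *stable = strdup(initrd);	/* save a stable copy first... */
	free(initrd);			/* ...then jettison the initrd */
	cached_patch = stable;		/* republish a safe address */

	printf("patch: %s\n", cached_patch);
	free(stable);
	return 0;
}
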
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 04cb8d34ccb8..70e717fccdd6 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -161,6 +161,15 @@ static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
}
#endif
+static unsigned long hv_get_tsc_khz(void)
+{
+ unsigned long freq;
+
+ rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
+
+ return freq / 1000;
+}
+
static void __init ms_hyperv_init_platform(void)
{
int hv_host_info_eax;
@@ -193,8 +202,15 @@ static void __init ms_hyperv_init_platform(void)
hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
}
+ if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+ ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
+ x86_platform.calibrate_tsc = hv_get_tsc_khz;
+ x86_platform.calibrate_cpu = hv_get_tsc_khz;
+ }
+
#ifdef CONFIG_X86_LOCAL_APIC
- if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
+ if (ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+ ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
/*
* Get the APIC frequency.
*/
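
Both bits have to be set before hv_get_tsc_khz() may touch the MSR, and the division by 1000 implies the MSR reports the rate in Hz. A sketch of the gate with a simulated MSR read (register values are made up):

#include <stdint.h>
#include <stdio.h>

#define HV_X64_ACCESS_FREQUENCY_MSRS		(1 << 11)
#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE	(1 << 8)

static uint64_t rdmsr_sim(void)
{
	return 2500000000ULL;	/* pretend HV_X64_MSR_TSC_FREQUENCY: 2.5 GHz */
}

int main(void)
{
	uint32_t features = HV_X64_ACCESS_FREQUENCY_MSRS;
	uint32_t misc_features = HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;

	/* Mirror the double check above: access bit AND availability bit. */
	if ((features & HV_X64_ACCESS_FREQUENCY_MSRS) &&
	    (misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE))
		printf("tsc: %llu kHz\n",
		       (unsigned long long)(rdmsr_sim() / 1000));
	return 0;
}
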
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 8e598a1ad986..6b91e2eb8d3f 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -125,7 +125,7 @@ void __init init_espfix_bsp(void)
p4d_t *p4d;
/* Install the espfix pud into the kernel page directory */
- pgd = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+ pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
p4d_populate(&init_mm, p4d, espfix_pud_page);
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index c2f8dde3255c..d5d44c452624 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -90,6 +90,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
* Boot time FPU feature detection code:
*/
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
static void __init fpu__init_system_mxcsr(void)
{
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0651e974dcb3..9bef1bbeba63 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)
{
return module_alloc(size);
}
-static inline void tramp_free(void *tramp)
+static inline void tramp_free(void *tramp, int size)
{
+ int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ set_memory_nx((unsigned long)tramp, npages);
+ set_memory_rw((unsigned long)tramp, npages);
module_memfree(tramp);
}
#else
@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)
{
return NULL;
}
-static inline void tramp_free(void *tramp) { }
+static inline void tramp_free(void *tramp, int size) { }
#endif
/* Defined as markers to the end of the ftrace default trampolines */
@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
if (WARN_ON(ret < 0)) {
- tramp_free(trampoline);
+ tramp_free(trampoline, *tramp_size);
return 0;
}
@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* Are we pointing to the reference? */
if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
- tramp_free(trampoline);
+ tramp_free(trampoline, *tramp_size);
return 0;
}
@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
unsigned long offset;
unsigned long ip;
unsigned int size;
- int ret;
+ int ret, npages;
if (ops->trampoline) {
/*
@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
*/
if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
+ npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
+ set_memory_rw(ops->trampoline, npages);
} else {
ops->trampoline = create_trampoline(ops, &size);
if (!ops->trampoline)
return;
ops->trampoline_size = size;
+ npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
}
offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
+ set_memory_ro(ops->trampoline, npages);
/* The update should never fail */
WARN_ON(ret);
@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
- tramp_free((void *)ops->trampoline);
+ tramp_free((void *)ops->trampoline, ops->trampoline_size);
ops->trampoline = 0;
}
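
The size parameter threaded through tramp_free() exists so the trampoline pages, made read-only and executable while live, can be flipped back to writable and non-executable before module_memfree() returns them. A userspace analogue of that lifecycle, with mprotect() standing in for set_memory_ro()/set_memory_rw()/set_memory_nx():

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t sz = sysconf(_SC_PAGESIZE);
	unsigned char *tramp = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (tramp == MAP_FAILED)
		return 1;

	tramp[0] = 0xc3;	/* a lone "ret": trivial x86 payload */
	mprotect(tramp, sz, PROT_READ | PROT_EXEC);	/* lock for use */

	((void (*)(void))tramp)();	/* call through the trampoline */

	mprotect(tramp, sz, PROT_READ | PROT_WRITE);	/* unlock first... */
	munmap(tramp, sz);		/* ...then release */
	puts("trampoline freed");
	return 0;
}
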
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 43b7002f44fb..46c3c73e7f43 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -33,17 +33,120 @@
/*
* Manage page tables very early on.
*/
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
-static unsigned int __initdata next_early_pgt = 2;
+static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+#define __head __section(.head.text)
+
+static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
+{
+ return ptr - (void *)_text + (void *)physaddr;
+}
+
+void __head __startup_64(unsigned long physaddr)
+{
+ unsigned long load_delta, *p;
+ pgdval_t *pgd;
+ p4dval_t *p4d;
+ pudval_t *pud;
+ pmdval_t *pmd, pmd_entry;
+ int i;
+
+ /* Is the address too large? */
+ if (physaddr >> MAX_PHYSMEM_BITS)
+ for (;;);
+
+ /*
+ * Compute the delta between the address I am compiled to run at
+ * and the address I am actually running at.
+ */
+ load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
+
+ /* Is the address not 2M aligned? */
+ if (load_delta & ~PMD_PAGE_MASK)
+ for (;;);
+
+ /* Fixup the physical addresses in the page table */
+
+ pgd = fixup_pointer(&early_top_pgt, physaddr);
+ pgd[pgd_index(__START_KERNEL_map)] += load_delta;
+
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
+ p4d[511] += load_delta;
+ }
+
+ pud = fixup_pointer(&level3_kernel_pgt, physaddr);
+ pud[510] += load_delta;
+ pud[511] += load_delta;
+
+ pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
+ pmd[506] += load_delta;
+
+ /*
+ * Set up the identity mapping for the switchover. These
+ * entries should *NOT* have the global bit set! This also
+ * creates a bunch of nonsense entries but that is fine --
+ * it avoids problems around wraparound.
+ */
+
+ pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+ pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+
+ if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+ p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+
+ i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+ pgd[i + 0] = (pgdval_t)p4d + _KERNPG_TABLE;
+ pgd[i + 1] = (pgdval_t)p4d + _KERNPG_TABLE;
+
+ i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
+ p4d[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
+ p4d[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+ } else {
+ i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
+ pgd[i + 0] = (pgdval_t)pud + _KERNPG_TABLE;
+ pgd[i + 1] = (pgdval_t)pud + _KERNPG_TABLE;
+ }
+
+ i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
+ pud[i + 0] = (pudval_t)pmd + _KERNPG_TABLE;
+ pud[i + 1] = (pudval_t)pmd + _KERNPG_TABLE;
+
+ pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
+ pmd_entry += physaddr;
+
+ for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
+ int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
+ pmd[idx] = pmd_entry + i * PMD_SIZE;
+ }
+
+ /*
+ * Fixup the kernel text+data virtual addresses. Note that
+ * we might write invalid pmds when the kernel is relocated;
+ * cleanup_highmap() fixes this up along with the mappings
+ * beyond _end.
+ */
+
+ pmd = fixup_pointer(level2_kernel_pgt, physaddr);
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (pmd[i] & _PAGE_PRESENT)
+ pmd[i] += load_delta;
+ }
+
+ /* Fixup phys_base */
+ p = fixup_pointer(&phys_base, physaddr);
+ *p += load_delta;
+}
+
/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
- memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
+ memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
next_early_pgt = 0;
- write_cr3(__pa_nodebug(early_level4_pgt));
+ write_cr3(__pa_nodebug(early_top_pgt));
}
/* Create a new PMD entry */
@@ -51,15 +154,16 @@ int __init early_make_pgtable(unsigned long address)
{
unsigned long physaddr = address - __PAGE_OFFSET;
pgdval_t pgd, *pgd_p;
+ p4dval_t p4d, *p4d_p;
pudval_t pud, *pud_p;
pmdval_t pmd, *pmd_p;
/* Invalid address or early pgt is done ? */
- if (physaddr >= MAXMEM || read_cr3() != __pa_nodebug(early_level4_pgt))
+ if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
return -1;
again:
- pgd_p = &early_level4_pgt[pgd_index(address)].pgd;
+ pgd_p = &early_top_pgt[pgd_index(address)].pgd;
pgd = *pgd_p;
/*
@@ -67,8 +171,25 @@ again:
* critical -- __PAGE_OFFSET would point us back into the dynamic
* range and we might end up looping forever...
*/
- if (pgd)
- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+ if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+ p4d_p = pgd_p;
+ else if (pgd)
+ p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
+ else {
+ if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
+ reset_early_page_tables();
+ goto again;
+ }
+
+ p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
+ memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
+ *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+ }
+ p4d_p += p4d_index(address);
+ p4d = *p4d_p;
+
+ if (p4d)
+ pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
else {
if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
reset_early_page_tables();
@@ -77,7 +198,7 @@ again:
pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
+ *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
}
pud_p += pud_index(address);
pud = *pud_p;
@@ -156,7 +277,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
clear_bss();
- clear_page(init_level4_pgt);
+ clear_page(init_top_pgt);
kasan_early_init();
@@ -171,8 +292,8 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
*/
load_ucode_bsp();
- /* set init_level4_pgt kernel high mapping*/
- init_level4_pgt[511] = early_level4_pgt[511];
+ /* set init_top_pgt kernel high mapping */
+ init_top_pgt[511] = early_top_pgt[511];
x86_64_start_reservations(real_mode_data);
}
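
The core trick in the __startup_64() C port above is fixup_pointer(): before the page tables reflect the actual load address, a link-time address is rebased by subtracting _text and adding the physical load address. A worked example with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t text_link = 0xffffffff81000000ULL;	/* link-time _text */
	uint64_t physaddr  = 0x0000000001000000ULL;	/* actual load address */
	uint64_t obj_link  = text_link + 0x2000;	/* some linked object */

	/* fixup_pointer(): ptr - _text + physaddr */
	uint64_t obj_phys = obj_link - text_link + physaddr;

	printf("access at %#llx\n", (unsigned long long)obj_phys);	/* 0x1002000 */
	return 0;
}
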
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ac9d327d2e42..6225550883df 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -37,10 +37,11 @@
*
*/
+#define p4d_index(x) (((x) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
-L4_START_KERNEL = pgd_index(__START_KERNEL_map)
+PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
+PGD_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
.text
@@ -72,101 +73,12 @@ startup_64:
/* Sanitize CPU configuration */
call verify_cpu
- /*
- * Compute the delta between the address I am compiled to run at and the
- * address I am actually running at.
- */
- leaq _text(%rip), %rbp
- subq $_text - __START_KERNEL_map, %rbp
-
- /* Is the address not 2M aligned? */
- testl $~PMD_PAGE_MASK, %ebp
- jnz bad_address
-
- /*
- * Is the address too large?
- */
- leaq _text(%rip), %rax
- shrq $MAX_PHYSMEM_BITS, %rax
- jnz bad_address
-
- /*
- * Fixup the physical addresses in the page table
- */
- addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
-
- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
-
- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
-
- /*
- * Set up the identity mapping for the switchover. These
- * entries should *NOT* have the global bit set! This also
- * creates a bunch of nonsense entries but that is fine --
- * it avoids problems around wraparound.
- */
leaq _text(%rip), %rdi
- leaq early_level4_pgt(%rip), %rbx
-
- movq %rdi, %rax
- shrq $PGDIR_SHIFT, %rax
-
- leaq (PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
- movq %rdx, 0(%rbx,%rax,8)
- movq %rdx, 8(%rbx,%rax,8)
-
- addq $PAGE_SIZE, %rdx
- movq %rdi, %rax
- shrq $PUD_SHIFT, %rax
- andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, PAGE_SIZE(%rbx,%rax,8)
- incl %eax
- andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, PAGE_SIZE(%rbx,%rax,8)
-
- addq $PAGE_SIZE * 2, %rbx
- movq %rdi, %rax
- shrq $PMD_SHIFT, %rdi
- addq $(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
- leaq (_end - 1)(%rip), %rcx
- shrq $PMD_SHIFT, %rcx
- subq %rdi, %rcx
- incl %ecx
-
-1:
- andq $(PTRS_PER_PMD - 1), %rdi
- movq %rax, (%rbx,%rdi,8)
- incq %rdi
- addq $PMD_SIZE, %rax
- decl %ecx
- jnz 1b
-
- test %rbp, %rbp
- jz .Lskip_fixup
+ pushq %rsi
+ call __startup_64
+ popq %rsi
- /*
- * Fixup the kernel text+data virtual addresses. Note that
- * we might write invalid pmds, when the kernel is relocated
- * cleanup_highmap() fixes this up along with the mappings
- * beyond _end.
- */
- leaq level2_kernel_pgt(%rip), %rdi
- leaq PAGE_SIZE(%rdi), %r8
- /* See if it is a valid page table entry */
-1: testb $_PAGE_PRESENT, 0(%rdi)
- jz 2f
- addq %rbp, 0(%rdi)
- /* Go to the next page */
-2: addq $8, %rdi
- cmp %r8, %rdi
- jne 1b
-
- /* Fixup phys_base */
- addq %rbp, phys_base(%rip)
-
-.Lskip_fixup:
- movq $(early_level4_pgt - __START_KERNEL_map), %rax
+ movq $(early_top_pgt - __START_KERNEL_map), %rax
jmp 1f
ENTRY(secondary_startup_64)
/*
@@ -186,14 +98,17 @@ ENTRY(secondary_startup_64)
/* Sanitize CPU configuration */
call verify_cpu
- movq $(init_level4_pgt - __START_KERNEL_map), %rax
+ movq $(init_top_pgt - __START_KERNEL_map), %rax
1:
- /* Enable PAE mode and PGE */
+ /* Enable PAE mode, PGE and LA57 */
movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
+#ifdef CONFIG_X86_5LEVEL
+ orl $X86_CR4_LA57, %ecx
+#endif
movq %rcx, %cr4
- /* Setup early boot stage 4 level pagetables. */
+ /* Setup early boot stage 4-/5-level pagetables. */
addq phys_base(%rip), %rax
movq %rax, %cr3
@@ -417,9 +332,13 @@ GLOBAL(name)
.endr
__INITDATA
-NEXT_PAGE(early_level4_pgt)
+NEXT_PAGE(early_top_pgt)
.fill 511,8,0
+#ifdef CONFIG_X86_5LEVEL
+ .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+#else
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+#endif
NEXT_PAGE(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -427,14 +346,14 @@ NEXT_PAGE(early_dynamic_pgts)
.data
#ifndef CONFIG_XEN
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
.fill 512,8,0
#else
-NEXT_PAGE(init_level4_pgt)
+NEXT_PAGE(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
+ .org init_top_pgt + PGD_PAGE_OFFSET*8, 0
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .org init_level4_pgt + L4_START_KERNEL*8, 0
+ .org init_top_pgt + PGD_START_KERNEL*8, 0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
@@ -448,6 +367,12 @@ NEXT_PAGE(level2_ident_pgt)
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif
+#ifdef CONFIG_X86_5LEVEL
+NEXT_PAGE(level4_kernel_pgt)
+ .fill 511,8,0
+ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+#endif
+
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index be22f5a2192e..4e3b8a587c88 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -418,6 +418,7 @@ struct legacy_pic default_legacy_pic = {
};
struct legacy_pic *legacy_pic = &default_legacy_pic;
+EXPORT_SYMBOL(legacy_pic);
static int __init i8259A_init_ops(void)
{
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 5b2bbfbb3712..6b877807598b 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -52,6 +52,7 @@
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
+#include <linux/moduleloader.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)
}
}
+/* Recover page to RW mode before releasing it */
+void free_insn_page(void *page)
+{
+ set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+ set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+ module_memfree(page);
+}
+
static int arch_copy_kprobe(struct kprobe *p)
{
struct insn insn;
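
Page permissions are managed per page, so free_insn_page() first aligns the interior pointer down with PAGE_MASK before flipping protections. A one-line sketch of that rounding (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	unsigned long addr = 0x1234abcdUL;

	printf("page start: %#lx\n", addr & PAGE_MASK);	/* 0x1234a000 */
	return 0;
}
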
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 901c640d152f..69ea0bc1cfa3 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -28,6 +28,7 @@
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
+#include <linux/frame.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -94,6 +95,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
}
asm (
+ "optprobe_template_func:\n"
".global optprobe_template_entry\n"
"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
@@ -131,7 +133,12 @@ asm (
" popf\n"
#endif
".global optprobe_template_end\n"
- "optprobe_template_end:\n");
+ "optprobe_template_end:\n"
+ ".type optprobe_template_func, @function\n"
+ ".size optprobe_template_func, .-optprobe_template_func\n");
+
+void optprobe_template_func(void);
+STACK_FRAME_NON_STANDARD(optprobe_template_func);
#define TMPL_MOVE_IDX \
((long)&optprobe_template_val - (long)&optprobe_template_entry)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index da5c09789984..43e10d6fdbed 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
*/
rcu_irq_exit();
native_safe_halt();
- rcu_irq_enter();
local_irq_disable();
+ rcu_irq_enter();
}
}
if (!n.halted)
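
The swap above appears to exist because rcu_irq_enter() must not run with interrupts enabled, and native_safe_halt() wakes up with them on. A userspace model of that ordering invariant (the helpers are stand-ins, not kernel APIs):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static void halt(void)          { irqs_enabled = true; }   /* wakes with IRQs on */
static void irq_disable(void)   { irqs_enabled = false; }
static void enter_section(void) { assert(!irqs_enabled); } /* modelled precondition */

int main(void)
{
	halt();
	irq_disable();		/* the old code swapped these two calls... */
	enter_section();	/* ...and this assertion would have fired */
	puts("ordering ok");
	return 0;
}
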
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index d4a15831ac58..a870910c8565 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -22,24 +22,25 @@
#include <asm/syscalls.h>
/* context.lock is held for us, so we don't need any locking. */
-static void flush_ldt(void *current_mm)
+static void flush_ldt(void *__mm)
{
+ struct mm_struct *mm = __mm;
mm_context_t *pc;
- if (current->active_mm != current_mm)
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
return;
- pc = &current->active_mm->context;
- set_ldt(pc->ldt->entries, pc->ldt->size);
+ pc = &mm->context;
+ set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
}
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
-static struct ldt_struct *alloc_ldt_struct(unsigned int size)
+static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
{
struct ldt_struct *new_ldt;
unsigned int alloc_size;
- if (size > LDT_ENTRIES)
+ if (num_entries > LDT_ENTRIES)
return NULL;
new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
@@ -47,7 +48,7 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
return NULL;
BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
- alloc_size = size * LDT_ENTRY_SIZE;
+ alloc_size = num_entries * LDT_ENTRY_SIZE;
/*
* Xen is very picky: it requires a page-aligned LDT that has no
@@ -65,14 +66,14 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int size)
return NULL;
}
- new_ldt->size = size;
+ new_ldt->nr_entries = num_entries;
return new_ldt;
}
/* After calling this, the LDT is immutable. */
static void finalize_ldt_struct(struct ldt_struct *ldt)
{
- paravirt_alloc_ldt(ldt->entries, ldt->size);
+ paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
}
/* context.lock is held */
@@ -91,8 +92,8 @@ static void free_ldt_struct(struct ldt_struct *ldt)
if (likely(!ldt))
return;
- paravirt_free_ldt(ldt->entries, ldt->size);
- if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+ paravirt_free_ldt(ldt->entries, ldt->nr_entries);
+ if (ldt->nr_entries * LDT_ENTRY_SIZE > PAGE_SIZE)
vfree_atomic(ldt->entries);
else
free_page((unsigned long)ldt->entries);
@@ -122,14 +123,14 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
goto out_unlock;
}
- new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+ new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
if (!new_ldt) {
retval = -ENOMEM;
goto out_unlock;
}
memcpy(new_ldt->entries, old_mm->context.ldt->entries,
- new_ldt->size * LDT_ENTRY_SIZE);
+ new_ldt->nr_entries * LDT_ENTRY_SIZE);
finalize_ldt_struct(new_ldt);
mm->context.ldt = new_ldt;
@@ -152,9 +153,9 @@ void destroy_context_ldt(struct mm_struct *mm)
static int read_ldt(void __user *ptr, unsigned long bytecount)
{
- int retval;
- unsigned long size;
struct mm_struct *mm = current->mm;
+ unsigned long entries_size;
+ int retval;
mutex_lock(&mm->context.lock);
@@ -166,18 +167,18 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
- size = mm->context.ldt->size * LDT_ENTRY_SIZE;
- if (size > bytecount)
- size = bytecount;
+ entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
+ if (entries_size > bytecount)
+ entries_size = bytecount;
- if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+ if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
retval = -EFAULT;
goto out_unlock;
}
- if (size != bytecount) {
+ if (entries_size != bytecount) {
/* Zero-fill the rest and pretend we read bytecount bytes. */
- if (clear_user(ptr + size, bytecount - size)) {
+ if (clear_user(ptr + entries_size, bytecount - entries_size)) {
retval = -EFAULT;
goto out_unlock;
}
@@ -208,7 +209,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
{
struct mm_struct *mm = current->mm;
struct ldt_struct *new_ldt, *old_ldt;
- unsigned int oldsize, newsize;
+ unsigned int old_nr_entries, new_nr_entries;
struct user_desc ldt_info;
struct desc_struct ldt;
int error;
@@ -247,17 +248,18 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
mutex_lock(&mm->context.lock);
- old_ldt = mm->context.ldt;
- oldsize = old_ldt ? old_ldt->size : 0;
- newsize = max(ldt_info.entry_number + 1, oldsize);
+ old_ldt = mm->context.ldt;
+ old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
+ new_nr_entries = max(ldt_info.entry_number + 1, old_nr_entries);
error = -ENOMEM;
- new_ldt = alloc_ldt_struct(newsize);
+ new_ldt = alloc_ldt_struct(new_nr_entries);
if (!new_ldt)
goto out_unlock;
if (old_ldt)
- memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+ memcpy(new_ldt->entries, old_ldt->entries, old_nr_entries * LDT_ENTRY_SIZE);
+
new_ldt->entries[ldt_info.entry_number] = ldt;
finalize_ldt_struct(new_ldt);
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index ce640428d6fe..cb0a30473c23 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -114,7 +114,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
.context = image,
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
};
unsigned long mstart, mend;
pgd_t *level4p;
@@ -123,6 +123,10 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
level4p = (pgd_t *)__va(start_pgtable);
clear_page(level4p);
+
+ if (direct_gbpages)
+ info.direct_gbpages = true;
+
for (i = 0; i < nr_pfn_mapped; i++) {
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend = pfn_mapped[i].end << PAGE_SHIFT;
@@ -343,7 +347,7 @@ void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
VMCOREINFO_NUMBER(phys_base);
- VMCOREINFO_SYMBOL(init_level4_pgt);
+ VMCOREINFO_SYMBOL(init_top_pgt);
#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index 6d9582ec0324..d27f8d84c4ff 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -78,7 +78,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
/* Don't wait longer than a second */
timeout = USEC_PER_SEC;
- while (!cpumask_empty(mask) && timeout--)
+ while (!cpumask_empty(mask) && --timeout)
udelay(1);
/* What happens if we timeout, do we still unregister?? */
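
The one-character fix changes how the loop terminates: with post-decrement the unsigned counter wraps past zero on the final test, so a later !timeout check can never report the timeout. A compact demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long t;

	t = 3;
	while (t--)		/* body runs 3x; final test takes 0 to ULONG_MAX */
		;
	printf("post: %lu\n", t);	/* huge value, !t is false */

	t = 3;
	while (--t)		/* body runs 2x; loop exits exactly at 0 */
		;
	printf("pre:  %lu\n", t);	/* 0, !t correctly signals timeout */
	return 0;
}
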
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 3586996fc50d..bc0a849589bb 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -391,7 +391,7 @@ struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
.read_cr2 = native_read_cr2,
.write_cr2 = native_write_cr2,
- .read_cr3 = native_read_cr3,
+ .read_cr3 = __native_read_cr3,
.write_cr3 = native_write_cr3,
.flush_tlb_user = native_flush_tlb,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0bb88428cbf2..3ca198080ea9 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -545,17 +545,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
/*
- * Return saved PC of a blocked thread.
- * What is this good for? it will be always the scheduler or ret_from_fork.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
- struct inactive_task_frame *frame =
- (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
- return READ_ONCE_NOCHECK(frame->ret_addr);
-}
-
-/*
* Called from fs/proc with a reference on @p to find the function
* which called into schedule(). This needs to be done carefully
* because the task might wake up and we might look at a stack
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index ff40e74c9181..c6d6dc5f8bb2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)
printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
- smp_processor_id());
+ raw_smp_processor_id());
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->ax, regs->bx, regs->cx, regs->dx);
@@ -92,7 +92,7 @@ void __show_regs(struct pt_regs *regs, int all)
cr0 = read_cr0();
cr2 = read_cr2();
- cr3 = read_cr3();
+ cr3 = __read_cr3();
cr4 = __read_cr4();
printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
cr0, cr2, cr3, cr4);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b6840bf3940b..c3169be4c596 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -104,7 +104,7 @@ void __show_regs(struct pt_regs *regs, int all)
cr0 = read_cr0();
cr2 = read_cr2();
- cr3 = read_cr3();
+ cr3 = __read_cr3();
cr4 = __read_cr4();
printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
@@ -142,7 +142,7 @@ void release_thread(struct task_struct *dead_task)
pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
dead_task->comm,
dead_task->mm->context.ldt->entries,
- dead_task->mm->context.ldt->size);
+ dead_task->mm->context.ldt->nr_entries);
BUG();
}
#endif
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2544700a2a87..67393fc88353 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/tboot.h>
#include <linux/delay.h>
+#include <linux/frame.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
@@ -123,6 +124,7 @@ void __noreturn machine_real_restart(unsigned int type)
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
#endif
+STACK_FRAME_NON_STANDARD(machine_real_restart);
/*
* Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 603a1669a2ec..65622f07e633 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -503,7 +503,7 @@ static int __init reserve_crashkernel_low(void)
return 0;
}
- low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
+ low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN);
if (!low_base) {
pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
(unsigned long)(low_size >> 20));
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)
*/
x86_configure_nx();
- simple_udelay_calibration();
-
parse_early_param();
#ifdef CONFIG_MEMORY_HOTPLUG
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)
*/
init_hypervisor_platform();
+ simple_udelay_calibration();
+
x86_init.resources.probe_roms();
/* after parse_early_param, so could debug it */
@@ -1225,6 +1225,21 @@ void __init setup_arch(char **cmdline_p)
kasan_init();
+#ifdef CONFIG_X86_32
+ /* sync back kernel address range */
+ clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ KERNEL_PGD_PTRS);
+
+ /*
+ * sync back low identity map too. It is used for example
+ * in the 32-bit EFI stub.
+ */
+ clone_pgd_range(initial_page_table,
+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+ min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+#endif
+
tboot_probe();
map_vsyscall();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index bb1e8cc0bc84..10edd1e69a68 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -291,11 +291,11 @@ void __init setup_per_cpu_areas(void)
#ifdef CONFIG_X86_32
/*
- * Sync back kernel address range. We want to make sure that
- * all kernel mappings, including percpu mappings, are available
- * in the smpboot asm. We can't reliably pick up percpu
- * mappings using vmalloc_fault(), because exception dispatch
- * needs percpu data.
+ * Sync back kernel address range again. We already did this in
+ * setup_arch(), but percpu data also needs to be available in
+ * the smpboot asm. We can't reliably pick up percpu mappings
+ * using vmalloc_fault(), because exception dispatch needs
+ * percpu data.
*/
clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f04479a8f74f..b474c8de7fba 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -863,7 +863,7 @@ static void announce_cpu(int cpu, int apicid)
if (cpu == 1)
printk(KERN_INFO "x86: Booting SMP configuration:\n");
- if (system_state == SYSTEM_BOOTING) {
+ if (system_state < SYSTEM_RUNNING) {
if (node != current_node) {
if (current_node > (-1))
pr_cont("\n");
@@ -1589,7 +1589,6 @@ void native_cpu_die(unsigned int cpu)
void play_dead_common(void)
{
idle_task_exit();
- reset_lazy_tlbstate();
/* Ack it */
(void)cpu_report_death();
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index f07f83b3611b..5f25cfbd952e 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -34,7 +34,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
mutex_lock(&child->mm->context.lock);
if (unlikely(!child->mm->context.ldt ||
- seg >= child->mm->context.ldt->size))
+ seg >= child->mm->context.ldt->nr_entries))
addr = -1L; /* bogus selector, access would fault */
else {
desc = &child->mm->context.ldt->entries[seg];
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 207b8f2582c7..213ddf3e937d 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (end - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
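
The switch to vm_start_gap() makes the hint check account for the stack guard gap: for a downward-growing stack VMA it reports vm_start lowered by the gap, so a hinted mapping that would land inside the gap is rejected rather than accepted. A hedged sketch of the difference (the helper and sizes are stand-ins):

#include <stdio.h>

#define GUARD_GAP	(256UL << 12)	/* e.g. 256 pages; size is made up */

struct vma_sim {
	unsigned long vm_start;
	int grows_down;
};

static unsigned long vm_start_gap_sim(const struct vma_sim *v)
{
	return v->grows_down ? v->vm_start - GUARD_GAP : v->vm_start;
}

int main(void)
{
	struct vma_sim stack = { .vm_start = 0x7f0000000000UL, .grows_down = 1 };
	unsigned long addr = stack.vm_start - (128UL << 12);	/* inside the gap */
	unsigned long len = 4096;

	printf("plain check:     %d\n", addr + len <= stack.vm_start);		  /* 1: accepted */
	printf("gap-aware check: %d\n", addr + len <= vm_start_gap_sim(&stack)); /* 0: rejected */
	return 0;
}
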
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 4b1724059909..a4eb27918ceb 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -514,7 +514,7 @@ int tboot_force_iommu(void)
if (!tboot_enabled())
return 0;
- if (!intel_iommu_tboot_noforce)
+ if (intel_iommu_tboot_noforce)
return 1;
if (no_iommu || swiotlb || dmar_disabled)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 3995d3a777d4..bf54309b85da 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
return ud == INSN_UD0 || ud == INSN_UD2;
}
-static int fixup_bug(struct pt_regs *regs, int trapnr)
+int fixup_bug(struct pt_regs *regs, int trapnr)
{
if (trapnr != X86_TRAP_UD)
return 0;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 714dfba6a1e7..5270fc0c2df6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -51,115 +51,34 @@ static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;
-/*
- * Use a ring-buffer like data structure, where a writer advances the head by
- * writing a new data entry and a reader advances the tail when it observes a
- * new entry.
- *
- * Writers are made to wait on readers until there's space to write a new
- * entry.
- *
- * This means that we can always use an {offset, mul} pair to compute a ns
- * value that is 'roughly' in the right direction, even if we're writing a new
- * {offset, mul} pair during the clock read.
- *
- * The down-side is that we can no longer guarantee strict monotonicity anymore
- * (assuming the TSC was that to begin with), because while we compute the
- * intersection point of the two clock slopes and make sure the time is
- * continuous at the point of switching; we can no longer guarantee a reader is
- * strictly before or after the switch point.
- *
- * It does mean a reader no longer needs to disable IRQs in order to avoid
- * CPU-Freq updates messing with his times, and similarly an NMI reader will
- * no longer run the risk of hitting half-written state.
- */
-
struct cyc2ns {
- struct cyc2ns_data data[2]; /* 0 + 2*24 = 48 */
- struct cyc2ns_data *head; /* 48 + 8 = 56 */
- struct cyc2ns_data *tail; /* 56 + 8 = 64 */
-}; /* exactly fits one cacheline */
-
-static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
-
-struct cyc2ns_data *cyc2ns_read_begin(void)
-{
- struct cyc2ns_data *head;
-
- preempt_disable();
-
- head = this_cpu_read(cyc2ns.head);
- /*
- * Ensure we observe the entry when we observe the pointer to it.
- * matches the wmb from cyc2ns_write_end().
- */
- smp_read_barrier_depends();
- head->__count++;
- barrier();
+ struct cyc2ns_data data[2]; /* 0 + 2*16 = 32 */
+ seqcount_t seq; /* 32 + 4 = 36 */
- return head;
-}
+}; /* fits one cacheline */
-void cyc2ns_read_end(struct cyc2ns_data *head)
-{
- barrier();
- /*
- * If we're the outer most nested read; update the tail pointer
- * when we're done. This notifies possible pending writers
- * that we've observed the head pointer and that the other
- * entry is now free.
- */
- if (!--head->__count) {
- /*
- * x86-TSO does not reorder writes with older reads;
- * therefore once this write becomes visible to another
- * cpu, we must be finished reading the cyc2ns_data.
- *
- * matches with cyc2ns_write_begin().
- */
- this_cpu_write(cyc2ns.tail, head);
- }
- preempt_enable();
-}
+static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
-/*
- * Begin writing a new @data entry for @cpu.
- *
- * Assumes some sort of write side lock; currently 'provided' by the assumption
- * that cpufreq will call its notifiers sequentially.
- */
-static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
+void cyc2ns_read_begin(struct cyc2ns_data *data)
{
- struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
- struct cyc2ns_data *data = c2n->data;
+ int seq, idx;
- if (data == c2n->head)
- data++;
+ preempt_disable_notrace();
- /* XXX send an IPI to @cpu in order to guarantee a read? */
+ do {
+ seq = this_cpu_read(cyc2ns.seq.sequence);
+ idx = seq & 1;
- /*
- * When we observe the tail write from cyc2ns_read_end(),
- * the cpu must be done with that entry and its safe
- * to start writing to it.
- */
- while (c2n->tail == data)
- cpu_relax();
+ data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
+ data->cyc2ns_mul = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
+ data->cyc2ns_shift = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
- return data;
+ } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}
-static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
+void cyc2ns_read_end(void)
{
- struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
- /*
- * Ensure the @data writes are visible before we publish the
- * entry. Matches the data-depencency in cyc2ns_read_begin().
- */
- smp_wmb();
-
- ACCESS_ONCE(c2n->head) = data;
+ preempt_enable_notrace();
}
/*
@@ -191,7 +110,6 @@ static void cyc2ns_data_init(struct cyc2ns_data *data)
data->cyc2ns_mul = 0;
data->cyc2ns_shift = 0;
data->cyc2ns_offset = 0;
- data->__count = 0;
}
static void cyc2ns_init(int cpu)
@@ -201,51 +119,29 @@ static void cyc2ns_init(int cpu)
cyc2ns_data_init(&c2n->data[0]);
cyc2ns_data_init(&c2n->data[1]);
- c2n->head = c2n->data;
- c2n->tail = c2n->data;
+ seqcount_init(&c2n->seq);
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
- struct cyc2ns_data *data, *tail;
+ struct cyc2ns_data data;
unsigned long long ns;
- /*
- * See cyc2ns_read_*() for details; replicated in order to avoid
- * an extra few instructions that came with the abstraction.
- * Notable, it allows us to only do the __count and tail update
- * dance when its actually needed.
- */
-
- preempt_disable_notrace();
- data = this_cpu_read(cyc2ns.head);
- tail = this_cpu_read(cyc2ns.tail);
-
- if (likely(data == tail)) {
- ns = data->cyc2ns_offset;
- ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
- } else {
- data->__count++;
-
- barrier();
-
- ns = data->cyc2ns_offset;
- ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
+ cyc2ns_read_begin(&data);
- barrier();
+ ns = data.cyc2ns_offset;
+ ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
- if (!--data->__count)
- this_cpu_write(cyc2ns.tail, data);
- }
- preempt_enable_notrace();
+ cyc2ns_read_end();
return ns;
}
-static void set_cyc2ns_scale(unsigned long khz, int cpu)
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
{
- unsigned long long tsc_now, ns_now;
- struct cyc2ns_data *data;
+ unsigned long long ns_now;
+ struct cyc2ns_data data;
+ struct cyc2ns *c2n;
unsigned long flags;
local_irq_save(flags);
@@ -254,9 +150,6 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
if (!khz)
goto done;
- data = cyc2ns_write_begin(cpu);
-
- tsc_now = rdtsc();
ns_now = cycles_2_ns(tsc_now);
/*
@@ -264,7 +157,7 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
* time function is continuous; see the comment near struct
* cyc2ns_data.
*/
- clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
+ clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
NSEC_PER_MSEC, 0);
/*
@@ -273,20 +166,26 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
* conversion algorithm shifting a 32-bit value (now specifies a 64-bit
* value) - refer perf_event_mmap_page documentation in perf_event.h.
*/
- if (data->cyc2ns_shift == 32) {
- data->cyc2ns_shift = 31;
- data->cyc2ns_mul >>= 1;
+ if (data.cyc2ns_shift == 32) {
+ data.cyc2ns_shift = 31;
+ data.cyc2ns_mul >>= 1;
}
- data->cyc2ns_offset = ns_now -
- mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
+ data.cyc2ns_offset = ns_now -
+ mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);
+
+ c2n = per_cpu_ptr(&cyc2ns, cpu);
- cyc2ns_write_end(cpu, data);
+ raw_write_seqcount_latch(&c2n->seq);
+ c2n->data[0] = data;
+ raw_write_seqcount_latch(&c2n->seq);
+ c2n->data[1] = data;
done:
- sched_clock_idle_wakeup_event(0);
+ sched_clock_idle_wakeup_event();
local_irq_restore(flags);
}
+
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -374,6 +273,8 @@ static int __init tsc_setup(char *str)
tsc_clocksource_reliable = 1;
if (!strncmp(str, "noirqtime", 9))
no_sched_irq_time = 1;
+ if (!strcmp(str, "unstable"))
+ mark_tsc_unstable("boot parameter");
return 1;
}
@@ -986,7 +887,6 @@ void tsc_restore_sched_clock_state(void)
}
#ifdef CONFIG_CPU_FREQ
-
/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
* changes.
*
@@ -1027,7 +927,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
mark_tsc_unstable("cpufreq changes");
- set_cyc2ns_scale(tsc_khz, freq->cpu);
+ set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
}
return 0;
@@ -1127,6 +1027,15 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
pr_info("Marking TSC unstable due to clocksource watchdog\n");
}
+static void tsc_cs_tick_stable(struct clocksource *cs)
+{
+ if (tsc_unstable)
+ return;
+
+ if (using_native_sched_clock())
+ sched_clock_tick_stable();
+}
+
/*
* .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
*/
@@ -1140,6 +1049,7 @@ static struct clocksource clocksource_tsc = {
.archdata = { .vclock_mode = VCLOCK_TSC },
.resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable,
+ .tick_stable = tsc_cs_tick_stable,
};
void mark_tsc_unstable(char *reason)
@@ -1255,6 +1165,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
static int hpet;
u64 tsc_stop, ref_stop, delta;
unsigned long freq;
+ int cpu;
/* Don't bother refining TSC on unstable systems */
if (check_tsc_unstable())
@@ -1305,6 +1216,10 @@ static void tsc_refine_calibration_work(struct work_struct *work)
/* Inform the TSC deadline clockevent devices about the recalibration */
lapic_update_tsc_freq();
+ /* Update the sched_clock() rate to match the clocksource one */
+ for_each_possible_cpu(cpu)
+ set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
+
out:
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
@@ -1350,7 +1265,7 @@ device_initcall(init_tsc_clocksource);
void __init tsc_init(void)
{
- u64 lpj;
+ u64 lpj, cyc;
int cpu;
if (!boot_cpu_has(X86_FEATURE_TSC)) {
@@ -1390,9 +1305,10 @@ void __init tsc_init(void)
* speed as the bootup CPU. (cpufreq notifiers will fix this
* up if their speed diverges)
*/
+ cyc = rdtsc();
for_each_possible_cpu(cpu) {
cyc2ns_init(cpu);
- set_cyc2ns_scale(tsc_khz, cpu);
+ set_cyc2ns_scale(tsc_khz, cpu, cyc);
}
if (tsc_disabled > 0)
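
The cyc2ns rewrite above replaces the head/tail ring with a latched seqcount: the writer bumps the sequence around each of the two slot updates, and readers sample slot (seq & 1), retrying if the sequence moved underneath them. A single-threaded userspace sketch of that read/write protocol:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct slot {
	uint64_t offset;
	uint32_t mul;
};

static _Atomic unsigned int seq;
static struct slot data[2];

static void write_latch(struct slot s)
{
	atomic_fetch_add(&seq, 1);	/* odd: readers are steered to slot 1 */
	data[0] = s;
	atomic_fetch_add(&seq, 1);	/* even: readers are steered to slot 0 */
	data[1] = s;
}

static struct slot read_latch(void)
{
	unsigned int s1, s2;
	struct slot s;

	do {
		s1 = atomic_load(&seq);
		s = data[s1 & 1];	/* the slot not currently being written */
		s2 = atomic_load(&seq);
	} while (s1 != s2);		/* a writer ran concurrently: retry */
	return s;
}

int main(void)
{
	write_latch((struct slot){ .offset = 100, .mul = 3 });

	struct slot s = read_latch();
	printf("offset=%llu mul=%u\n", (unsigned long long)s.offset, s.mul);
	return 0;
}
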
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 82c6d7f1fd73..b9389d72b2f7 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state)
return (unsigned long *)task_pt_regs(state->task) - 2;
}
+static bool is_last_frame(struct unwind_state *state)
+{
+ return state->bp == last_frame(state);
+}
+
#ifdef CONFIG_X86_32
#define GCC_REALIGN_WORDS 3
#else
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)
return last_frame(state) - GCC_REALIGN_WORDS;
}
-static bool is_last_task_frame(struct unwind_state *state)
+static bool is_last_aligned_frame(struct unwind_state *state)
{
unsigned long *last_bp = last_frame(state);
unsigned long *aligned_bp = last_aligned_frame(state);
/*
- * We have to check for the last task frame at two different locations
- * because gcc can occasionally decide to realign the stack pointer and
- * change the offset of the stack frame in the prologue of a function
- * called by head/entry code. Examples:
+ * GCC can occasionally decide to realign the stack pointer and change
+ * the offset of the stack frame in the prologue of a function called
+ * by head/entry code. Examples:
*
* <start_secondary>:
* push %edi
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)
* push %rbp
* mov %rsp,%rbp
*
- * Note that after aligning the stack, it pushes a duplicate copy of
- * the return address before pushing the frame pointer.
+ * After aligning the stack, it pushes a duplicate copy of the return
+ * address before pushing the frame pointer.
+ */
+ return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
+}
+
+static bool is_last_ftrace_frame(struct unwind_state *state)
+{
+ unsigned long *last_bp = last_frame(state);
+ unsigned long *last_ftrace_bp = last_bp - 3;
+
+ /*
+ * When unwinding from an ftrace handler of a function called by entry
+ * code, the stack layout of the last frame is:
+ *
+ * bp
+ * parent ret addr
+ * bp
+ * function ret addr
+ * parent ret addr
+ * pt_regs
+ * -----------------
*/
- return (state->bp == last_bp ||
- (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
+ return (state->bp == last_ftrace_bp &&
+ *state->bp == *(state->bp + 2) &&
+ *(state->bp + 1) == *(state->bp + 4));
+}
+
+static bool is_last_task_frame(struct unwind_state *state)
+{
+ return is_last_frame(state) || is_last_aligned_frame(state) ||
+ is_last_ftrace_frame(state);
}
/*
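The three split-out predicates each test a different stack signature for the final frame. A toy model on a synthetic word array shows the ftrace check's duplicated bp/return-address comparison (addresses and layout below are fabricated for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Synthetic "stack" of words; LAST stands in for what last_frame()
     * derives from task_pt_regs(). */
    static uintptr_t stack[16];
    #define LAST (&stack[8])

    static bool is_last_frame(uintptr_t *bp)
    {
            return bp == LAST;
    }

    /* ftrace layout: bp, parent ret, bp, function ret, parent ret (top to
     * bottom), so the saved bp and the parent return address each appear
     * twice and can be cross-checked. */
    static bool is_last_ftrace_frame(uintptr_t *bp)
    {
            return bp == LAST - 3 && bp[0] == bp[2] && bp[1] == bp[4];
    }

    int main(void)
    {
            uintptr_t *bp = LAST - 3;

            bp[0] = 0xb0; bp[1] = 0xaa;     /* bp, parent ret addr   */
            bp[2] = 0xb0; bp[3] = 0xf0;     /* bp, function ret addr */
            bp[4] = 0xaa;                   /* parent ret addr       */

            printf("plain: %d, ftrace: %d\n",
                   is_last_frame(bp), is_last_ftrace_frame(bp));
            return 0;
    }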
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a181ae76c71c..59ca2eea522c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -780,18 +780,20 @@ out:
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
- int j, nent = vcpu->arch.cpuid_nent;
+ struct kvm_cpuid_entry2 *ej;
+ int j = i;
+ int nent = vcpu->arch.cpuid_nent;
e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
/* when no next entry is found, the current entry[i] is reselected */
- for (j = i + 1; ; j = (j + 1) % nent) {
- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
- if (ej->function == e->function) {
- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
- return j;
- }
- }
- return 0; /* silence gcc, even though control never reaches here */
+ do {
+ j = (j + 1) % nent;
+ ej = &vcpu->arch.cpuid_entries[j];
+ } while (ej->function != e->function);
+
+ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+ return j;
}
/* find an entry with matching function, matching index (if needed), and that
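The do/while rewrite drops the unreachable return and makes termination obvious: entry i itself matches its own function, so the circular scan always stops. A hedged stand-alone model of the search:

    #include <stdio.h>

    struct entry { int function; int flags; };

    /* Find the next entry with the same function, wrapping around; when
     * no other entry matches, the loop re-selects index i itself. */
    static int next_stateful(struct entry *e, int nent, int i)
    {
            int j = i;

            do {
                    j = (j + 1) % nent;
            } while (e[j].function != e[i].function);

            return j;
    }

    int main(void)
    {
            struct entry tbl[] = { {2, 0}, {4, 0}, {2, 0}, {5, 0} };

            printf("%d\n", next_stateful(tbl, 4, 0));  /* -> 2 */
            printf("%d\n", next_stateful(tbl, 4, 2));  /* wraps -> 0 */
            return 0;
    }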
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c25cfaf584e7..80890dee66ce 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
}
+ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
return X86EMUL_CONTINUE;
}
@@ -4173,7 +4174,7 @@ static int check_dr_write(struct x86_emulate_ctxt *ctxt)
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
- u64 efer;
+ u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c329d2894905..d24c8742d9b0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_timer(struct kvm_lapic *apic)
{
+ preempt_disable();
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
+ preempt_enable();
}
static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
for (i = 0; i < KVM_APIC_LVT_NUM; i++)
kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
apic_update_lvtt(apic);
- if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+ if (kvm_vcpu_is_reset_bsp(vcpu) &&
+ kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
kvm_lapic_set_reg(apic, APIC_LVT0,
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 558676538fca..cb8225969255 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1498,6 +1498,21 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
+/**
+ * kvm_arch_write_log_dirty - emulate dirty page logging
+ * @vcpu: Guest mode vcpu
+ *
+ * Emulate arch specific page modification logging for the
+ * nested hypervisor
+ */
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+{
+ if (kvm_x86_ops->write_log_dirty)
+ return kvm_x86_ops->write_log_dirty(vcpu);
+
+ return 0;
+}
+
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn)
{
@@ -3683,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
if (unlikely(!lapic_in_kernel(vcpu) ||
kvm_event_needs_reinjection(vcpu)))
return false;
+ if (is_guest_mode(vcpu))
+ return false;
+
return kvm_x86_ops->interrupt_allowed(vcpu);
}
@@ -3704,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
if (!async)
return false; /* *pfn has correct page already */
- if (!prefault && can_do_async_pf(vcpu)) {
+ if (!prefault && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(gva, gfn);
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d8ccb32f7308..330bf3a811fb 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
@@ -202,4 +203,5 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 314d2071b337..b0454c7e4cff 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -226,6 +226,10 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
if (level == walker->level && write_fault &&
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
+#if PTTYPE == PTTYPE_EPT
+ if (kvm_arch_write_log_dirty(vcpu))
+ return -EINVAL;
+#endif
pte |= PT_GUEST_DIRTY_MASK;
}
if (pte == orig_pte)
@@ -279,11 +283,13 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
pt_element_t pte;
pt_element_t __user *uninitialized_var(ptep_user);
gfn_t table_gfn;
- unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
+ u64 pt_access, pte_access;
+ unsigned index, accessed_dirty, pte_pkey;
unsigned nested_access;
gpa_t pte_gpa;
bool have_ad;
int offset;
+ u64 walk_nx_mask = 0;
const int write_fault = access & PFERR_WRITE_MASK;
const int user_fault = access & PFERR_USER_MASK;
const int fetch_fault = access & PFERR_FETCH_MASK;
@@ -298,6 +304,7 @@ retry_walk:
have_ad = PT_HAVE_ACCESSED_DIRTY(mmu);
#if PTTYPE == 64
+ walk_nx_mask = 1ULL << PT64_NX_SHIFT;
if (walker->level == PT32E_ROOT_LEVEL) {
pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
@@ -309,8 +316,6 @@ retry_walk:
walker->max_level = walker->level;
ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
- accessed_dirty = have_ad ? PT_GUEST_ACCESSED_MASK : 0;
-
/*
* FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
* by the MOV to CR instruction are treated as reads and do not cause the
@@ -318,14 +323,14 @@ retry_walk:
*/
nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;
- pt_access = pte_access = ACC_ALL;
+ pte_access = ~0;
++walker->level;
do {
gfn_t real_gfn;
unsigned long host_addr;
- pt_access &= pte_access;
+ pt_access = pte_access;
--walker->level;
index = PT_INDEX(addr, walker->level);
@@ -367,6 +372,12 @@ retry_walk:
trace_kvm_mmu_paging_element(pte, walker->level);
+ /*
+ * Inverting the NX bit lets us AND it like other
+ * permission bits.
+ */
+ pte_access = pt_access & (pte ^ walk_nx_mask);
+
if (unlikely(!FNAME(is_present_gpte)(pte)))
goto error;
@@ -375,14 +386,16 @@ retry_walk:
goto error;
}
- accessed_dirty &= pte;
- pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
-
walker->ptes[walker->level - 1] = pte;
} while (!is_last_gpte(mmu, walker->level, pte));
pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
- errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
+ accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
+
+ /* Convert to ACC_*_MASK flags for struct guest_walker. */
+ walker->pt_access = FNAME(gpte_access)(vcpu, pt_access ^ walk_nx_mask);
+ walker->pte_access = FNAME(gpte_access)(vcpu, pte_access ^ walk_nx_mask);
+ errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
goto error;
@@ -399,7 +412,7 @@ retry_walk:
walker->gfn = real_gpa >> PAGE_SHIFT;
if (!write_fault)
- FNAME(protect_clean_gpte)(mmu, &pte_access, pte);
+ FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
else
/*
* On a write fault, fold the dirty bit into accessed_dirty.
@@ -417,10 +430,8 @@ retry_walk:
goto retry_walk;
}
- walker->pt_access = pt_access;
- walker->pte_access = pte_access;
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __func__, (u64)pte, pte_access, pt_access);
+ __func__, (u64)pte, walker->pte_access, walker->pt_access);
return 1;
error:
@@ -448,7 +459,7 @@ error:
*/
if (!(errcode & PFERR_RSVD_MASK)) {
vcpu->arch.exit_qualification &= 0x187;
- vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
+ vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
}
#endif
walker->fault.address = addr;
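The walk above now accumulates permissions with a plain AND at every level; since NX is a deny bit (1 = no execute), it is XORed into an allow bit first and flipped back afterwards. A small model of the trick (bit layout simplified; only NX at bit 63 is kept from the real format):

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_NX (1ULL << 63)

    /* AND-accumulate: flip NX so "may execute" is a 1 bit like the other
     * permissions, then flip back to the hardware encoding at the end. */
    static uint64_t walk(const uint64_t *ptes, int levels, uint64_t nx_mask)
    {
            uint64_t access = ~0ULL;
            int i;

            for (i = 0; i < levels; i++)
                    access &= ptes[i] ^ nx_mask;

            return access ^ nx_mask;
    }

    int main(void)
    {
            /* Level 1 allows execute (NX clear), level 2 forbids it (NX
             * set): the effective mapping must be non-executable. */
            uint64_t ptes[2] = { 0x7, 0x7 | PTE_NX };
            uint64_t eff = walk(ptes, 2, PTE_NX);

            printf("effective NX set: %d\n", (int)!!(eff & PTE_NX));
            return 0;
    }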
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 9d4a8504a95a..5ab4a364348e 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
((u64)1 << edx.split.bit_width_fixed) - 1;
}
- pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+ pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
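The 1 -> 1ull change matters once nr_arch_gp_counters can reach 32: shifting a 32-bit int by 32 is undefined behaviour and typically destroys the mask instead of producing 0xffffffff. A minimal demonstration of the promotion:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int n = 32;

            /* (1 << n) with n == 32 on a 32-bit int is undefined
             * behaviour; many compilers yield 1 or 0, not the mask:
             *      uint64_t bad = (1 << n) - 1;
             */
            uint64_t good = (1ULL << n) - 1;   /* 0xffffffff, as intended */

            printf("%#llx\n", (unsigned long long)good);
            return 0;
    }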
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c27ac6923a18..33460fcdeef9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -36,6 +36,7 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
+#include <linux/frame.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
@@ -1272,7 +1273,8 @@ static void init_vmcb(struct vcpu_svm *svm)
}
-static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index)
+static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
+ unsigned int index)
{
u64 *avic_physical_id_table;
struct kvm_arch *vm_data = &vcpu->kvm->arch;
@@ -1806,7 +1808,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
* AMD's VMCB does not have an explicit unusable field, so emulate it
* for cross vendor migration purposes by "not present"
*/
- var->unusable = !var->present || (var->type == 0);
+ var->unusable = !var->present;
switch (seg) {
case VCPU_SREG_TR:
@@ -1839,6 +1841,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
*/
if (var->unusable)
var->db = 0;
+ /* This is symmetric with svm_set_segment() */
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
break;
}
@@ -1979,18 +1982,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
s->base = var->base;
s->limit = var->limit;
s->selector = var->selector;
- if (var->unusable)
- s->attrib = 0;
- else {
- s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
- s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
- s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
- s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
- s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
- s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
- s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
- s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
- }
+ s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+ s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+ s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+ s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+ s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+ s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+ s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+ s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
/*
* This is always accurate, except if SYSRET returned to a segment
@@ -1999,7 +1998,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
* would entail passing the CPL to userspace and back.
*/
if (seg == VCPU_SREG_SS)
- svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+ /* This is symmetric with svm_get_segment() */
+ svm->vmcb->save.cpl = (var->dpl & 3);
mark_dirty(svm->vmcb, VMCB_SEG);
}
@@ -4907,6 +4907,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
mark_all_clean(svm->vmcb);
}
+STACK_FRAME_NON_STANDARD(svm_vcpu_run);
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c5fd459c4043..6dcc4873e435 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/hrtimer.h>
+#include <linux/frame.h>
#include "kvm_cache_regs.h"
#include "x86.h"
@@ -48,6 +49,7 @@
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
+#include <asm/mmu_context.h>
#include "trace.h"
#include "pmu.h"
@@ -248,6 +250,7 @@ struct __packed vmcs12 {
u64 xss_exit_bitmap;
u64 guest_physical_address;
u64 vmcs_link_pointer;
+ u64 pml_address;
u64 guest_ia32_debugctl;
u64 guest_ia32_pat;
u64 guest_ia32_efer;
@@ -369,6 +372,7 @@ struct __packed vmcs12 {
u16 guest_ldtr_selector;
u16 guest_tr_selector;
u16 guest_intr_status;
+ u16 guest_pml_index;
u16 host_es_selector;
u16 host_cs_selector;
u16 host_ss_selector;
@@ -407,6 +411,7 @@ struct nested_vmx {
/* Has the level1 guest done vmxon? */
bool vmxon;
gpa_t vmxon_ptr;
+ bool pml_full;
/* The guest-physical address of the current VMCS L1 keeps for L2 */
gpa_t current_vmptr;
@@ -593,6 +598,7 @@ struct vcpu_vmx {
int gs_ldt_reload_needed;
int fs_reload_needed;
u64 msr_host_bndcfgs;
+ unsigned long vmcs_host_cr3; /* May not match real cr3 */
unsigned long vmcs_host_cr4; /* May not match real cr4 */
} host_state;
struct {
@@ -742,6 +748,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
FIELD(GUEST_INTR_STATUS, guest_intr_status),
+ FIELD(GUEST_PML_INDEX, guest_pml_index),
FIELD(HOST_ES_SELECTOR, host_es_selector),
FIELD(HOST_CS_SELECTOR, host_cs_selector),
FIELD(HOST_SS_SELECTOR, host_ss_selector),
@@ -767,6 +774,7 @@ static const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
+ FIELD64(PML_ADDRESS, pml_address),
FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
@@ -1314,6 +1322,11 @@ static inline bool report_flexpriority(void)
return flexpriority_enabled;
}
+static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
+{
+ return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
+}
+
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
return vmcs12->cpu_based_vm_exec_control & bit;
@@ -1348,6 +1361,11 @@ static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
vmx_xsaves_supported();
}
+static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
+}
+
static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -2410,7 +2428,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
@@ -2751,8 +2769,11 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_1GB_PAGE_BIT;
- if (enable_ept_ad_bits)
+ if (enable_ept_ad_bits) {
+ vmx->nested.nested_vmx_secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_PML;
vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
+ }
} else
vmx->nested.nested_vmx_ept_caps = 0;
@@ -4994,12 +5015,19 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
u32 low32, high32;
unsigned long tmpl;
struct desc_ptr dt;
- unsigned long cr0, cr4;
+ unsigned long cr0, cr3, cr4;
cr0 = read_cr0();
WARN_ON(cr0 & X86_CR0_TS);
vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
- vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
+
+ /*
+ * Save the most likely value for this task's CR3 in the VMCS.
+ * We can't use __get_current_cr3_fast() because we're not atomic.
+ */
+ cr3 = __read_cr3();
+ vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
+ vmx->host_state.vmcs_host_cr3 = cr3;
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
@@ -6486,7 +6514,7 @@ static __init int hardware_setup(void)
enable_ept_ad_bits = 0;
}
- if (!cpu_has_vmx_ept_ad_bits())
+ if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
enable_ept_ad_bits = 0;
if (!cpu_has_vmx_unrestricted_guest())
@@ -6896,97 +6924,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
- gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
{
gva_t gva;
- gpa_t vmptr;
struct x86_exception e;
- struct page *page;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
- sizeof(vmptr), &e)) {
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+ sizeof(*vmpointer), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
- switch (exit_reason) {
- case EXIT_REASON_VMON:
- /*
- * SDM 3: 24.11.5
- * The first 4 bytes of VMXON region contain the supported
- * VMCS revision identifier
- *
- * Note - IA32_VMX_BASIC[48] will never be 1
- * for the nested case;
- * which replaces physical address width with 32
- *
- */
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
-
- page = nested_get_page(vcpu, vmptr);
- if (page == NULL) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
- if (*(u32 *)kmap(page) != VMCS12_REVISION) {
- kunmap(page);
- nested_release_page_clean(page);
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
- kunmap(page);
- nested_release_page_clean(page);
- vmx->nested.vmxon_ptr = vmptr;
- break;
- case EXIT_REASON_VMCLEAR:
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
- nested_vmx_failValid(vcpu,
- VMXERR_VMCLEAR_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
- }
-
- if (vmptr == vmx->nested.vmxon_ptr) {
- nested_vmx_failValid(vcpu,
- VMXERR_VMCLEAR_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
- }
- break;
- case EXIT_REASON_VMPTRLD:
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
- nested_vmx_failValid(vcpu,
- VMXERR_VMPTRLD_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
- }
-
- if (vmptr == vmx->nested.vmxon_ptr) {
- nested_vmx_failValid(vcpu,
- VMXERR_VMPTRLD_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
- }
- break;
- default:
- return 1; /* shouldn't happen */
- }
-
- if (vmpointer)
- *vmpointer = vmptr;
return 0;
}
@@ -7048,6 +7000,8 @@ out_msr_bitmap:
static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
+ gpa_t vmptr;
+ struct page *page;
struct vcpu_vmx *vmx = to_vmx(vcpu);
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7077,9 +7031,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+ if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
-
+
+ /*
+ * SDM 3: 24.11.5
+ * The first 4 bytes of VMXON region contain the supported
+ * VMCS revision identifier
+ *
+ * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
+ * which replaces the physical address width with 32
+ */
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+ nested_vmx_failInvalid(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+
+ page = nested_get_page(vcpu, vmptr);
+ if (page == NULL) {
+ nested_vmx_failInvalid(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+ kunmap(page);
+ nested_release_page_clean(page);
+ nested_vmx_failInvalid(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ kunmap(page);
+ nested_release_page_clean(page);
+
+ vmx->nested.vmxon_ptr = vmptr;
ret = enter_vmx_operation(vcpu);
if (ret)
return ret;
@@ -7195,9 +7177,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+ if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+ nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+
+ if (vmptr == vmx->nested.vmxon_ptr) {
+ nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vmx);
@@ -7527,9 +7519,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+ if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+ nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+
+ if (vmptr == vmx->nested.vmxon_ptr) {
+ nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+
if (vmx->nested.current_vmptr != vmptr) {
struct vmcs12 *new_vmcs12;
struct page *page;
@@ -7895,11 +7897,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int cr = exit_qualification & 15;
- int reg = (exit_qualification >> 8) & 15;
- unsigned long val = kvm_register_readl(vcpu, reg);
+ int reg;
+ unsigned long val;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
+ reg = (exit_qualification >> 8) & 15;
+ val = kvm_register_readl(vcpu, reg);
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
@@ -7954,6 +7958,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
* lmsw can change bits 1..3 of cr0, and only set bit 0 of
* cr0. Other attempted changes are ignored, with no exit.
*/
+ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
return true;
@@ -8114,7 +8119,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_PREEMPTION_TIMER:
return false;
case EXIT_REASON_PML_FULL:
- /* We don't expose PML support to L1. */
+ /* We emulate PML support for L1. */
return false;
default:
return true;
@@ -8657,6 +8662,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
);
}
}
+STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
static bool vmx_has_high_real_mode_segbase(void)
{
@@ -8825,7 +8831,7 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long debugctlmsr, cr4;
+ unsigned long debugctlmsr, cr3, cr4;
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
@@ -8847,6 +8853,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->host_state.vmcs_host_cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->host_state.vmcs_host_cr3 = cr3;
+ }
+
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
vmcs_writel(HOST_CR4, cr4);
@@ -9033,6 +9045,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
}
+STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
@@ -9364,13 +9377,20 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exit_reason;
+ unsigned long exit_qualification = vcpu->arch.exit_qualification;
- if (fault->error_code & PFERR_RSVD_MASK)
+ if (vmx->nested.pml_full) {
+ exit_reason = EXIT_REASON_PML_FULL;
+ vmx->nested.pml_full = false;
+ exit_qualification &= INTR_INFO_UNBLOCK_NMI;
+ } else if (fault->error_code & PFERR_RSVD_MASK)
exit_reason = EXIT_REASON_EPT_MISCONFIG;
else
exit_reason = EXIT_REASON_EPT_VIOLATION;
- nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
+
+ nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
vmcs12->guest_physical_address = fault->address;
}
@@ -9713,6 +9733,22 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
return 0;
}
+static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ u64 address = vmcs12->pml_address;
+ int maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) {
+ if (!nested_cpu_has_ept(vmcs12) ||
+ !IS_ALIGNED(address, 4096) ||
+ address >> maxphyaddr)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{
@@ -9886,7 +9922,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
bool from_vmentry, u32 *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exec_control;
+ u32 exec_control, vmcs12_exec_ctrl;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
@@ -10017,8 +10053,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT);
if (nested_cpu_has(vmcs12,
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
- exec_control |= vmcs12->secondary_vm_exec_control;
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
+ vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
+ ~SECONDARY_EXEC_ENABLE_PML;
+ exec_control |= vmcs12_exec_ctrl;
+ }
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
vmcs_write64(EOI_EXIT_BITMAP0,
@@ -10248,6 +10287,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_vmx_check_pml_controls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high) ||
@@ -10266,6 +10308,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx->nested.nested_vmx_entry_ctls_high))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
!nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
@@ -11143,6 +11188,46 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
kvm_flush_pml_buffers(kvm);
}
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gpa_t gpa;
+ struct page *page = NULL;
+ u64 *pml_address;
+
+ if (is_guest_mode(vcpu)) {
+ WARN_ON_ONCE(vmx->nested.pml_full);
+
+ /*
+ * Check if PML is enabled for the nested guest.
+ * Whether eptp bit 6 is set is already checked
+ * as part of A/D emulation.
+ */
+ vmcs12 = get_vmcs12(vcpu);
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;
+
+ if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+ vmx->nested.pml_full = true;
+ return 1;
+ }
+
+ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+
+ page = nested_get_page(vcpu, vmcs12->pml_address);
+ if (!page)
+ return 0;
+
+ pml_address = kmap(page);
+ pml_address[vmcs12->guest_pml_index--] = gpa;
+ kunmap(page);
+ nested_release_page_clean(page);
+ }
+
+ return 0;
+}
+
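vmx_write_pml_buffer() emulates the PML hardware on behalf of L1: log the dirty GPA at the guest-chosen descending index, and report full once the 16-bit index wraps past zero. A toy model of that buffer protocol (PML_ENTITY_NUM matches the real 512-entry log; the rest is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PML_ENTITY_NUM 512

    struct pml {
            uint64_t buf[PML_ENTITY_NUM];
            uint16_t index;              /* starts at PML_ENTITY_NUM - 1 */
    };

    /* Returns 1 when the buffer is full and the caller must exit to L1. */
    static int pml_log(struct pml *p, uint64_t gpa)
    {
            if (p->index >= PML_ENTITY_NUM)  /* 0xffff after wrapping */
                    return 1;

            p->buf[p->index--] = gpa & ~0xfffULL;
            return 0;
    }

    int main(void)
    {
            struct pml p = { .index = PML_ENTITY_NUM - 1 };
            int full = 0;

            for (uint64_t g = 0; !full; g += 0x1000)
                    full = pml_log(&p, g);

            printf("full after %u entries\n", PML_ENTITY_NUM);
            return 0;
    }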
static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *memslot,
gfn_t offset, unsigned long mask)
@@ -11502,6 +11587,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
.flush_log_dirty = vmx_flush_log_dirty,
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+ .write_log_dirty = vmx_write_pml_buffer,
.pre_block = vmx_pre_block,
.post_block = vmx_post_block,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 464da936c53d..0e846f0cb83b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1763,6 +1763,7 @@ u64 get_kvmclock_ns(struct kvm *kvm)
{
struct kvm_arch *ka = &kvm->arch;
struct pvclock_vcpu_time_info hv_clock;
+ u64 ret;
spin_lock(&ka->pvclock_gtod_sync_lock);
if (!ka->use_master_clock) {
@@ -1774,10 +1775,17 @@ u64 get_kvmclock_ns(struct kvm *kvm)
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
spin_unlock(&ka->pvclock_gtod_sync_lock);
+ /* both __this_cpu_read() and rdtsc() should be on the same cpu */
+ get_cpu();
+
kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
&hv_clock.tsc_shift,
&hv_clock.tsc_to_system_mul);
- return __pvclock_read_cycles(&hv_clock, rdtsc());
+ ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+
+ put_cpu();
+
+ return ret;
}
static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
@@ -3288,11 +3296,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
}
}
+#define XSAVE_MXCSR_OFFSET 24
+
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
u64 xstate_bv =
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+ u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
/*
@@ -3300,11 +3311,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
* CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
* with old userspace.
*/
- if (xstate_bv & ~kvm_supported_xcr0())
+ if (xstate_bv & ~kvm_supported_xcr0() ||
+ mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
load_xsave(vcpu, (u8 *)guest_xsave->region);
} else {
- if (xstate_bv & ~XFEATURE_MASK_FPSSE)
+ if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
+ mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state.fxsave,
guest_xsave->region, sizeof(struct fxregs_state));
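Both branches now reject a guest-supplied MXCSR with unsupported bits, since loading one would fault in the host. A sketch of the reserved-bit check (the real mxcsr_feature_mask is probed from FXSAVE at boot; 0xffff below is only a typical value):

    #include <stdint.h>
    #include <stdio.h>

    #define MXCSR_MASK 0xffffu   /* illustrative; probed on real hardware */

    static int mxcsr_valid(uint32_t mxcsr)
    {
            return (mxcsr & ~MXCSR_MASK) == 0;
    }

    int main(void)
    {
            printf("%d\n", mxcsr_valid(0x1f80));   /* default: accepted  */
            printf("%d\n", mxcsr_valid(0x10000));  /* reserved: rejected */
            return 0;
    }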
@@ -4818,16 +4831,20 @@ emul_write:
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
- /* TODO: String I/O for in kernel device */
- int r;
+ int r = 0, i;
- if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
- vcpu->arch.pio.size, pd);
- else
- r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
- vcpu->arch.pio.port, vcpu->arch.pio.size,
- pd);
+ for (i = 0; i < vcpu->arch.pio.count; i++) {
+ if (vcpu->arch.pio.in)
+ r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
+ vcpu->arch.pio.size, pd);
+ else
+ r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
+ vcpu->arch.pio.port, vcpu->arch.pio.size,
+ pd);
+ if (r)
+ break;
+ pd += vcpu->arch.pio.size;
+ }
return r;
}
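kernel_pio() now really performs string I/O: one bus transaction per element, advancing the data pointer by the element size and stopping at the first error. A hedged stand-alone version of that loop shape:

    #include <stdio.h>
    #include <string.h>

    typedef int (*io_fn)(unsigned port, unsigned size, void *data);

    static int pio_string(io_fn op, unsigned port, unsigned size,
                          unsigned count, void *data)
    {
            unsigned char *pd = data;
            unsigned i;
            int r = 0;

            for (i = 0; i < count; i++) {
                    r = op(port, size, pd);
                    if (r)
                            break;
                    pd += size;   /* next element of the string transfer */
            }
            return r;
    }

    static int fake_in(unsigned port, unsigned size, void *data)
    {
            memset(data, 0xab, size);  /* pretend the device returned data */
            return 0;
    }

    int main(void)
    {
            unsigned char buf[8] = { 0 };

            pio_string(fake_in, 0x60, 2, 4, buf);
            printf("%02x %02x\n", buf[0], buf[7]);
            return 0;
    }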
@@ -4865,6 +4882,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
if (vcpu->arch.pio.count)
goto data_avail;
+ memset(vcpu->arch.pio_data, 0, size * count);
+
ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
if (ret) {
data_avail:
@@ -5048,6 +5067,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
+ if (base3)
+ *base3 = 0;
return false;
}
@@ -5292,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
ctxt->eflags = kvm_get_rflags(vcpu);
+ ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+
ctxt->eip = kvm_rip_read(vcpu);
ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
@@ -5507,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
return dr6;
}
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
+static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
{
struct kvm_run *kvm_run = vcpu->run;
- /*
- * rflags is the old, "raw" value of the flags. The new value has
- * not been saved yet.
- *
- * This is correct even for TF set by the guest, because "the
- * processor will not generate this exception after the instruction
- * that sets the TF flag".
- */
- if (unlikely(rflags & X86_EFLAGS_TF)) {
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
- DR6_RTM;
- kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
- kvm_run->debug.arch.exception = DB_VECTOR;
- kvm_run->exit_reason = KVM_EXIT_DEBUG;
- *r = EMULATE_USER_EXIT;
- } else {
- /*
- * "Certain debug exceptions may clear bit 0-3. The
- * remaining contents of the DR6 register are never
- * cleared by the processor".
- */
- vcpu->arch.dr6 &= ~15;
- vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
- kvm_queue_exception(vcpu, DB_VECTOR);
- }
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
+ kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+ kvm_run->debug.arch.exception = DB_VECTOR;
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ *r = EMULATE_USER_EXIT;
+ } else {
+ /*
+ * "Certain debug exceptions may clear bit 0-3. The
+ * remaining contents of the DR6 register are never
+ * cleared by the processor".
+ */
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+ kvm_queue_exception(vcpu, DB_VECTOR);
}
}
@@ -5546,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
int r = EMULATE_DONE;
kvm_x86_ops->skip_emulated_instruction(vcpu);
- kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+
+ /*
+ * rflags is the old, "raw" value of the flags. The new value has
+ * not been saved yet.
+ *
+ * This is correct even for TF set by the guest, because "the
+ * processor will not generate this exception after the instruction
+ * that sets the TF flag".
+ */
+ if (unlikely(rflags & X86_EFLAGS_TF))
+ kvm_vcpu_do_singlestep(vcpu, &r);
return r == EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5705,8 +5727,9 @@ restart:
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE)
- kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+ if (r == EMULATE_DONE &&
+ (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
__kvm_set_rflags(vcpu, ctxt->eflags);
@@ -8373,10 +8396,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (vcpu->arch.pv.pv_unhalted)
return true;
- if (atomic_read(&vcpu->arch.nmi_queued))
+ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+ (vcpu->arch.nmi_pending &&
+ kvm_x86_ops->nmi_allowed(vcpu)))
return true;
- if (kvm_test_request(KVM_REQ_SMI, vcpu))
+ if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+ (vcpu->arch.smi_pending && !is_smm(vcpu)))
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
@@ -8583,8 +8609,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
return true;
else
- return !kvm_event_needs_reinjection(vcpu) &&
- kvm_x86_ops->interrupt_allowed(vcpu);
+ return kvm_can_do_async_pf(vcpu);
}
void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index c5959576c315..020f75cc8cf6 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -37,7 +37,7 @@ ENTRY(copy_user_generic_unrolled)
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_copy_short_string
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -58,7 +58,8 @@ ENTRY(copy_user_generic_unrolled)
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
-17: movl %edx,%ecx
+.L_copy_short_string:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
@@ -174,6 +175,8 @@ EXPORT_SYMBOL(copy_user_generic_string)
*/
ENTRY(copy_user_enhanced_fast_string)
ASM_STAC
+ cmpl $64,%edx
+ jb .L_copy_short_string /* less than 64 bytes, avoid the costly 'rep' */
movl %edx,%ecx
1: rep
movsb
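rep movsb has a fixed startup cost, so the added cmpl/jb routes copies under 64 bytes to the short-string path. A C-level analog of the dispatch (the 64-byte cutoff mirrors the patch; real tuning is micro-architecture specific):

    #include <stdio.h>
    #include <string.h>

    #define REP_THRESHOLD 64

    /* Dispatch on size: short copies take a simple loop, large ones take
     * the bulk path (standing in for 'rep movsb'). */
    static void *copy(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (n < REP_THRESHOLD) {
                    while (n--)
                            *d++ = *s++;
                    return dst;
            }
            return memcpy(dst, src, n);   /* bulk path */
    }

    int main(void)
    {
            char a[128] = "hello", b[128];

            copy(b, a, sizeof(a));
            puts(b);
            return 0;
    }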
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 7e48807b2fa1..45a53dfe1859 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic)
movq %r12, 3*8(%rsp)
movq %r14, 4*8(%rsp)
movq %r13, 5*8(%rsp)
- movq %rbp, 6*8(%rsp)
+ movq %r15, 6*8(%rsp)
movq %r8, (%rsp)
movq %r9, 1*8(%rsp)
@@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic)
/* main loop. clear in 64 byte blocks */
/* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
/* r11: temp3, rdx: temp4, r12 loopcnt */
- /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
+ /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */
.p2align 4
.Lloop:
source
@@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic)
source
movq 32(%rdi), %r10
source
- movq 40(%rdi), %rbp
+ movq 40(%rdi), %r15
source
movq 48(%rdi), %r14
source
@@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic)
adcq %r11, %rax
adcq %rdx, %rax
adcq %r10, %rax
- adcq %rbp, %rax
+ adcq %r15, %rax
adcq %r14, %rax
adcq %r13, %rax
@@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic)
dest
movq %r10, 32(%rsi)
dest
- movq %rbp, 40(%rsi)
+ movq %r15, 40(%rsi)
dest
movq %r14, 48(%rsi)
dest
@@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic)
movq 3*8(%rsp), %r12
movq 4*8(%rsp), %r14
movq 5*8(%rsp), %r13
- movq 6*8(%rsp), %rbp
+ movq 6*8(%rsp), %r15
addq $7*8, %rsp
ret
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 5761a4f19455..ab2d1d73e9e7 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -5,6 +5,7 @@
* kernel starts. This file is included in the compressed kernel and
* normally linked into the regular kernel.
*/
+#include <asm/asm.h>
#include <asm/kaslr.h>
#include <asm/msr.h>
#include <asm/archrandom.h>
@@ -79,7 +80,7 @@ unsigned long kaslr_get_random_long(const char *purpose)
}
/* Circular multiply for better bit diffusion */
- asm("mul %3"
+ asm(_ASM_MUL "%3"
: "=a" (random), "=d" (raw)
: "a" (random), "rm" (mix_const));
random += raw;
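_ASM_MUL expands to mull/mulq as appropriate; the one-operand form yields a double-width product, and adding the high half back into the low half diffuses entropy across all bits. A portable sketch of the same fold, using GCC's __int128 and an arbitrary odd constant:

    #include <stdint.h>
    #include <stdio.h>

    /* Multiply-and-fold: MUL gives a double-width product (rdx:rax);
     * summing the halves mixes high bits back into the result. */
    static uint64_t mix(uint64_t random, uint64_t mix_const)
    {
            unsigned __int128 prod = (unsigned __int128)random * mix_const;

            return (uint64_t)prod + (uint64_t)(prod >> 64);
    }

    int main(void)
    {
            printf("%#llx\n",
                   (unsigned long long)mix(0x12345678, 0x9e3779b97f4a7c15ULL));
            return 0;
    }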
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index c81556409bbb..10ffa7e8519f 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -13,14 +13,14 @@
.macro op_safe_regs op
ENTRY(\op\()_safe_regs)
pushq %rbx
- pushq %rbp
+ pushq %r12
movq %rdi, %r10 /* Save pointer */
xorl %r11d, %r11d /* Return value */
movl (%rdi), %eax
movl 4(%rdi), %ecx
movl 8(%rdi), %edx
movl 12(%rdi), %ebx
- movl 20(%rdi), %ebp
+ movl 20(%rdi), %r12d
movl 24(%rdi), %esi
movl 28(%rdi), %edi
1: \op
@@ -29,10 +29,10 @@ ENTRY(\op\()_safe_regs)
movl %ecx, 4(%r10)
movl %edx, 8(%r10)
movl %ebx, 12(%r10)
- movl %ebp, 20(%r10)
+ movl %r12d, 20(%r10)
movl %esi, 24(%r10)
movl %edi, 28(%r10)
- popq %rbp
+ popq %r12
popq %rbx
ret
3:
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 767be7c76034..12e377184ee4 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1009,7 +1009,7 @@ GrpTable: Grp15
1: fxstor | RDGSBASE Ry (F3),(11B)
2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
-4: XSAVE
+4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
7: clflush | clflushopt (66) | sfence (11B)
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 5e044d506b7a..a179254a5122 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -27,7 +27,7 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
#ifdef CONFIG_MODIFY_LDT_SYSCALL
seg >>= 3;
mutex_lock(&current->mm->context.lock);
- if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries)
ret = current->mm->context.ldt->entries[seg];
mutex_unlock(&current->mm->context.lock);
#endif
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 96d2b847e09e..0fbdcb64f9f8 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -2,7 +2,7 @@
KCOV_INSTRUMENT_tlb.o := n
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
- pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
+ pat.o pgtable.o physaddr.o setup_nx.o tlb.o
# Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index bce6990b1d81..0470826d2bdc 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -431,7 +431,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
bool checkwx)
{
#ifdef CONFIG_X86_64
- pgd_t *start = (pgd_t *) &init_level4_pgt;
+ pgd_t *start = (pgd_t *) &init_top_pgt;
#else
pgd_t *start = swapper_pg_dir;
#endif
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 35ea061010a1..0ea8afcb929c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (fixup_exception(regs, trapnr))
return;
+ if (fixup_bug(regs, trapnr))
+ return;
+
fail:
early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
(unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8ad91a01cbc8..2a1fa10c6a98 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -346,7 +346,7 @@ static noinline int vmalloc_fault(unsigned long address)
* Do _not_ use "current" here. We might be inside
* an interrupt in the middle of a task switch..
*/
- pgd_paddr = read_cr3();
+ pgd_paddr = read_cr3_pa();
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
if (!pmd_k)
return -1;
@@ -388,7 +388,7 @@ static bool low_pfn(unsigned long pfn)
static void dump_pagetable(unsigned long address)
{
- pgd_t *base = __va(read_cr3());
+ pgd_t *base = __va(read_cr3_pa());
pgd_t *pgd = &base[pgd_index(address)];
p4d_t *p4d;
pud_t *pud;
@@ -451,7 +451,7 @@ static noinline int vmalloc_fault(unsigned long address)
* happen within a race in page table update. In the later
* case just flush:
*/
- pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
+ pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
@@ -555,7 +555,7 @@ static int bad_address(void *p)
static void dump_pagetable(unsigned long address)
{
- pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+ pgd_t *base = __va(read_cr3_pa());
pgd_t *pgd = base + pgd_index(address);
p4d_t *p4d;
pud_t *pud;
@@ -700,7 +700,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
pgd_t *pgd;
pte_t *pte;
- pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+ pgd = __va(read_cr3_pa());
pgd += pgd_index(address);
pte = lookup_address_in_pgd(pgd, address, &level);
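read_cr3_pa() is needed because CR3 carries PCID/flag bits alongside the page-table base, so the raw register value is not a physical address. A sketch of the masking (the mask mirrors x86-64's bits 51:12 layout):

    #include <stdint.h>
    #include <stdio.h>

    #define CR3_ADDR_MASK 0x7ffffffffffff000ULL   /* bits 51:12 */

    static uint64_t cr3_pa(uint64_t cr3)
    {
            return cr3 & CR3_ADDR_MASK;
    }

    int main(void)
    {
            /* PCID 0x001 in the low bits, page-table base at 0x1234000 */
            printf("%#llx\n", (unsigned long long)cr3_pa(0x1234001ULL));
            return 0;
    }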
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
deleted file mode 100644
index 456dfdfd2249..000000000000
--- a/arch/x86/mm/gup.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Lockless get_user_pages_fast for x86
- *
- * Copyright (C) 2008 Nick Piggin
- * Copyright (C) 2008 Novell Inc.
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/vmstat.h>
-#include <linux/highmem.h>
-#include <linux/swap.h>
-#include <linux/memremap.h>
-
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-
-static inline pte_t gup_get_pte(pte_t *ptep)
-{
-#ifndef CONFIG_X86_PAE
- return READ_ONCE(*ptep);
-#else
- /*
- * With get_user_pages_fast, we walk down the pagetables without taking
- * any locks. For this we would like to load the pointers atomically,
- * but that is not possible (without expensive cmpxchg8b) on PAE. What
- * we do have is the guarantee that a pte will only either go from not
- * present to present, or present to not present or both -- it will not
- * switch to a completely different present page without a TLB flush in
- * between; something that we are blocking by holding interrupts off.
- *
- * Setting ptes from not present to present goes:
- * ptep->pte_high = h;
- * smp_wmb();
- * ptep->pte_low = l;
- *
- * And present to not present goes:
- * ptep->pte_low = 0;
- * smp_wmb();
- * ptep->pte_high = 0;
- *
- * We must ensure here that the load of pte_low sees l iff pte_high
- * sees h. We load pte_high *after* loading pte_low, which ensures we
- * don't see an older value of pte_high. *Then* we recheck pte_low,
- * which ensures that we haven't picked up a changed pte high. We might
- * have got rubbish values from pte_low and pte_high, but we are
- * guaranteed that pte_low will not have the present bit set *unless*
- * it is 'l'. And get_user_pages_fast only operates on present ptes, so
- * we're safe.
- *
- * gup_get_pte should not be used or copied outside gup.c without being
- * very careful -- it does not atomically load the pte or anything that
- * is likely to be useful for you.
- */
- pte_t pte;
-
-retry:
- pte.pte_low = ptep->pte_low;
- smp_rmb();
- pte.pte_high = ptep->pte_high;
- smp_rmb();
- if (unlikely(pte.pte_low != ptep->pte_low))
- goto retry;
-
- return pte;
-#endif
-}
-
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
-{
- while ((*nr) - nr_start) {
- struct page *page = pages[--(*nr)];
-
- ClearPageReferenced(page);
- put_page(page);
- }
-}
-
-/*
- * 'pteval' can come from a pte, pmd, pud or p4d. We only check
- * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
- * same value on all 4 types.
- */
-static inline int pte_allows_gup(unsigned long pteval, int write)
-{
- unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
-
- if (write)
- need_pte_bits |= _PAGE_RW;
-
- if ((pteval & need_pte_bits) != need_pte_bits)
- return 0;
-
- /* Check memory protection keys permissions. */
- if (!__pkru_allows_pkey(pte_flags_pkey(pteval), write))
- return 0;
-
- return 1;
-}
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct dev_pagemap *pgmap = NULL;
- int nr_start = *nr, ret = 0;
- pte_t *ptep, *ptem;
-
- /*
- * Keep the original mapped PTE value (ptem) around since we
- * might increment ptep off the end of the page when finishing
- * our loop iteration.
- */
- ptem = ptep = pte_offset_map(&pmd, addr);
- do {
- pte_t pte = gup_get_pte(ptep);
- struct page *page;
-
- /* Similar to the PMD case, NUMA hinting must take slow path */
- if (pte_protnone(pte))
- break;
-
- if (!pte_allows_gup(pte_val(pte), write))
- break;
-
- if (pte_devmap(pte)) {
- pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
- if (unlikely(!pgmap)) {
- undo_dev_pagemap(nr, nr_start, pages);
- break;
- }
- } else if (pte_special(pte))
- break;
-
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- get_page(page);
- put_dev_pagemap(pgmap);
- SetPageReferenced(page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
- if (addr == end)
- ret = 1;
- pte_unmap(ptem);
-
- return ret;
-}
-
-static inline void get_head_page_multiple(struct page *page, int nr)
-{
- VM_BUG_ON_PAGE(page != compound_head(page), page);
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_add(page, nr);
- SetPageReferenced(page);
-}
-
-static int __gup_device_huge(unsigned long pfn, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
-{
- int nr_start = *nr;
- struct dev_pagemap *pgmap = NULL;
-
- do {
- struct page *page = pfn_to_page(pfn);
-
- pgmap = get_dev_pagemap(pfn, pgmap);
- if (unlikely(!pgmap)) {
- undo_dev_pagemap(nr, nr_start, pages);
- return 0;
- }
- SetPageReferenced(page);
- pages[*nr] = page;
- get_page(page);
- put_dev_pagemap(pgmap);
- (*nr)++;
- pfn++;
- } while (addr += PAGE_SIZE, addr != end);
- return 1;
-}
-
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
-{
- unsigned long fault_pfn;
-
- fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- return __gup_device_huge(fault_pfn, addr, end, pages, nr);
-}
-
-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
- unsigned long end, struct page **pages, int *nr)
-{
- unsigned long fault_pfn;
-
- fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- return __gup_device_huge(fault_pfn, addr, end, pages, nr);
-}
-
-static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- int refs;
-
- if (!pte_allows_gup(pmd_val(pmd), write))
- return 0;
-
- VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
- if (pmd_devmap(pmd))
- return __gup_device_huge_pmd(pmd, addr, end, pages, nr);
-
- /* hugepages are never "special" */
- VM_BUG_ON(pmd_flags(pmd) & _PAGE_SPECIAL);
-
- refs = 0;
- head = pmd_page(pmd);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
- get_head_page_multiple(head, refs);
-
- return 1;
-}
-
-static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp;
-
- pmdp = pmd_offset(&pud, addr);
- do {
- pmd_t pmd = *pmdp;
-
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
- /*
- * NUMA hinting faults need to be handled in the GUP
- * slowpath for accounting purposes and so that they
- * can be serialised against THP migration.
- */
- if (pmd_protnone(pmd))
- return 0;
- if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
- return 0;
- } else {
- if (!gup_pte_range(pmd, addr, next, write, pages, nr))
- return 0;
- }
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- int refs;
-
- if (!pte_allows_gup(pud_val(pud), write))
- return 0;
-
- VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
- if (pud_devmap(pud))
- return __gup_device_huge_pud(pud, addr, end, pages, nr);
-
- /* hugepages are never "special" */
- VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
-
- refs = 0;
- head = pud_page(pud);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
- get_head_page_multiple(head, refs);
-
- return 1;
-}
-
-static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp;
-
- pudp = pud_offset(&p4d, addr);
- do {
- pud_t pud = *pudp;
-
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_large(pud))) {
- if (!gup_huge_pud(pud, addr, next, write, pages, nr))
- return 0;
- } else {
- if (!gup_pmd_range(pud, addr, next, write, pages, nr))
- return 0;
- }
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- unsigned long next;
- p4d_t *p4dp;
-
- p4dp = p4d_offset(&pgd, addr);
- do {
- p4d_t p4d = *p4dp;
-
- next = p4d_addr_end(addr, end);
- if (p4d_none(p4d))
- return 0;
- BUILD_BUG_ON(p4d_large(p4d));
- if (!gup_pud_range(p4d, addr, next, write, pages, nr))
- return 0;
- } while (p4dp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- unsigned long flags;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
- (void __user *)start, len)))
- return 0;
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch size
- * will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables and pages from being freed on x86.
- *
- * So long as we atomically load page table pointers versus teardown
- * (which we do on x86, with the above PAE exception), we can follow the
- * address down to the the page and take a ref on it.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next;
- pgd_t *pgdp;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
-
- end = start + len;
- if (end < start)
- goto slow_irqon;
-
-#ifdef CONFIG_X86_64
- if (end >> __VIRTUAL_MASK_SHIFT)
- goto slow_irqon;
-#endif
-
- /*
- * XXX: batch / limit 'nr', to avoid large irq off latency
- * needs some instrumenting to determine the common sizes used by
- * important workloads (eg. DB2), and whether limiting the batch size
- * will decrease performance.
- *
- * It seems like we're in the clear for the moment. Direct-IO is
- * the main guy that batches up lots of get_user_pages, and even
- * they are limited to 64-at-a-time which is not so many.
- */
- /*
- * This doesn't prevent pagetable teardown, but does prevent
- * the pagetables and pages from being freed on x86.
- *
- * So long as we atomically load page table pointers versus teardown
- * (which we do on x86, with the above PAE exception), we can follow the
- * address down to the page and take a ref on it.
- */
- local_irq_disable();
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = *pgdp;
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- goto slow;
- if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
- goto slow;
- } while (pgdp++, addr = next, addr != end);
- local_irq_enable();
-
- VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
- return nr;
-
- {
- int ret;
-
-slow:
- local_irq_enable();
-slow_irqon:
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
-
- ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT,
- pages, write ? FOLL_WRITE : 0);
-
- /* Have to be a bit careful with return values */
- if (nr > 0) {
- if (ret < 0)
- ret = nr;
- else
- ret += nr;
- }
-
- return ret;
- }
-}
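For reference, the kerneldoc contract above boils down to the following minimal caller sketch. This is illustrative only (pin_user_buffer and the DMA placeholder are hypothetical, not part of this patch); the one firm rule it demonstrates is that every successfully pinned page must later be released with put_page():

#include <linux/mm.h>

static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int i, pinned;

	/* Pin for write access; falls back to the slow path internally. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;			/* nothing pinned: -errno */

	/* ... hand the pinned pages to DMA, copy from them, etc. ... */

	/* Drop the reference taken on each pinned page. */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned == nr_pages ? 0 : -EFAULT;
}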
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 302f43fd9c28..adad702b39cd 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 04210a29dd60..adab1595f4bd 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -13,7 +13,7 @@ static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
if (pmd_present(*pmd))
continue;
- set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
+ set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
}
}
@@ -30,6 +30,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
if (next > end)
next = end;
+ if (info->direct_gbpages) {
+ pud_t pudval;
+
+ if (pud_present(*pud))
+ continue;
+
+ addr &= PUD_MASK;
+ pudval = __pud((addr - info->offset) | info->page_flag);
+ set_pud(pud, pudval);
+ continue;
+ }
+
if (pud_present(*pud)) {
pmd = pmd_offset(pud, 0);
ident_pmd_init(info, pmd, addr, next);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cbc87ea98751..673541eb3b3f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -161,16 +161,16 @@ static int page_size_mask;
static void __init probe_page_size_mask(void)
{
-#if !defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
* use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
* large pages into small in interrupt context, etc.
*/
- if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
+ if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
page_size_mask |= 1 << PG_LEVEL_2M;
-#endif
+ else
+ direct_gbpages = 0;
/* Enable PSE if available */
if (boot_cpu_has(X86_FEATURE_PSE))
@@ -811,10 +811,8 @@ void __init zone_sizes_init(void)
}
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
- .active_mm = &init_mm,
+ .loaded_mm = &init_mm,
.state = 0,
-#endif
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 41270b96403d..dae6a5e5ad4a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -92,12 +92,50 @@ __setup("noexec32=", nonx32_setup);
* When memory was added make sure all the processes MM have
* suitable PGD entries in the local PGD level page.
*/
+#ifdef CONFIG_X86_5LEVEL
void sync_global_pgds(unsigned long start, unsigned long end)
{
- unsigned long address;
+ unsigned long addr;
+
+ for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ const pgd_t *pgd_ref = pgd_offset_k(addr);
+ struct page *page;
+
+ /* Check for overflow */
+ if (addr < start)
+ break;
+
+ if (pgd_none(*pgd_ref))
+ continue;
+
+ spin_lock(&pgd_lock);
+ list_for_each_entry(page, &pgd_list, lru) {
+ pgd_t *pgd;
+ spinlock_t *pgt_lock;
+
+ pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+ spin_lock(pgt_lock);
+
+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
- for (address = start; address <= end; address += PGDIR_SIZE) {
- pgd_t *pgd_ref = pgd_offset_k(address);
+ spin_unlock(pgt_lock);
+ }
+ spin_unlock(&pgd_lock);
+ }
+}
+#else
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ pgd_t *pgd_ref = pgd_offset_k(addr);
const p4d_t *p4d_ref;
struct page *page;
@@ -106,7 +144,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 * handle synchronization on p4d level.
*/
BUILD_BUG_ON(pgd_none(*pgd_ref));
- p4d_ref = p4d_offset(pgd_ref, address);
+ p4d_ref = p4d_offset(pgd_ref, addr);
if (p4d_none(*p4d_ref))
continue;
@@ -117,8 +155,8 @@ void sync_global_pgds(unsigned long start, unsigned long end)
p4d_t *p4d;
spinlock_t *pgt_lock;
- pgd = (pgd_t *)page_address(page) + pgd_index(address);
- p4d = p4d_offset(pgd, address);
+ pgd = (pgd_t *)page_address(page) + pgd_index(addr);
+ p4d = p4d_offset(pgd, addr);
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
@@ -135,6 +173,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
spin_unlock(&pgd_lock);
}
}
+#endif
/*
* NOTE: This function is marked __ref because it calls __init function
@@ -585,6 +624,57 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
return paddr_last;
}
+static unsigned long __meminit
+phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
+ unsigned long page_size_mask)
+{
+ unsigned long paddr_next, paddr_last = paddr_end;
+ unsigned long vaddr = (unsigned long)__va(paddr);
+ int i = p4d_index(vaddr);
+
+ if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+ return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
+
+ for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+ p4d_t *p4d;
+ pud_t *pud;
+
+ vaddr = (unsigned long)__va(paddr);
+ p4d = p4d_page + p4d_index(vaddr);
+ paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+ if (paddr >= paddr_end) {
+ if (!after_bootmem &&
+ !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+ E820_TYPE_RAM) &&
+ !e820__mapped_any(paddr & P4D_MASK, paddr_next,
+ E820_TYPE_RESERVED_KERN))
+ set_p4d(p4d, __p4d(0));
+ continue;
+ }
+
+ if (!p4d_none(*p4d)) {
+ pud = pud_offset(p4d, 0);
+ paddr_last = phys_pud_init(pud, paddr,
+ paddr_end,
+ page_size_mask);
+ __flush_tlb_all();
+ continue;
+ }
+
+ pud = alloc_low_page();
+ paddr_last = phys_pud_init(pud, paddr, paddr_end,
+ page_size_mask);
+
+ spin_lock(&init_mm.page_table_lock);
+ p4d_populate(&init_mm, p4d, pud);
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ __flush_tlb_all();
+
+ return paddr_last;
+}
+
/*
* Create page table mapping for the physical memory for specific physical
* addresses. The virtual and physical addresses have to be aligned on PMD level
@@ -606,26 +696,26 @@ kernel_physical_mapping_init(unsigned long paddr_start,
for (; vaddr < vaddr_end; vaddr = vaddr_next) {
pgd_t *pgd = pgd_offset_k(vaddr);
p4d_t *p4d;
- pud_t *pud;
vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
- BUILD_BUG_ON(pgd_none(*pgd));
- p4d = p4d_offset(pgd, vaddr);
- if (p4d_val(*p4d)) {
- pud = (pud_t *)p4d_page_vaddr(*p4d);
- paddr_last = phys_pud_init(pud, __pa(vaddr),
+ if (pgd_val(*pgd)) {
+ p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+ paddr_last = phys_p4d_init(p4d, __pa(vaddr),
__pa(vaddr_end),
page_size_mask);
continue;
}
- pud = alloc_low_page();
- paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+ p4d = alloc_low_page();
+ paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
page_size_mask);
spin_lock(&init_mm.page_table_lock);
- p4d_populate(&init_mm, p4d, pud);
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ pgd_populate(&init_mm, pgd, p4d);
+ else
+ p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
@@ -990,7 +1080,13 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
pud_base = pud_offset(p4d, 0);
remove_pud_table(pud_base, addr, next, direct);
- free_pud_table(pud_base, p4d);
+ /*
+ * For 4-level page tables we do not want to free PUDs, but in the
+ * 5-level case we should free them. This code will have to change
+ * to adapt for boot-time switching between 4 and 5 level page tables.
+ */
+ if (CONFIG_PGTABLE_LEVELS == 5)
+ free_pud_table(pud_base, p4d);
}
if (direct)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bbc558b88a88..4c1b5fd0c7ad 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -424,7 +424,7 @@ static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
/* Don't assume we're using swapper_pg_dir at this point */
- pgd_t *base = __va(read_cr3());
+ pgd_t *base = __va(read_cr3_pa());
pgd_t *pgd = &base[pgd_index(addr)];
p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud = pud_offset(p4d, addr);
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0c7d8129bed6..88215ac16b24 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,7 +12,7 @@
#include <asm/tlbflush.h>
#include <asm/sections.h>
-extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];
static int __init map_range(struct range *range)
@@ -109,8 +109,8 @@ void __init kasan_early_init(void)
for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
kasan_zero_p4d[i] = __p4d(p4d_val);
- kasan_map_early_shadow(early_level4_pgt);
- kasan_map_early_shadow(init_level4_pgt);
+ kasan_map_early_shadow(early_top_pgt);
+ kasan_map_early_shadow(init_top_pgt);
}
void __init kasan_init(void)
@@ -121,8 +121,8 @@ void __init kasan_init(void)
register_die_notifier(&kasan_die_notifier);
#endif
- memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
- load_cr3(early_level4_pgt);
+ memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+ load_cr3(early_top_pgt);
__flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -148,7 +148,7 @@ void __init kasan_init(void)
kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);
- load_cr3(init_level4_pgt);
+ load_cr3(init_top_pgt);
__flush_tlb_all();
/*
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index aed206475aa7..af599167fe3c 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -6,12 +6,12 @@
*
* Entropy is generated using the KASLR early boot functions now shared in
* the lib directory (originally written by Kees Cook). Randomization is
- * done on PGD & PUD page table levels to increase possible addresses. The
- * physical memory mapping code was adapted to support PUD level virtual
- * addresses. This implementation on the best configuration provides 30,000
- * possible virtual addresses on average for each memory region. An additional
- * low memory page is used to ensure each CPU can start with a PGD aligned
- * virtual address (for realmode).
+ * done on PGD & P4D/PUD page table levels to increase possible addresses.
+ * The physical memory mapping code was adapted to support P4D/PUD level
+ * virtual addresses. This implementation on the best configuration provides
+ * 30,000 possible virtual addresses on average for each memory region.
+ * An additional low memory page is used to ensure each CPU can start with
+ * a PGD aligned virtual address (for realmode).
*
* The order of each memory region is not changed. The feature looks at
* the available space for the regions based on different configuration
@@ -70,7 +70,7 @@ static __initdata struct kaslr_memory_region {
unsigned long *base;
unsigned long size_tb;
} kaslr_regions[] = {
- { &page_offset_base, 64/* Maximum */ },
+ { &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
{ &vmalloc_base, VMALLOC_SIZE_TB },
{ &vmemmap_base, 1 },
};
@@ -142,7 +142,10 @@ void __init kernel_randomize_memory(void)
*/
entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
prandom_bytes_state(&rand_state, &rand, sizeof(rand));
- entropy = (rand % (entropy + 1)) & PUD_MASK;
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ entropy = (rand % (entropy + 1)) & P4D_MASK;
+ else
+ entropy = (rand % (entropy + 1)) & PUD_MASK;
vaddr += entropy;
*kaslr_regions[i].base = vaddr;
@@ -151,27 +154,21 @@ void __init kernel_randomize_memory(void)
* randomization alignment.
*/
vaddr += get_padding(&kaslr_regions[i]);
- vaddr = round_up(vaddr + 1, PUD_SIZE);
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ vaddr = round_up(vaddr + 1, P4D_SIZE);
+ else
+ vaddr = round_up(vaddr + 1, PUD_SIZE);
remain_entropy -= entropy;
}
}
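A quick worked example of the masking above, with the standard x86-64 shifts assumed (PUD_SIZE = 1 << 30, P4D_SIZE = 1 << 39): masking with PUD_MASK keeps each randomized base 1 GiB aligned on 4-level paging, while P4D_MASK keeps it 512 GiB aligned on 5-level paging. A standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned long pud_size = 1UL << 30;	/* 4-level granule: 1 GiB   */
	unsigned long p4d_size = 1UL << 39;	/* 5-level granule: 512 GiB */
	unsigned long entropy  = 0x123456789abUL;	/* arbitrary sample */

	/* 'x & MASK' in the kernel code rounds down to the granule. */
	printf("4-level aligned: %#lx\n", entropy & ~(pud_size - 1));
	printf("5-level aligned: %#lx\n", entropy & ~(p4d_size - 1));
	return 0;
}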
-/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
- */
-void __meminit init_trampoline(void)
+static void __meminit init_trampoline_pud(void)
{
unsigned long paddr, paddr_next;
pgd_t *pgd;
pud_t *pud_page, *pud_page_tramp;
int i;
- if (!kaslr_memory_enabled()) {
- init_trampoline_default();
- return;
- }
-
pud_page_tramp = alloc_low_page();
paddr = 0;
@@ -192,3 +189,49 @@ void __meminit init_trampoline(void)
set_pgd(&trampoline_pgd_entry,
__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
+
+static void __meminit init_trampoline_p4d(void)
+{
+ unsigned long paddr, paddr_next;
+ pgd_t *pgd;
+ p4d_t *p4d_page, *p4d_page_tramp;
+ int i;
+
+ p4d_page_tramp = alloc_low_page();
+
+ paddr = 0;
+ pgd = pgd_offset_k((unsigned long)__va(paddr));
+ p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
+
+ for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
+ p4d_t *p4d, *p4d_tramp;
+ unsigned long vaddr = (unsigned long)__va(paddr);
+
+ p4d_tramp = p4d_page_tramp + p4d_index(paddr);
+ p4d = p4d_page + p4d_index(vaddr);
+ paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
+
+ *p4d_tramp = *p4d;
+ }
+
+ set_pgd(&trampoline_pgd_entry,
+ __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. Consume only 1 low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+
+ if (!kaslr_memory_enabled()) {
+ init_trampoline_default();
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_X86_5LEVEL))
+ init_trampoline_p4d();
+ else
+ init_trampoline_pud();
+}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 19ad095b41df..797295e792b2 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -74,9 +74,6 @@ static int mmap_is_legacy(void)
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
- if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
- return 1;
-
return sysctl_legacy_va_layout;
}
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 6b7ce6279133..aca6295350f3 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@ void __init initmem_init(void)
printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
+ __vmalloc_start_set = true;
setup_bootmem_allocator();
}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1dcd2be4cce4..c8520b2c62d2 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
unsigned int i, level;
unsigned long addr;
- BUG_ON(irqs_disabled());
+ BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
WARN_ON(PAGE_ALIGN(start) != start);
on_each_cpu(__cpa_flush_range, NULL, 1);
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
index 38868adf07ea..f6ae6830b341 100644
--- a/arch/x86/mm/testmmiotrace.c
+++ b/arch/x86/mm/testmmiotrace.c
@@ -9,7 +9,7 @@
#include <linux/mmiotrace.h>
static unsigned long mmio_address;
-module_param(mmio_address, ulong, 0);
+module_param_hw(mmio_address, ulong, iomem, 0);
MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
"(or 8 MB if read_far is non-zero).");
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6e7bedf69af7..014d07a80053 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -15,7 +15,7 @@
#include <linux/debugfs.h>
/*
- * Smarter SMP flushing macros.
+ * TLB flushing, formerly SMP-only
* c/o Linus Torvalds.
*
* These mean you can really definitely utterly forget about
@@ -28,39 +28,28 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/
-#ifdef CONFIG_SMP
-
-struct flush_tlb_info {
- struct mm_struct *flush_mm;
- unsigned long flush_start;
- unsigned long flush_end;
-};
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- */
void leave_mm(int cpu)
{
- struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
+ struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+
+ /*
+ * It's plausible that we're in lazy TLB mode while our mm is init_mm.
+ * If so, our callers still expect us to flush the TLB, but there
+ * aren't any user TLB entries in init_mm to worry about.
+ *
+ * This needs to happen before any other sanity checks due to
+ * intel_idle's shenanigans.
+ */
+ if (loaded_mm == &init_mm)
+ return;
+
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
BUG();
- if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
- cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
- load_cr3(swapper_pg_dir);
- /*
- * This gets called in the idle path where RCU
- * functions differently. Tracing normally
- * uses RCU, so we have to call the tracepoint
- * specially here.
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
- }
+
+ switch_mm(NULL, &init_mm, NULL);
}
EXPORT_SYMBOL_GPL(leave_mm);
-#endif /* CONFIG_SMP */
-
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
@@ -75,216 +64,167 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
+ struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
- if (likely(prev != next)) {
- if (IS_ENABLED(CONFIG_VMAP_STACK)) {
- /*
- * If our current stack is in vmalloc space and isn't
- * mapped in the new pgd, we'll double-fault. Forcibly
- * map it.
- */
- unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
-
- pgd_t *pgd = next->pgd + stack_pgd_index;
-
- if (unlikely(pgd_none(*pgd)))
- set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
- }
+ /*
+ * NB: The scheduler will call us with prev == next when
+ * switching from lazy TLB mode to normal mode if active_mm
+ * isn't changing. When this happens, there is no guarantee
+ * that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
+ *
+ * NB: leave_mm() calls us with prev == NULL and tsk == NULL.
+ */
-#ifdef CONFIG_SMP
- this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
- this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif
+ this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
- cpumask_set_cpu(cpu, mm_cpumask(next));
+ if (real_prev == next) {
+ /*
+ * There's nothing to do: we always keep the per-mm control
+ * regs in sync with cpu_tlbstate.loaded_mm. Just
+ * sanity-check mm_cpumask.
+ */
+ if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ return;
+ }
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
/*
- * Re-load page tables.
- *
- * This logic has an ordering constraint:
- *
- * CPU 0: Write to a PTE for 'next'
- * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
- * CPU 1: set bit 1 in next's mm_cpumask
- * CPU 1: load from the PTE that CPU 0 writes (implicit)
- *
- * We need to prevent an outcome in which CPU 1 observes
- * the new PTE value and CPU 0 observes bit 1 clear in
- * mm_cpumask. (If that occurs, then the IPI will never
- * be sent, and CPU 0's TLB will contain a stale entry.)
- *
- * The bad outcome can occur if either CPU's load is
- * reordered before that CPU's store, so both CPUs must
- * execute full barriers to prevent this from happening.
- *
- * Thus, switch_mm needs a full barrier between the
- * store to mm_cpumask and any operation that could load
- * from next->pgd. TLB fills are special and can happen
- * due to instruction fetches or for no reason at all,
- * and neither LOCK nor MFENCE orders them.
- * Fortunately, load_cr3() is serializing and gives the
- * ordering guarantee we need.
- *
+ * If our current stack is in vmalloc space and isn't
+ * mapped in the new pgd, we'll double-fault. Forcibly
+ * map it.
*/
- load_cr3(next->pgd);
+ unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ pgd_t *pgd = next->pgd + stack_pgd_index;
- /* Stop flush ipis for the previous mm */
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ if (unlikely(pgd_none(*pgd)))
+ set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
+ }
- /* Load per-mm CR4 state */
- load_mm_cr4(next);
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
- /*
- * Load the LDT, if the LDT is different.
- *
- * It's possible that prev->context.ldt doesn't match
- * the LDT register. This can happen if leave_mm(prev)
- * was called and then modify_ldt changed
- * prev->context.ldt but suppressed an IPI to this CPU.
- * In this case, prev->context.ldt != NULL, because we
- * never set context.ldt to NULL while the mm still
- * exists. That means that next->context.ldt !=
- * prev->context.ldt, because mms never share an LDT.
- */
- if (unlikely(prev->context.ldt != next->context.ldt))
- load_mm_ldt(next);
-#endif
+ WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /*
+ * Re-load page tables.
+ *
+ * This logic has an ordering constraint:
+ *
+ * CPU 0: Write to a PTE for 'next'
+ * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
+ * CPU 1: set bit 1 in next's mm_cpumask
+ * CPU 1: load from the PTE that CPU 0 writes (implicit)
+ *
+ * We need to prevent an outcome in which CPU 1 observes
+ * the new PTE value and CPU 0 observes bit 1 clear in
+ * mm_cpumask. (If that occurs, then the IPI will never
+ * be sent, and CPU 0's TLB will contain a stale entry.)
+ *
+ * The bad outcome can occur if either CPU's load is
+ * reordered before that CPU's store, so both CPUs must
+ * execute full barriers to prevent this from happening.
+ *
+ * Thus, switch_mm needs a full barrier between the
+ * store to mm_cpumask and any operation that could load
+ * from next->pgd. TLB fills are special and can happen
+ * due to instruction fetches or for no reason at all,
+ * and neither LOCK nor MFENCE orders them.
+ * Fortunately, load_cr3() is serializing and gives the
+ * ordering guarantee we need.
+ */
+ load_cr3(next->pgd);
+
+ /*
+ * This gets called via leave_mm() in the idle path where RCU
+ * functions differently. Tracing normally uses RCU, so we have to
+ * call the tracepoint specially here.
+ */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+ /* Stop flush ipis for the previous mm */
+ WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
+ real_prev != &init_mm);
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+
+ /* Load per-mm CR4 and LDTR state */
+ load_mm_cr4(next);
+ switch_ldt(real_prev, next);
+}
+
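The ordering constraint documented above condenses to a litmus-style sketch. Everything here is illustrative (ptep, new_pte, next and send_flush_ipi() are stand-ins, not kernel API); the point is the store, then barrier, then load pairing on both CPUs:

/* CPU 0, the flusher: store the PTE, then load the cpumask bit. */
static void flusher_cpu0(void)
{
	WRITE_ONCE(*ptep, new_pte);			/* store */
	smp_mb();					/* full barrier */
	if (cpumask_test_cpu(1, mm_cpumask(next)))	/* load */
		send_flush_ipi(1);
}

/* CPU 1, the switcher: store the cpumask bit, then (implicitly) load PTEs. */
static void switcher_cpu1(void)
{
	cpumask_set_cpu(1, mm_cpumask(next));	/* store */
	load_cr3(next->pgd);			/* serializing: acts as the barrier */
	/* subsequent TLB fills are the implicit PTE loads */
}

If either CPU's load were reordered before its own store, CPU 1 could cache the stale translation while CPU 0 skips the IPI, which is exactly the outcome the comment rules out.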
+static void flush_tlb_func_common(const struct flush_tlb_info *f,
+ bool local, enum tlb_flush_reason reason)
+{
+ /* This code cannot presently handle being reentered. */
+ VM_WARN_ON(!irqs_disabled());
+
+ if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
+ leave_mm(smp_processor_id());
+ return;
}
-#ifdef CONFIG_SMP
- else {
- this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
- BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-
- if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
- /*
- * On established mms, the mm_cpumask is only changed
- * from irq context, from ptep_clear_flush() while in
- * lazy tlb mode, and here. Irqs are blocked during
- * schedule, protecting us from simultaneous changes.
- */
- cpumask_set_cpu(cpu, mm_cpumask(next));
- /*
- * We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload CR3
- * to make sure to use no freed page tables.
- *
- * As above, load_cr3() is serializing and orders TLB
- * fills with respect to the mm_cpumask write.
- */
- load_cr3(next->pgd);
- trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
- load_mm_cr4(next);
- load_mm_ldt(next);
+ if (f->end == TLB_FLUSH_ALL) {
+ local_flush_tlb();
+ if (local)
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ trace_tlb_flush(reason, TLB_FLUSH_ALL);
+ } else {
+ unsigned long addr;
+ unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+ addr = f->start;
+ while (addr < f->end) {
+ __flush_tlb_single(addr);
+ addr += PAGE_SIZE;
}
+ if (local)
+ count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
+ trace_tlb_flush(reason, nr_pages);
}
-#endif
}
-#ifdef CONFIG_SMP
+static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+{
+ const struct flush_tlb_info *f = info;
-/*
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) set cpu_tlbstate to TLBSTATE_OK
- * Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
- * if cpu0 was in lazy tlb mode.
- * 1a2) update cpu active_mm
- * Now cpu0 accepts tlb flushes for the new mm.
- * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
- * Now the other cpus will send tlb flush ipis.
- * 1a4) change cr3.
- * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
- * Stop ipi delivery for the old mm. This is not synchronized with
- * the other cpus, but flush_tlb_func ignores flush ipis for the wrong
- * mm, and in the worst case we perform a superfluous tlb flush.
- * 1b) thread switch without mm change
- * cpu active_mm is correct, cpu0 already handles flush ipis.
- * 1b1) set cpu_tlbstate to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- * Atomically set the bit [other cpus will start sending flush ipis],
- * and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %%esp, ie current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- * runs in kernel space, the cpu could load tlb entries for user space
- * pages.
- *
- * The good news is that cpu_tlbstate is local to each cpu, no
- * write/read ordering problems.
- */
+ flush_tlb_func_common(f, true, reason);
+}
-/*
- * TLB flush function:
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- */
-static void flush_tlb_func(void *info)
+static void flush_tlb_func_remote(void *info)
{
- struct flush_tlb_info *f = info;
+ const struct flush_tlb_info *f = info;
inc_irq_stat(irq_tlb_count);
- if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+ if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
return;
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
- if (f->flush_end == TLB_FLUSH_ALL) {
- local_flush_tlb();
- trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
- } else {
- unsigned long addr;
- unsigned long nr_pages =
- (f->flush_end - f->flush_start) / PAGE_SIZE;
- addr = f->flush_start;
- while (addr < f->flush_end) {
- __flush_tlb_single(addr);
- addr += PAGE_SIZE;
- }
- trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
- }
- } else
- leave_mm(smp_processor_id());
-
+ flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
}
void native_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm, unsigned long start,
- unsigned long end)
+ const struct flush_tlb_info *info)
{
- struct flush_tlb_info info;
-
- info.flush_mm = mm;
- info.flush_start = start;
- info.flush_end = end;
-
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
- if (end == TLB_FLUSH_ALL)
+ if (info->end == TLB_FLUSH_ALL)
trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
else
trace_tlb_flush(TLB_REMOTE_SEND_IPI,
- (end - start) >> PAGE_SHIFT);
+ (info->end - info->start) >> PAGE_SHIFT);
if (is_uv_system()) {
unsigned int cpu;
cpu = smp_processor_id();
- cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
+ cpumask = uv_flush_tlb_others(cpumask, info);
if (cpumask)
- smp_call_function_many(cpumask, flush_tlb_func,
- &info, 1);
+ smp_call_function_many(cpumask, flush_tlb_func_remote,
+ (void *)info, 1);
return;
}
- smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+ smp_call_function_many(cpumask, flush_tlb_func_remote,
+ (void *)info, 1);
}
/*
@@ -302,85 +242,41 @@ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
- unsigned long addr;
- /* do a global flush by default */
- unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
-
- preempt_disable();
+ int cpu;
- if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
- base_pages_to_flush = (end - start) >> PAGE_SHIFT;
- if (base_pages_to_flush > tlb_single_page_flush_ceiling)
- base_pages_to_flush = TLB_FLUSH_ALL;
+ struct flush_tlb_info info = {
+ .mm = mm,
+ };
- if (current->active_mm != mm) {
- /* Synchronize with switch_mm. */
- smp_mb();
+ cpu = get_cpu();
- goto out;
- }
-
- if (!current->mm) {
- leave_mm(smp_processor_id());
+ /* Synchronize with switch_mm. */
+ smp_mb();
- /* Synchronize with switch_mm. */
- smp_mb();
-
- goto out;
- }
-
- /*
- * Both branches below are implicit full barriers (MOV to CR or
- * INVLPG) that synchronize with switch_mm.
- */
- if (base_pages_to_flush == TLB_FLUSH_ALL) {
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- local_flush_tlb();
+ /* Should we flush just the requested range? */
+ if ((end != TLB_FLUSH_ALL) &&
+ !(vmflag & VM_HUGETLB) &&
+ ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+ info.start = start;
+ info.end = end;
} else {
- /* flush range by one by one 'invlpg' */
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
- __flush_tlb_single(addr);
- }
- }
- trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
-out:
- if (base_pages_to_flush == TLB_FLUSH_ALL) {
- start = 0UL;
- end = TLB_FLUSH_ALL;
+ info.start = 0UL;
+ info.end = TLB_FLUSH_ALL;
}
- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, start, end);
- preempt_enable();
-}
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
-{
- struct mm_struct *mm = vma->vm_mm;
-
- preempt_disable();
-
- if (current->active_mm == mm) {
- if (current->mm) {
- /*
- * Implicit full barrier (INVLPG) that synchronizes
- * with switch_mm.
- */
- __flush_tlb_one(start);
- } else {
- leave_mm(smp_processor_id());
-
- /* Synchronize with switch_mm. */
- smp_mb();
- }
+ if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
+ VM_WARN_ON(irqs_disabled());
+ local_irq_disable();
+ flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+ local_irq_enable();
}
- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
-
- preempt_enable();
+ if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
+ flush_tlb_others(mm_cpumask(mm), &info);
+ put_cpu();
}
+
static void do_flush_tlb_all(void *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -401,7 +297,7 @@ static void do_kernel_range_flush(void *info)
unsigned long addr;
 /* flush the range one page at a time with 'invlpg' */
- for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
+ for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
__flush_tlb_single(addr);
}
@@ -410,16 +306,40 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/* Balance as user space task's flush, a bit conservative */
if (end == TLB_FLUSH_ALL ||
- (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+ (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
on_each_cpu(do_flush_tlb_all, NULL, 1);
} else {
struct flush_tlb_info info;
- info.flush_start = start;
- info.flush_end = end;
+ info.start = start;
+ info.end = end;
on_each_cpu(do_kernel_range_flush, &info, 1);
}
}
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+ struct flush_tlb_info info = {
+ .mm = NULL,
+ .start = 0UL,
+ .end = TLB_FLUSH_ALL,
+ };
+
+ int cpu = get_cpu();
+
+ if (cpumask_test_cpu(cpu, &batch->cpumask)) {
+ VM_WARN_ON(irqs_disabled());
+ local_irq_disable();
+ flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
+ local_irq_enable();
+ }
+
+ if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
+ flush_tlb_others(&batch->cpumask, &info);
+ cpumask_clear(&batch->cpumask);
+
+ put_cpu();
+}
+
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -465,5 +385,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
-
-#endif /* CONFIG_SMP */
diff --git a/arch/x86/net/Makefile b/arch/x86/net/Makefile
index 90568c33ddb0..fefb4b619598 100644
--- a/arch/x86/net/Makefile
+++ b/arch/x86/net/Makefile
@@ -1,4 +1,6 @@
#
# Arch-specific network modules
#
+OBJECT_FILES_NON_STANDARD_bpf_jit.o += y
+
obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index f1d83b34c329..2f56e1ed61c3 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,4 +1,5 @@
OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y
obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 7e76a4d8304b..f084d8718ac4 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void)
/*
* We don't do virtual mode, since we don't do runtime services, on
- * non-native EFI
+ * non-native EFI. With efi=old_map, we don't do runtime services in
+ * the kexec kernel because in the initial boot something else might
+ * have been mapped at these virtual addresses.
*/
- if (!efi_is_native()) {
+ if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
efi_memmap_unmap();
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
@@ -1012,7 +1014,6 @@ static void __init __efi_enter_virtual_mode(void)
* necessary relocation fixups for the new virtual addresses.
*/
efi_runtime_update_mappings();
- efi_dump_pagetable();
/* clean DUMMY object */
efi_delete_dummy_variable();
@@ -1027,6 +1028,8 @@ void __init efi_enter_virtual_mode(void)
kexec_enter_virtual_mode();
else
__efi_enter_virtual_mode();
+
+ efi_dump_pagetable();
}
/*
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 3481268da3d0..52f7faa1538f 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -44,7 +44,14 @@ int __init efi_alloc_page_tables(void)
}
void efi_sync_low_kernel_mappings(void) {}
-void __init efi_dump_pagetable(void) {}
+
+void __init efi_dump_pagetable(void)
+{
+#ifdef CONFIG_EFI_PGT_DUMP
+ ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+#endif
+}
+
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
return 0;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index c488625c9712..9bf72f5bfedb 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -71,14 +71,16 @@ static void __init early_code_mapping_set_exec(int executable)
pgd_t * __init efi_call_phys_prolog(void)
{
- unsigned long vaddress;
- pgd_t *save_pgd;
+ unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
+ pgd_t *save_pgd, *pgd_k, *pgd_efi;
+ p4d_t *p4d, *p4d_k, *p4d_efi;
+ pud_t *pud;
int pgd;
- int n_pgds;
+ int n_pgds, i, j;
if (!efi_enabled(EFI_OLD_MEMMAP)) {
- save_pgd = (pgd_t *)read_cr3();
+ save_pgd = (pgd_t *)__read_cr3();
write_cr3((unsigned long)efi_scratch.efi_pgt);
goto out;
}
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void)
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
+ /*
+ * Build 1:1 identity mapping for efi=old_map usage. Note that
+ * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
+ * it is PUD_SIZE aligned with KASLR enabled. So for a given physical
+ * address X, pud_index(X) != pud_index(__va(X)); we can only copy the
+ * PUD entry of __va(X) to fill in the PUD entry of X to build a 1:1 mapping.
+ * This means here we can only reuse the PMD tables of the direct mapping.
+ */
for (pgd = 0; pgd < n_pgds; pgd++) {
- save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
- vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
- set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+ addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
+ vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
+ pgd_efi = pgd_offset_k(addr_pgd);
+ save_pgd[pgd] = *pgd_efi;
+
+ p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
+ if (!p4d) {
+ pr_err("Failed to allocate p4d table!\n");
+ goto out;
+ }
+
+ for (i = 0; i < PTRS_PER_P4D; i++) {
+ addr_p4d = addr_pgd + i * P4D_SIZE;
+ p4d_efi = p4d + p4d_index(addr_p4d);
+
+ pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
+ if (!pud) {
+ pr_err("Failed to allocate pud table!\n");
+ goto out;
+ }
+
+ for (j = 0; j < PTRS_PER_PUD; j++) {
+ addr_pud = addr_p4d + j * PUD_SIZE;
+
+ if (addr_pud > (max_pfn << PAGE_SHIFT))
+ break;
+
+ vaddr = (unsigned long)__va(addr_pud);
+
+ pgd_k = pgd_offset_k(vaddr);
+ p4d_k = p4d_offset(pgd_k, vaddr);
+ pud[j] = *pud_offset(p4d_k, vaddr);
+ }
+ }
}
out:
__flush_tlb_all();
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
/*
* After the lock is released, the original page table is restored.
*/
- int pgd_idx;
+ int pgd_idx, i;
int nr_pgds;
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
if (!efi_enabled(EFI_OLD_MEMMAP)) {
write_cr3((unsigned long)save_pgd);
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
- for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++)
+ for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
+ pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
+ if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+ continue;
+
+ for (i = 0; i < PTRS_PER_P4D; i++) {
+ p4d = p4d_offset(pgd,
+ pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+ if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+ continue;
+
+ pud = (pud_t *)p4d_page_vaddr(*p4d);
+ pud_free(&init_mm, pud);
+ }
+
+ p4d = (p4d_t *)pgd_page_vaddr(*pgd);
+ p4d_free(&init_mm, p4d);
+ }
+
kfree(save_pgd);
__flush_tlb_all();
@@ -526,7 +589,10 @@ void __init efi_runtime_update_mappings(void)
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
- ptdump_walk_pgd_level(NULL, efi_pgd);
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ ptdump_walk_pgd_level(NULL, swapper_pg_dir);
+ else
+ ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}
@@ -583,7 +649,7 @@ efi_status_t efi_thunk_set_virtual_address_map(
efi_sync_low_kernel_mappings();
local_irq_save(flags);
- efi_scratch.prev_cr3 = read_cr3();
+ efi_scratch.prev_cr3 = __read_cr3();
write_cr3((unsigned long)efi_scratch.efi_pgt);
__flush_tlb_all();
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 26615991d69c..8a99a2e96537 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -15,12 +15,66 @@
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/uv/uv.h>
+#include <asm/cpu_device_id.h>
#define EFI_MIN_RESERVE 5120
#define EFI_DUMMY_GUID \
EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
+#define QUARK_CSH_SIGNATURE 0x5f435348 /* _CSH */
+#define QUARK_SECURITY_HEADER_SIZE 0x400
+
+/*
+ * Header prepended to the standard EFI capsule on Quark systems that are based
+ * on Intel firmware BSP.
+ * @csh_signature: Unique identifier to sanity check signed module
+ * presence ("_CSH").
+ * @version: Current version of CSH used. Should be one for Quark A0.
+ * @modulesize: Size of the entire module including the module header
+ * and payload.
+ * @security_version_number_index: Index of SVN to use for validation of signed
+ * module.
+ * @security_version_number: Used to prevent against roll back of modules.
+ * @rsvd_module_id: Currently unused for Clanton (Quark).
+ * @rsvd_module_vendor: Vendor Identifier. For Intel products value is
+ * 0x00008086.
+ * @rsvd_date: BCD representation of build date as yyyymmdd, where
+ * yyyy=4 digit year, mm=1-12, dd=1-31.
+ * @headersize: Total length of the header including any
+ * padding optionally added by the signing tool.
+ * @hash_algo: What Hash is used in the module signing.
+ * @cryp_algo: What Crypto is used in the module signing.
+ * @keysize: Total length of the key data including any
+ * padding optionally added by the signing tool.
+ * @signaturesize: Total length of the signature including any
+ * padding optionally added by the signing tool.
+ * @rsvd_next_header: 32-bit pointer to the next Secure Boot Module in the
+ * chain, if there is a next header.
+ * @rsvd: Reserved, padding structure to required size.
+ *
+ * See also QuartSecurityHeader_t in
+ * Quark_EDKII_v1.2.1.1/QuarkPlatformPkg/Include/QuarkBootRom.h
+ * from https://downloadcenter.intel.com/download/23197/Intel-Quark-SoC-X1000-Board-Support-Package-BSP
+ */
+struct quark_security_header {
+ u32 csh_signature;
+ u32 version;
+ u32 modulesize;
+ u32 security_version_number_index;
+ u32 security_version_number;
+ u32 rsvd_module_id;
+ u32 rsvd_module_vendor;
+ u32 rsvd_date;
+ u32 headersize;
+ u32 hash_algo;
+ u32 cryp_algo;
+ u32 keysize;
+ u32 signaturesize;
+ u32 rsvd_next_header;
+ u32 rsvd[2];
+};
+
static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 };
static bool efi_no_storage_paranoia;
@@ -360,6 +414,9 @@ void __init efi_free_boot_services(void)
free_bootmem_late(start, size);
}
+ if (!num_entries)
+ return;
+
new_size = efi.memmap.desc_size * num_entries;
new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
@@ -501,3 +558,86 @@ bool efi_poweroff_required(void)
{
return acpi_gbl_reduced_hardware || acpi_no_s5;
}
+
+#ifdef CONFIG_EFI_CAPSULE_QUIRK_QUARK_CSH
+
+static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
+ size_t hdr_bytes)
+{
+ struct quark_security_header *csh = *pkbuff;
+
+ /* Only process data block that is larger than the security header */
+ if (hdr_bytes < sizeof(struct quark_security_header))
+ return 0;
+
+ if (csh->csh_signature != QUARK_CSH_SIGNATURE ||
+ csh->headersize != QUARK_SECURITY_HEADER_SIZE)
+ return 1;
+
+ /* Only process data block if EFI header is included */
+ if (hdr_bytes < QUARK_SECURITY_HEADER_SIZE +
+ sizeof(efi_capsule_header_t))
+ return 0;
+
+ pr_debug("Quark security header detected\n");
+
+ if (csh->rsvd_next_header != 0) {
+ pr_err("multiple Quark security headers not supported\n");
+ return -EINVAL;
+ }
+
+ *pkbuff += csh->headersize;
+ cap_info->total_size = csh->headersize;
+
+ /*
+ * Update the first page pointer to skip over the CSH header.
+ */
+ cap_info->pages[0] += csh->headersize;
+
+ return 1;
+}
+
+#define ICPU(family, model, quirk_handler) \
+ { X86_VENDOR_INTEL, family, model, X86_FEATURE_ANY, \
+ (unsigned long)&quirk_handler }
+
+static const struct x86_cpu_id efi_capsule_quirk_ids[] = {
+ ICPU(5, 9, qrk_capsule_setup_info), /* Intel Quark X1000 */
+ { }
+};
+
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes)
+{
+ int (*quirk_handler)(struct capsule_info *, void **, size_t);
+ const struct x86_cpu_id *id;
+ int ret;
+
+ if (hdr_bytes < sizeof(efi_capsule_header_t))
+ return 0;
+
+ cap_info->total_size = 0;
+
+ id = x86_match_cpu(efi_capsule_quirk_ids);
+ if (id) {
+ /*
+ * The quirk handler is supposed to return
+ * - a value > 0 if the setup should continue, after advancing
+ * kbuff as needed
+ * - 0 if not enough hdr_bytes are available yet
+ * - a negative error code otherwise
+ */
+ quirk_handler = (typeof(quirk_handler))id->driver_data;
+ ret = quirk_handler(cap_info, &kbuff, hdr_bytes);
+ if (ret <= 0)
+ return ret;
+ }
+
+ memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
+
+ cap_info->total_size += cap_info->header.imagesize;
+
+ return __efi_capsule_setup_info(cap_info);
+}
+
+#endif
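To make the return-value contract above concrete, here is a hypothetical second handler for an imagined 64-byte vendor header. It is a sketch of the protocol only; no such platform quirk exists in this patch:

static int example_capsule_setup_info(struct capsule_info *cap_info,
				      void **pkbuff, size_t hdr_bytes)
{
	const size_t vendor_hdr_size = 64;	/* assumed header length */

	/* 0: not enough header bytes buffered yet, try again later. */
	if (hdr_bytes < vendor_hdr_size + sizeof(efi_capsule_header_t))
		return 0;

	/* > 0: skip the vendor header and let common setup continue. */
	*pkbuff += vendor_hdr_size;
	cap_info->total_size = vendor_hdr_size;
	return 1;
}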
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
index c5350fd27d70..0668aaff8bfe 100644
--- a/arch/x86/platform/olpc/olpc-xo1-pm.c
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -77,7 +77,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
{
- void *pgd_addr = __va(read_cr3());
+ void *pgd_addr = __va(read_cr3_pa());
/* Program wakeup mask (using dword access to CS5536_PM1_EN) */
outl(wakeup_mask << 16, acpi_base + CS5536_PM1_STS);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 42e65fee5673..2983faab5b18 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -456,12 +456,13 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
*/
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
- struct cyc2ns_data *data = cyc2ns_read_begin();
+ struct cyc2ns_data data;
unsigned long long ns;
- ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
+ cyc2ns_read_begin(&data);
+ ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
+ cyc2ns_read_end();
- cyc2ns_read_end(data);
return ns;
}
@@ -470,12 +471,13 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
*/
static inline unsigned long long ns_2_cycles(unsigned long long ns)
{
- struct cyc2ns_data *data = cyc2ns_read_begin();
+ struct cyc2ns_data data;
unsigned long long cyc;
- cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;
+ cyc2ns_read_begin(&data);
+ cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
+ cyc2ns_read_end();
- cyc2ns_read_end(data);
return cyc;
}
@@ -1121,11 +1123,9 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
* done. The returned pointer is valid till preemption is re-enabled.
*/
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- unsigned int cpu)
+ const struct flush_tlb_info *info)
{
+ unsigned int cpu = smp_processor_id();
int locals = 0, remotes = 0, hubs = 0;
struct bau_desc *bau_desc;
struct cpumask *flush_mask;
@@ -1179,8 +1179,8 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
record_send_statistics(stat, locals, hubs, remotes, bau_desc);
- if (!end || (end - start) <= PAGE_SIZE)
- address = start;
+ if (!info->end || (info->end - info->start) <= PAGE_SIZE)
+ address = info->start;
else
address = TLB_FLUSH_ALL;
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index a6a198c33623..05041871ac90 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_hibernate_asm_$(BITS).o := y
+
# __restore_processor_state() restores %gs after S3 resume and so should not
# itself be stack-protected
nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 6b05a9219ea2..78459a6d455a 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -129,7 +129,7 @@ static void __save_processor_state(struct saved_context *ctxt)
*/
ctxt->cr0 = read_cr0();
ctxt->cr2 = read_cr2();
- ctxt->cr3 = read_cr3();
+ ctxt->cr3 = __read_cr3();
ctxt->cr4 = __read_cr4();
#ifdef CONFIG_X86_64
ctxt->cr8 = read_cr8();
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 6a61194ffd58..e3e62c8a8e70 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -104,7 +104,7 @@ static int set_up_temporary_mappings(void)
{
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
- .pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
+ .page_flag = __PAGE_KERNEL_LARGE_EXEC,
.offset = __PAGE_OFFSET,
};
unsigned long mstart, mend;
@@ -150,7 +150,8 @@ static int relocate_restore_code(void)
memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
/* Make the page containing the relocated code executable */
- pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+ pgd = (pgd_t *)__va(read_cr3_pa()) +
+ pgd_index(relocated_restore_code);
p4d = p4d_offset(pgd, relocated_restore_code);
if (p4d_large(*p4d)) {
set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index a163a90af4aa..cd4be19c36dc 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -102,7 +102,7 @@ static void __init setup_real_mode(void)
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
- trampoline_pgd[511] = init_level4_pgt[511].pgd;
+ trampoline_pgd[511] = init_top_pgt[511].pgd;
#endif
}
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a5c9910d234f..09a085bde0d4 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -125,7 +125,7 @@ int poke_user(struct task_struct *child, long addr, long data)
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
- addr = addr >> 2;
+ addr = addr >> 3;
if ((addr == 4) || (addr == 5))
return -EIO;
child->thread.arch.debugregs[addr] = data;
diff --git a/arch/x86/um/shared/sysdep/kernel-offsets.h b/arch/x86/um/shared/sysdep/kernel-offsets.h
index 46a9df99f3c5..7e1d35b6ad5c 100644
--- a/arch/x86/um/shared/sysdep/kernel-offsets.h
+++ b/arch/x86/um/shared/sysdep/kernel-offsets.h
@@ -2,16 +2,9 @@
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/crypto.h>
+#include <linux/kbuild.h>
#include <asm/mman.h>
-#define DEFINE(sym, val) \
- asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
- DEFINE(sym, offsetof(struct str, mem));
-
void foo(void)
{
#include <common-offsets.h>
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index fffb0a16f9e3..bced7a369a11 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,3 +1,6 @@
+OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
+
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
CFLAGS_REMOVE_spinlock.o = -pg
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 30bb2e80cfe7..a18703be9ead 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -54,38 +54,6 @@ static efi_system_table_t efi_systab_xen __initdata = {
.tables = EFI_INVALID_TABLE_ADDR /* Initialized later. */
};
-static const struct efi efi_xen __initconst = {
- .systab = NULL, /* Initialized later. */
- .runtime_version = 0, /* Initialized later. */
- .mps = EFI_INVALID_TABLE_ADDR,
- .acpi = EFI_INVALID_TABLE_ADDR,
- .acpi20 = EFI_INVALID_TABLE_ADDR,
- .smbios = EFI_INVALID_TABLE_ADDR,
- .smbios3 = EFI_INVALID_TABLE_ADDR,
- .sal_systab = EFI_INVALID_TABLE_ADDR,
- .boot_info = EFI_INVALID_TABLE_ADDR,
- .hcdp = EFI_INVALID_TABLE_ADDR,
- .uga = EFI_INVALID_TABLE_ADDR,
- .uv_systab = EFI_INVALID_TABLE_ADDR,
- .fw_vendor = EFI_INVALID_TABLE_ADDR,
- .runtime = EFI_INVALID_TABLE_ADDR,
- .config_table = EFI_INVALID_TABLE_ADDR,
- .get_time = xen_efi_get_time,
- .set_time = xen_efi_set_time,
- .get_wakeup_time = xen_efi_get_wakeup_time,
- .set_wakeup_time = xen_efi_set_wakeup_time,
- .get_variable = xen_efi_get_variable,
- .get_next_variable = xen_efi_get_next_variable,
- .set_variable = xen_efi_set_variable,
- .query_variable_info = xen_efi_query_variable_info,
- .update_capsule = xen_efi_update_capsule,
- .query_capsule_caps = xen_efi_query_capsule_caps,
- .get_next_high_mono_count = xen_efi_get_next_high_mono_count,
- .reset_system = xen_efi_reset_system,
- .set_virtual_address_map = NULL, /* Not used under Xen. */
- .flags = 0 /* Initialized later. */
-};
-
static efi_system_table_t __init *xen_efi_probe(void)
{
struct xen_platform_op op = {
@@ -102,7 +70,18 @@ static efi_system_table_t __init *xen_efi_probe(void)
/* Here we know that Xen runs on EFI platform. */
- efi = efi_xen;
+ efi.get_time = xen_efi_get_time;
+ efi.set_time = xen_efi_set_time;
+ efi.get_wakeup_time = xen_efi_get_wakeup_time;
+ efi.set_wakeup_time = xen_efi_set_wakeup_time;
+ efi.get_variable = xen_efi_get_variable;
+ efi.get_next_variable = xen_efi_get_next_variable;
+ efi.set_variable = xen_efi_set_variable;
+ efi.query_variable_info = xen_efi_query_variable_info;
+ efi.update_capsule = xen_efi_update_capsule;
+ efi.query_capsule_caps = xen_efi_query_capsule_caps;
+ efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
+ efi.reset_system = xen_efi_reset_system;
efi_systab_xen.tables = info->cfg.addr;
efi_systab_xen.nr_tables = info->cfg.nent;
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index a732bc2b9dfc..f33eef4ebd12 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -142,9 +142,7 @@ static void __init xen_banner(void)
struct xen_extraversion extra;
HYPERVISOR_xen_version(XENVER_extraversion, &extra);
- pr_info("Booting paravirtualized kernel %son %s\n",
- xen_feature(XENFEAT_auto_translated_physmap) ?
- "with PVH extensions " : "", pv_info.name);
+ pr_info("Booting paravirtualized kernel on %s\n", pv_info.name);
printk(KERN_INFO "Xen version: %d.%d%s%s\n",
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
@@ -277,31 +275,19 @@ static bool __init xen_check_mwait(void)
static bool __init xen_check_xsave(void)
{
- unsigned int err, eax, edx;
+ unsigned int cx, xsave_mask;
- /*
- * Xen 4.0 and older accidentally leaked the host XSAVE flag into guest
- * view, despite not being able to support guests using the
- * functionality. Probe for the actual availability of XSAVE by seeing
- * whether xgetbv executes successfully or raises #UD.
- */
- asm volatile("1: .byte 0x0f,0x01,0xd0\n\t" /* xgetbv */
- "xor %[err], %[err]\n"
- "2:\n\t"
- ".pushsection .fixup,\"ax\"\n\t"
- "3: movl $1,%[err]\n\t"
- "jmp 2b\n\t"
- ".popsection\n\t"
- _ASM_EXTABLE(1b, 3b)
- : [err] "=r" (err), "=a" (eax), "=d" (edx)
- : "c" (0));
-
- return err == 0;
+ cx = cpuid_ecx(1);
+
+ xsave_mask = (1 << (X86_FEATURE_XSAVE % 32)) |
+ (1 << (X86_FEATURE_OSXSAVE % 32));
+
+ /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+ return (cx & xsave_mask) == xsave_mask;
}
static void __init xen_init_capabilities(void)
{
- setup_clear_cpu_cap(X86_BUG_SYSRET_SS_ATTRS);
setup_force_cpu_cap(X86_FEATURE_XENPV);
setup_clear_cpu_cap(X86_FEATURE_DCA);
setup_clear_cpu_cap(X86_FEATURE_APERFMPERF);
@@ -317,10 +303,7 @@ static void __init xen_init_capabilities(void)
else
setup_clear_cpu_cap(X86_FEATURE_MWAIT);
- if (xen_check_xsave()) {
- setup_force_cpu_cap(X86_FEATURE_XSAVE);
- setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
- } else {
+ if (!xen_check_xsave()) {
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_OSXSAVE);
}
@@ -972,15 +955,10 @@ static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
void xen_setup_shared_info(void)
{
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- set_fixmap(FIX_PARAVIRT_BOOTMAP,
- xen_start_info->shared_info);
+ set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info);
- HYPERVISOR_shared_info =
- (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
- } else
- HYPERVISOR_shared_info =
- (struct shared_info *)__va(xen_start_info->shared_info);
+ HYPERVISOR_shared_info =
+ (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
#ifndef CONFIG_SMP
/* In UP this is as good a place as any to set up shared info */
@@ -988,6 +966,13 @@ void xen_setup_shared_info(void)
#endif
xen_setup_mfn_list_list();
+
+ /*
+ * Now that shared info is set up we can start using routines that
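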
+ * point to the pvclock area.
+ */
+ if (system_state == SYSTEM_BOOTING)
+ xen_init_time_ops();
}
/* This is called once we have the cpu_possible_mask */
@@ -1286,8 +1271,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
x86_init.oem.arch_setup = xen_arch_setup;
x86_init.oem.banner = xen_banner;
- xen_init_time_ops();
-
/*
* Set up some pagetable state before starting to set any ptes.
*/
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5e375a5e815f..3be06f3caf3c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,7 +42,7 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
-void xen_flush_tlb_all(void)
+static void xen_flush_tlb_all(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 9d9ae6650aa1..1d7a7213a310 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -355,10 +355,8 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- mfn = __pfn_to_mfn(pfn);
- else
- mfn = pfn;
+ mfn = __pfn_to_mfn(pfn);
+
/*
* If there's no mfn for the pfn, then just create an
* empty non-present pte. Unfortunately this loses
@@ -647,9 +645,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
limit--;
BUG_ON(limit >= FIXADDR_TOP);
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
/*
* 64-bit has a great big hole in the middle of the address
* space, which contains the Xen mappings. On 32-bit these
@@ -980,37 +975,32 @@ static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
spin_unlock(&mm->page_table_lock);
}
-
-#ifdef CONFIG_SMP
-/* Another cpu may still have their %cr3 pointing at the pagetable, so
- we need to repoint it somewhere else before we can unpin it. */
-static void drop_other_mm_ref(void *info)
+static void drop_mm_ref_this_cpu(void *info)
{
struct mm_struct *mm = info;
- struct mm_struct *active_mm;
-
- active_mm = this_cpu_read(cpu_tlbstate.active_mm);
- if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
+ if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
leave_mm(smp_processor_id());
- /* If this cpu still has a stale cr3 reference, then make sure
- it has been flushed. */
+ /*
+ * If this cpu still has a stale cr3 reference, then make sure
+ * it has been flushed.
+ */
if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
- load_cr3(swapper_pg_dir);
+ xen_mc_flush();
}
+#ifdef CONFIG_SMP
+/*
+ * Another cpu may still have its %cr3 pointing at the pagetable, so
+ * we need to repoint it somewhere else before we can unpin it.
+ */
static void xen_drop_mm_ref(struct mm_struct *mm)
{
cpumask_var_t mask;
unsigned cpu;
- if (current->active_mm == mm) {
- if (current->mm == mm)
- load_cr3(swapper_pg_dir);
- else
- leave_mm(smp_processor_id());
- }
+ drop_mm_ref_this_cpu(mm);
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
@@ -1018,31 +1008,31 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
&& per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
- smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
+ smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
}
return;
}
cpumask_copy(mask, mm_cpumask(mm));
- /* It's possible that a vcpu may have a stale reference to our
- cr3, because its in lazy mode, and it hasn't yet flushed
- its set of pending hypercalls yet. In this case, we can
- look at its actual current cr3 value, and force it to flush
- if needed. */
+ /*
+ * It's possible that a vcpu may have a stale reference to our
+ * cr3 because it's in lazy mode and hasn't yet flushed its
+ * set of pending hypercalls. In this case, we can
+ * look at its actual current cr3 value, and force it to flush
+ * if needed.
+ */
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpumask_set_cpu(cpu, mask);
}
- if (!cpumask_empty(mask))
- smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
+ smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
- if (current->active_mm == mm)
- load_cr3(swapper_pg_dir);
+ drop_mm_ref_this_cpu(mm);
}
#endif
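The shape of this refactoring, reduced to essentials: one callback valid both as a direct call on the current CPU and as an IPI target, shared by the SMP and UP paths. A sketch under those assumptions (helper names are made up):

	/* do_drop() serves as a local call and as an IPI callback.
	 * smp_call_function_many() skips the calling CPU, so the explicit
	 * local call is not redundant. */
	static void do_drop(void *info)
	{
		/* per-CPU cleanup for the mm passed via info */
	}

	static void drop_everywhere(void *info)
	{
		do_drop(info);				/* this CPU */
		smp_call_function_many(cpu_online_mask,	/* every other CPU */
				       do_drop, info, 1);
	}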
@@ -1289,9 +1279,6 @@ static void __init xen_pagetable_cleanhighmap(void)
static void __init xen_pagetable_p2m_setup(void)
{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
xen_vmalloc_p2m_tree();
#ifdef CONFIG_X86_64
@@ -1314,8 +1301,7 @@ static void __init xen_pagetable_init(void)
xen_build_mfn_list_list();
/* Remap memory freed due to conflicts with E820 map */
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- xen_remap_memory();
+ xen_remap_memory();
xen_setup_shared_info();
}
@@ -1375,8 +1361,7 @@ static void xen_flush_tlb_single(unsigned long addr)
}
static void xen_flush_tlb_others(const struct cpumask *cpus,
- struct mm_struct *mm, unsigned long start,
- unsigned long end)
+ const struct flush_tlb_info *info)
{
struct {
struct mmuext_op op;
@@ -1388,7 +1373,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
} *args;
struct multicall_space mcs;
- trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
+ trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
if (cpumask_empty(cpus))
return; /* nothing to do */
@@ -1402,9 +1387,10 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+ if (info->end != TLB_FLUSH_ALL &&
+ (info->end - info->start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
- args->op.arg1.linear_addr = start;
+ args->op.arg1.linear_addr = info->start;
}
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
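The new prototype bundles the mm and the address range into a single descriptor. For readers without the tree at hand, the fields consumed above are assumed to look roughly like this (a sketch, not the authoritative definition):

	struct flush_tlb_info {
		struct mm_struct *mm;	/* address space being flushed */
		unsigned long start;	/* first address of the range */
		unsigned long end;	/* end of range, or TLB_FLUSH_ALL */
	};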
@@ -1479,8 +1465,8 @@ static void xen_write_cr3(unsigned long cr3)
* At the start of the day - when Xen launches a guest, it has already
* built pagetables for the guest. We diligently look over them
* in xen_setup_kernel_pagetable and graft them, as appropriate, into the
- * init_level4_pgt and its friends. Then when we are happy we load
- * the new init_level4_pgt - and continue on.
+ * init_top_pgt and its friends. Then when we are happy we load
+ * the new init_top_pgt - and continue on.
*
* The generic code starts (start_kernel) and 'init_mem_mapping' sets
* up the rest of the pagetables. When it has completed it loads the cr3.
@@ -1923,23 +1909,22 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
pt_end = pt_base + xen_start_info->nr_pt_frames;
/* Zap identity mapping */
- init_level4_pgt[0] = __pgd(0);
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Pre-constructed entries are in pfn, so convert to mfn */
- /* L4[272] -> level3_ident_pgt
- * L4[511] -> level3_kernel_pgt */
- convert_pfn_mfn(init_level4_pgt);
-
- /* L3_i[0] -> level2_ident_pgt */
- convert_pfn_mfn(level3_ident_pgt);
- /* L3_k[510] -> level2_kernel_pgt
- * L3_k[511] -> level2_fixmap_pgt */
- convert_pfn_mfn(level3_kernel_pgt);
-
- /* L3_k[511][506] -> level1_fixmap_pgt */
- convert_pfn_mfn(level2_fixmap_pgt);
- }
+ init_top_pgt[0] = __pgd(0);
+
+ /* Pre-constructed entries are in pfn, so convert to mfn */
+ /* L4[272] -> level3_ident_pgt */
+ /* L4[511] -> level3_kernel_pgt */
+ convert_pfn_mfn(init_top_pgt);
+
+ /* L3_i[0] -> level2_ident_pgt */
+ convert_pfn_mfn(level3_ident_pgt);
+ /* L3_k[510] -> level2_kernel_pgt */
+ /* L3_k[511] -> level2_fixmap_pgt */
+ convert_pfn_mfn(level3_kernel_pgt);
+
+ /* L3_k[511][506] -> level1_fixmap_pgt */
+ convert_pfn_mfn(level2_fixmap_pgt);
+
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
@@ -1960,36 +1945,32 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* Copy the initial P->M table mappings if necessary. */
i = pgd_index(xen_start_info->mfn_list);
if (i && i < pgd_index(__START_KERNEL_map))
- init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
-
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Make pagetable pieces RO */
- set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
- set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
-
- /* Pin down new L4 */
- pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
- PFN_DOWN(__pa_symbol(init_level4_pgt)));
-
- /* Unpin Xen-provided one */
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+ init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
+
+ /* Make pagetable pieces RO */
+ set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+ set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+ /* Pin down new L4 */
+ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+ PFN_DOWN(__pa_symbol(init_top_pgt)));
+
+ /* Unpin Xen-provided one */
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
- /*
- * At this stage there can be no user pgd, and no page
- * structure to attach it to, so make sure we just set kernel
- * pgd.
- */
- xen_mc_batch();
- __xen_write_cr3(true, __pa(init_level4_pgt));
- xen_mc_issue(PARAVIRT_LAZY_CPU);
- } else
- native_write_cr3(__pa(init_level4_pgt));
+ /*
+ * At this stage there can be no user pgd, and no page structure to
+ * attach it to, so make sure we just set kernel pgd.
+ */
+ xen_mc_batch();
+ __xen_write_cr3(true, __pa(init_top_pgt));
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
/* We can't easily rip out L3 and L2, as the Xen pagetables are
* set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
@@ -2025,7 +2006,8 @@ static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
/*
* Translate a virtual address to a physical one without relying on mapped
- * page tables.
+ * page tables. Don't rely on big pages being aligned in (guest) physical
+ * space!
*/
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
@@ -2035,7 +2017,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
pmd_t pmd;
pte_t pte;
- pa = read_cr3();
+ pa = read_cr3_pa();
pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
sizeof(pgd)));
if (!pgd_present(pgd))
@@ -2046,7 +2028,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
sizeof(pud)));
if (!pud_present(pud))
return 0;
- pa = pud_pfn(pud) << PAGE_SHIFT;
+ pa = pud_val(pud) & PTE_PFN_MASK;
if (pud_large(pud))
return pa + (vaddr & ~PUD_MASK);
@@ -2054,7 +2036,7 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
sizeof(pmd)));
if (!pmd_present(pmd))
return 0;
- pa = pmd_pfn(pmd) << PAGE_SHIFT;
+ pa = pmd_val(pmd) & PTE_PFN_MASK;
if (pmd_large(pmd))
return pa + (vaddr & ~PMD_MASK);
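The switch away from pud_pfn()/pmd_pfn() matters exactly when a large mapping's frame is not aligned to the mapping size, which the updated comment warns can happen for a guest: those helpers may mask the entry down to the big-page boundary and drop the low bits. A standalone illustration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	#define PMD_SHIFT		21
	#define PTE_PFN_MASK		0x000ffffffffff000ull	/* bits 12..51 */
	#define PHYS_PMD_PAGE_MASK	(~((1ull << PMD_SHIFT) - 1))

	int main(void)
	{
		/* A 2 MiB mapping whose machine base is only 4 KiB aligned. */
		uint64_t pmd_val = 0x342000ull | 0x83;	/* present + large */

		/* page-granular mask keeps the true base: 0x342000 */
		printf("%#llx\n", (unsigned long long)(pmd_val & PTE_PFN_MASK));
		/* 2 MiB-granular mask loses 0x142000: 0x200000 */
		printf("%#llx\n", (unsigned long long)
		       (pmd_val & PTE_PFN_MASK & PHYS_PMD_PAGE_MASK));
		return 0;
	}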
@@ -2115,7 +2097,7 @@ void __init xen_relocate_p2m(void)
pt_phys = pmd_phys + PFN_PHYS(n_pmd);
p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
- pgd = __va(read_cr3());
+ pgd = __va(read_cr3_pa());
new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
idx_p4d = 0;
save_pud = n_pud;
@@ -2222,7 +2204,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
{
unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
- BUG_ON(read_cr3() != __pa(initial_page_table));
+ BUG_ON(read_cr3_pa() != __pa(initial_page_table));
BUG_ON(cr3 != __pa(swapper_pg_dir));
/*
@@ -2402,9 +2384,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
static void __init xen_post_allocator_init(void)
{
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@@ -2510,9 +2489,6 @@ void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
pv_mmu_ops = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE);
@@ -2649,9 +2625,6 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
* this function are redundant and can be ignored.
*/
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
-
if (unlikely(order > MAX_CONTIG_ORDER))
return -ENOMEM;
@@ -2688,9 +2661,6 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
int success;
unsigned long vstart;
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return;
-
if (unlikely(order > MAX_CONTIG_ORDER))
return;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 090c7eb4dca9..a1895a8e85c1 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -406,7 +406,7 @@ static void __init xen_time_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
-void __init xen_init_time_ops(void)
+void __ref xen_init_time_ops(void)
{
pv_time_ops = xen_time_ops;
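The __init to __ref switch pairs with moving the call site into xen_setup_shared_info() above: the function is now reachable from non-__init code, but the system_state == SYSTEM_BOOTING guard means it only ever runs while __init text is still mapped, and __ref records that the cross-section reference is deliberate so modpost stays quiet. The pattern in miniature:

	static void __init boot_only_setup(void)
	{
		/* touches __init data/text */
	}

	void __ref maybe_setup(void)
	{
		if (system_state == SYSTEM_BOOTING)	/* __init still present */
			boot_only_setup();
	}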
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index 5e246716d58f..e1a5fbeae08d 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -87,7 +87,7 @@ ENTRY(pvh_start_xen)
wrmsr
/* Enable pre-constructed page tables. */
- mov $_pa(init_level4_pgt), %eax
+ mov $_pa(init_top_pgt), %eax
mov %eax, %cr3
mov $(X86_CR0_PG | X86_CR0_PE), %eax
mov %eax, %cr0
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88ea7646..19707db966f1 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }
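The macro exists because Linux reserves virq 0 to mean "no interrupt" while Xtensa hardware lines are numbered from 0; NR_IRQS grows by one to make room for the shifted range. Consumers declare the shifted number, as the xtfpga hunks further down do; in miniature (device and line are made up):

	static struct resource example_res[] = {
		{
			.start = XTENSA_PIC_LINUX_IRQ(3),	/* hw line 3 -> virq 4 */
			.end   = XTENSA_PIC_LINUX_IRQ(3),
			.flags = IORESOURCE_IRQ,
		},
	};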
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 003eeee3fbc6..30ee8c608853 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -213,8 +213,6 @@ struct mm_struct;
#define release_segments(mm) do { } while(0)
#define forget_segments() do { } while (0)
-#define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc)
-
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index 56aad54e7fb7..b15bf6bc0e94 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -1,25 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += byteorder.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += swab.h
-header-y += termbits.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a265edd6ac37..99341028cc77 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{
int irq = irq_find_mapping(NULL, hwirq);
- if (hwirq >= NR_IRQS) {
- printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
- __func__, hwirq);
- }
-
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 394ef08300b6..33bfa5270d95 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
(ccount_freq/10000) % 100,
loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100);
-
- seq_printf(f,"flags\t\t: "
+ seq_puts(f, "flags\t\t: "
#if XCHAL_HAVE_NMI
"nmi "
#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 06937928cb72..74afbf02d07e 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr)
return -ENOMEM;
- if (!vmm || addr + len <= vmm->vm_start)
+ if (!vmm || addr + len <= vm_start_gap(vmm))
return addr;
addr = vmm->vm_end;
if (flags & MAP_SHARED)
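vm_start_gap() comes from this cycle's stack-guard-gap rework: for a grows-down VMA the effective start is pulled back by the guard gap, so a fresh mapping can never be placed flush against a stack. A simplified sketch of the helper's assumed behaviour (not the verbatim mm definition):

	static unsigned long vm_start_gap_sketch(struct vm_area_struct *vma)
	{
		unsigned long start = vma->vm_start;

		if (vma->vm_flags & VM_GROWSDOWN) {
			if (start > stack_guard_gap)
				start -= stack_guard_gap;	/* leave the gap free */
			else
				start = 0;			/* clamp underflow */
		}
		return start;
	}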
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 30d9fc21e076..162c77e53ca8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -118,7 +118,7 @@ SECTIONS
SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
- SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48)
+ SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
#endif
@@ -306,13 +306,13 @@ SECTIONS
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- DOUBLEEXC_VECTOR_VADDR - 48,
+ DOUBLEEXC_VECTOR_VADDR - 20,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
- 48,
+ 20,
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 02e94bb3ad3e..c45b90bb9339 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -317,8 +317,7 @@ static int __init simdisk_init(void)
if (simdisk_count > MAX_SIMDISK_COUNT)
simdisk_count = MAX_SIMDISK_COUNT;
- sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
- GFP_KERNEL);
+ sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
if (sddev == NULL)
goto out_unregister;
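kmalloc_array() is preferred over an open-coded multiply because it rejects n * size overflow instead of allocating a silently truncated buffer. Roughly (a sketch of the guarantee, not the exact implementation):

	static inline void *kmalloc_array_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;		/* multiplication would overflow */
		return kmalloc(n * size, flags);
	}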
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b440a1..1fda7e20dfcb 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
/* Interrupt configuration. */
-#define PLATFORM_NR_IRQS 10
+#define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM
+#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM
+#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif
/*
@@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10
-#define C67X00_IRQ 5
+
#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 779be723eb2b..42285f35d313 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
.flags = IORESOURCE_MEM,
},
[2] = { /* IRQ number */
- .start = OETH_IRQ,
- .end = OETH_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
.flags = IORESOURCE_MEM,
},
[1] = { /* IRQ number */
- .start = C67X00_IRQ,
- .end = C67X00_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
- .irq = DUART16552_INTNUM,
+ .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,